file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
main.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Example of a very simple runtime that can perform two types of transactions:
//! incrementing and resetting a counter in the service instance.
#![allow(clippy::unnecessary_wraps)]
use exonum::{
blockchain::{config::GenesisConfigBuilder, Blockchain, ConsensusConfig, ValidatorKeys},
helpers::Height,
keys::Keys,
merkledb::{BinaryValue, Snapshot, TemporaryDB},
runtime::{
migrations::{InitMigrationError, MigrationScript},
oneshot::Receiver,
versioning::Version,
AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime,
SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID,
},
};
use exonum_derive::ExecutionFail;
use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle};
use exonum_rust_runtime::{spec::Deploy, RustRuntime};
use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface};
use futures::TryFutureExt;
use std::{cell::Cell, collections::BTreeMap, thread, time::Duration};
/// Service instance with a counter.
#[derive(Debug, Default)]
struct SampleService {
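// `Cell` provides interior mutability: `Runtime` methods such as `execute` take `&self`,
// so the counter must be updated through a shared reference.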
counter: Cell<u64>,
_name: String,
}
/// Sample runtime.
#[derive(Debug, Default)]
struct SampleRuntime {
deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>,
started_services: BTreeMap<InstanceId, SampleService>,
}
// Define runtime-specific errors.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(ExecutionFail)]
#[execution_fail(kind = "runtime")]
enum SampleRuntimeError {
/// Incorrect information to call transaction.
IncorrectCallInfo = 1,
/// Incorrect transaction payload.
IncorrectPayload = 2,
}
impl SampleRuntime {
/// Create a new service instance with the given specification.
fn start_service(
&self,
artifact: &ArtifactId,
instance: &InstanceDescriptor,
) -> Result<SampleService, ExecutionError> {
// Invariants guaranteed by the core.
assert!(self.deployed_artifacts.contains_key(artifact));
assert!(!self.started_services.contains_key(&instance.id));
Ok(SampleService {
_name: instance.name.to_owned(),
..SampleService::default()
})
}
/// In this simplest case, the artifact is merely added to the deployed artifacts table.
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
spec: Vec<u8>,
) -> Result<(), ExecutionError> {
// Invariant guaranteed by the core
assert!(!self.deployed_artifacts.contains_key(&artifact));
println!("Deploying artifact: {}", &artifact);
self.deployed_artifacts.insert(artifact, spec);
Ok(())
}
}
impl Runtime for SampleRuntime {
fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver {
Receiver::with_result(self.deploy_artifact(artifact, spec))
}
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool {
self.deployed_artifacts.contains_key(id)
}
/// Initiates adding a new service and sets the initial counter value for it.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
params: Vec<u8>,
) -> Result<(), ExecutionError> {
let service_instance = self.start_service(artifact, context.instance())?;
let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?;
service_instance.counter.set(new_value);
println!(
"Initializing service {}: {} with value {}",
artifact,
context.instance(),
new_value
);
Ok(())
}
fn initiate_resuming_service(
&self,
_context: ExecutionContext<'_>,
_artifact: &ArtifactId,
_parameters: Vec<u8>,
) -> Result<(), ExecutionError> {
unreachable!("We don't resume services in this example.")
}
/// Commits status for the `SampleService` instance with the specified ID.
fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) {
let spec = &state.spec;
match state.status {
Some(InstanceStatus::Active) => {
// Unwrap here is safe: by the time this method is invoked,
// `exonum` guarantees that `initiate_adding_service` was called
// before and returned `Ok(..)`.
let instance = self
.start_service(&spec.artifact, &spec.as_descriptor())
.unwrap();
println!("Starting service {}: {:?}", spec, instance);
self.started_services.insert(spec.id, instance);
}
Some(InstanceStatus::Stopped) => {
let instance = self.started_services.remove(&spec.id);
println!("Stopping service {}: {:?}", spec, instance);
}
_ => {
// We aren't interested in other possible statuses.
}
}
}
fn migrate(
&self,
_new_artifact: &ArtifactId,
_data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError> {
Err(InitMigrationError::NotSupported)
}
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError> {
let service = self
.started_services
.get(&context.instance().id)
.ok_or(SampleRuntimeError::IncorrectCallInfo)?;
println!(
"Executing method {}#{} of service {}",
context.interface_name(),
method_id,
context.instance().id
);
const SERVICE_INTERFACE: &str = "";
match (context.interface_name(), method_id) {
// Increment counter.
(SERVICE_INTERFACE, 0) => {
let value = u64::from_bytes(payload.into())
.map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?;
let counter = service.counter.get();
println!("Updating counter value to {}", counter + value);
service.counter.set(value + counter);
Ok(())
}
// Reset counter.
(SERVICE_INTERFACE, 1) => {
if !payload.is_empty() {
Err(SampleRuntimeError::IncorrectPayload.into())
} else {
println!("Resetting counter");
service.counter.set(0);
Ok(())
}
}
// Unknown transaction.
(interface, method) => {
let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!(
"Incorrect information to call transaction. {}#{}",
interface, method
));
Err(err)
}
}
}
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn | (&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}
impl From<SampleRuntime> for (u32, Box<dyn Runtime>) {
fn from(inner: SampleRuntime) -> Self {
(SampleRuntime::ID, Box::new(inner))
}
}
impl WellKnownRuntime for SampleRuntime {
const ID: u32 = 255;
}
fn node_config() -> (NodeConfig, Keys) {
let keys = Keys::random();
let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())];
let consensus = ConsensusConfig::default().with_validator_keys(validator_keys);
let api_address = "0.0.0.0:8000".parse().unwrap();
let api_cfg = NodeApiConfig {
public_api_address: Some(api_address),
..Default::default()
};
let peer_address = "0.0.0.0:2000";
let node_config = NodeConfig {
listen_address: peer_address.parse().unwrap(),
consensus,
external_address: peer_address.to_owned(),
network: Default::default(),
connect_list: Default::default(),
api: api_cfg,
mempool: Default::default(),
thread_pool_size: Default::default(),
};
(node_config, keys)
}
async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) {
let service_keypair = blockchain.service_keypair();
let deploy_height = Height(50);
// Send an artifact `DeployRequest` to the sample runtime.
let artifact = "255:sample_artifact:0.1.0".parse().unwrap();
let request = DeployRequest::new(artifact, deploy_height);
let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
// Wait until the request is finished.
thread::sleep(Duration::from_secs(5));
// Send a `StartService` request to the sample runtime.
let instance_name = "instance";
let proposal = ConfigPropose::immediate(0).start_service(
"255:sample_artifact:0.1.0".parse().unwrap(),
instance_name,
10_u64,
);
let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal);
blockchain
.sender()
.broadcast_transaction(proposal)
.await
.unwrap();
// Wait until instance identifier is assigned.
thread::sleep(Duration::from_secs(1));
// Get an instance identifier.
let snapshot = blockchain.snapshot();
let state = snapshot
.for_dispatcher()
.get_instance(instance_name)
.unwrap();
assert_eq!(state.status.unwrap(), InstanceStatus::Active);
let instance_id = state.spec.id;
// Send an update counter transaction.
let tx = AnyTx::new(CallInfo::new(instance_id, 0), 1_000_u64.into_bytes());
let tx = tx.sign_with_keypair(service_keypair);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
thread::sleep(Duration::from_secs(2));
// Send a reset counter transaction.
let tx = AnyTx::new(CallInfo::new(instance_id, 1), vec![]);
let tx = tx.sign_with_keypair(service_keypair);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
thread::sleep(Duration::from_secs(2));
shutdown_handle.shutdown().await.unwrap();
}
#[tokio::main]
async fn main() {
exonum::helpers::init_logger().unwrap();
println!("Creating database in temporary dir...");
let db = TemporaryDB::new();
let (node_cfg, node_keys) = node_config();
let consensus_config = node_cfg.consensus.clone();
let mut genesis_config = GenesisConfigBuilder::with_consensus_config(consensus_config);
let mut rt = RustRuntime::builder();
Supervisor::simple().deploy(&mut genesis_config, &mut rt);
println!("Creating blockchain with additional runtime...");
let node = NodeBuilder::new(db, node_cfg, node_keys)
.with_genesis_config(genesis_config.build())
.with_runtime(SampleRuntime::default())
.with_runtime_fn(|channel| {
RustRuntime::builder()
.with_factory(Supervisor)
.build(channel.endpoints_sender())
})
.build();
let shutdown_handle = node.shutdown_handle();
println!("Starting a single node...");
println!("Blockchain is ready for transactions!");
let blockchain = node.blockchain().clone();
let node_task = node.run().unwrap_or_else(|e| panic!("{}", e));
let node_task = tokio::spawn(node_task);
examine_runtime(blockchain, shutdown_handle).await;
node_task.await.unwrap();
}
| after_commit | identifier_name |
game.rs | use std::collections::BTreeMap;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
use tokio::sync::{
mpsc::{self, error::TrySendError},
oneshot,
};
use tokio::time;
use logic::components::{Movement, WorldInteraction};
use logic::legion::prelude::{Entity, World};
use logic::resources::DeadEntities;
use logic::snapshot::SnapshotEncoder;
use protocol::{
Action, ActionKind, EntityId, Event, EventKind, GameOver, PlayerId, Request, RequestKind,
Response, ResponseKind, Snapshot,
};
/// How many times per second to update the game world.
const TICK_RATE: u32 = 60;
/// The maximum number of events to buffer per player.
const EVENT_BUFFER_SIZE: usize = 1024;
pub struct Game {
players: BTreeMap<PlayerId, PlayerData>,
receiver: mpsc::Receiver<Command>,
world: World,
executor: logic::Executor,
snapshots: SnapshotEncoder,
time: u32,
}
#[derive(Debug, Clone)]
struct PlayerData {
entity: Entity,
network_id: EntityId,
events: mpsc::Sender<Event>,
}
#[derive(Debug)]
pub struct PlayerHandle {
player: PlayerId,
events: mpsc::Receiver<Event>,
}
#[derive(Debug, Clone)]
pub struct GameHandle {
sender: mpsc::Sender<Command>,
}
#[derive(Debug)]
enum Command {
Request {
request: Request,
callback: Callback<Response>,
},
RegisterPlayer {
callback: Callback<PlayerHandle>,
},
DisconnectPlayer(PlayerId),
Snapshot {
callback: Callback<Snapshot>,
},
PerformAction {
action: Action,
player: PlayerId,
},
}
struct Callback<T> {
sender: oneshot::Sender<T>,
}
// We don't care what the callback contains; simply print the expected return type.
impl<T> Debug for Callback<T> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "Callback<{}>", std::any::type_name::<T>())
}
}
impl Game {
/// Create a new game alongside a handle to that game.
pub fn new() -> (Game, GameHandle) {
let (sender, receiver) = mpsc::channel(1024);
let world = logic::create_world(logic::WorldKind::WithObjects);
let schedule = logic::add_systems(Default::default(), logic::SystemSet::Everything);
let executor = logic::Executor::new(schedule);
let game = Game {
players: BTreeMap::new(),
receiver,
world,
executor,
snapshots: SnapshotEncoder::new(),
time: 0,
};
let handle = GameHandle { sender };
(game, handle)
}
/// Run the game to completion (either the handle is dropped or a fatal error occurs).
pub async fn run(&mut self) {
let mut timer = time::interval(time::Duration::from_secs(1) / TICK_RATE);
loop {
tokio::select! {
_ = timer.tick() => {
self.tick();
}
command = self.receiver.recv() => match command {
None => {
log::info!("game handle dropped");
break;
},
Some(command) => {
log::debug!("got command: {:?}", command);
self.execute_command(command);
}
}
};
}
}
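/// Advance the game world one tick: run all systems, refresh the snapshot mapping,
/// check win/lose conditions, and broadcast a fresh snapshot to every player.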
fn tick(&mut self) {
self.executor.tick(&mut self.world);
self.snapshots.update_mapping(&self.world);
self.check_win_condition();
let mut events = Vec::<EventKind>::new();
let snapshot = Arc::new(self.snapshot());
events.push(snapshot.into());
for event in events {
self.broadcast(event);
}
self.time = self.time.wrapping_add(1);
}
fn broadcast<T>(&mut self, kind: T)
where
T: Into<EventKind>,
{
let event = Event {
time: self.time,
kind: kind.into(),
};
let mut dead = Vec::new();
for (&id, player) in &mut self.players {
match player.events.try_send(event.clone()) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
log::warn!("player {}'s event buffer is full", id);
dead.push(id);
// TODO: request full client resync
}
Err(TrySendError::Closed(_)) => {
log::info!("player {} stopped listening for events", id);
dead.push(id);
// TODO: stop attempting to send events to this player, and potentially
// disconnect them.
}
}
}
for player in dead {
self.remove_player(player);
}
}
fn remove_player(&mut self, player: PlayerId) -> Option<PlayerData> {
let data = self.players.remove(&player)?;
self.world.delete(data.entity);
self.world
.resources
.get_mut::<DeadEntities>()
.unwrap()
.entities
.push(data.network_id);
Some(data)
}
/// Check if any player has won or lost.
fn check_win_condition(&mut self) {
let dead = self.world.resources.get::<DeadEntities>().unwrap();
let mut losers = Vec::new();
for (&player, data) in &self.players {
if dead.entities.contains(&data.network_id) {
losers.push(player);
}
}
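// Release the immutable borrow of the world's resources before mutating
// `self.players` and the world below.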
drop(dead);
for loser in losers {
let mut player = self.players.remove(&loser).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Loser),
};
tokio::spawn(async move { player.events.send(event).await });
if self.players.len() == 1 {
let winner = *self.players.keys().next().unwrap();
let mut player = self.remove_player(winner).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Winner),
};
tokio::spawn(async move { player.events.send(event).await });
}
}
}
/// Execute a command.
fn execute_command(&mut self, command: Command) {
match command {
Command::RegisterPlayer { callback } => {
callback.send(self.register_player());
}
Command::DisconnectPlayer(player) => {
self.remove_player(player);
}
Command::Request { callback, request } => {
let message = self.handle_request(request);
callback.send(message);
}
Command::Snapshot { callback } => {
let snapshot = self.snapshot();
callback.send(snapshot);
}
Command::PerformAction { action, player } => self.perform_action(action, player),
}
}
/// Create and register a new player
fn register_player(&mut self) -> PlayerHandle {
let player = self.next_player_id();
let entity = logic::add_player(&mut self.world, player);
let (sender, receiver) = mpsc::channel(EVENT_BUFFER_SIZE);
let network_id = *self.world.get_component::<EntityId>(entity).unwrap();
let data = PlayerData {
network_id,
entity,
events: sender,
};
self.players.insert(player, data);
PlayerHandle {
player,
events: receiver,
}
}
/// Find the next available player id
fn next_player_id(&self) -> PlayerId {
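// `players` is a `BTreeMap`, so keys are visited in ascending order;
// this finds the smallest positive id not yet taken.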
let mut id = 1;
for player in self.players.keys() {
if player.0 == id {
id += 1;
} else {
break;
}
}
PlayerId(id)
}
/// Perform the request and return the result in a message
fn handle_request(&mut self, request: Request) -> Response {
let kind = match request.kind {
RequestKind::Ping => protocol::Pong.into(),
RequestKind::Init => {
let error = "Requested 'Init' on already initialized player";
ResponseKind::Error(error.into())
}
};
Response {
channel: request.channel,
kind,
}
}
/// Get a snapshot of the current game state.
fn snapshot(&self) -> Snapshot {
self.snapshots.make_snapshot(&self.world)
}
/// Perform an action for a player.
fn perform_action(&mut self, action: Action, player: PlayerId) {
match action.kind {
ActionKind::Move(new) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let mut movement = self.world.get_component_mut::<Movement>(data.entity)?;
movement.direction = new.direction;
Some(())
}();
}
ActionKind::Break(breaking) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let breaking = breaking
.entity
.and_then(|breaking| self.snapshots.lookup(breaking));
self.world
.get_component_mut::<WorldInteraction>(data.entity)?
.breaking = breaking;
Some(())
}();
}
ActionKind::Throw(throwing) => {
if let Some(data) = self.players.get(&player) {
logic::events::throw(&mut self.world, data.entity, throwing.target);
}
}
}
}
}
impl GameHandle {
/// Register a new client and return its id.
pub async fn register_player(&mut self) -> crate::Result<PlayerHandle> {
self.send_with(|callback| Command::RegisterPlayer { callback })
.await
}
/// Remove a player from the game.
pub async fn disconnect_player(&mut self, player: PlayerId) -> crate::Result<()> {
self.sender.send(Command::DisconnectPlayer(player)).await?;
Ok(())
}
/// Handle a request made by a player.
pub async fn handle_request(&mut self, request: Request) -> crate::Result<Response> {
self.send_with(move |callback| Command::Request { request, callback })
.await
}
/// Get a snapshot of the current game state.
pub async fn snapshot(&mut self) -> crate::Result<Snapshot> {
self.send_with(|callback| Command::Snapshot { callback })
.await
}
/// Handle an action performed by a player
pub async fn handle_action(&mut self, action: Action, player: PlayerId) -> crate::Result<()> {
self.sender
.send(Command::PerformAction { action, player })
.await?;
Ok(())
}
/// Send a command to the game with the specified callback and then return the value passed into
/// the callback.
async fn send_with<F, O>(&mut self, to_command: F) -> crate::Result<O>
where
F: FnOnce(Callback<O>) -> Command,
{
let (callback, value) = Callback::new();
let command = to_command(callback);
self.sender.send(command).await?;
value.await.map_err(Into::into)
}
}
impl PlayerHandle {
/// Get the id of this player
pub fn id(&self) -> PlayerId {
self.player
}
pub async fn poll_event(&mut self) -> Option<Event> {
self.events.recv().await
}
}
impl<T> Callback<T> {
/// Create a new callback
pub fn new() -> (Callback<T>, oneshot::Receiver<T>) |
/// Attempt to send the value, returning false if the receiver was closed.
pub fn send(self, value: T) -> bool {
match self.sender.send(value) {
Ok(()) => true,
Err(_) => false,
}
}
}
| {
let (sender, receiver) = oneshot::channel();
(Callback { sender }, receiver)
} | identifier_body |
game.rs | use std::collections::BTreeMap;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
use tokio::sync::{
mpsc::{self, error::TrySendError},
oneshot,
};
use tokio::time;
use logic::components::{Movement, WorldInteraction};
use logic::legion::prelude::{Entity, World};
use logic::resources::DeadEntities;
use logic::snapshot::SnapshotEncoder;
use protocol::{
Action, ActionKind, EntityId, Event, EventKind, GameOver, PlayerId, Request, RequestKind,
Response, ResponseKind, Snapshot,
};
/// How many times per second to update the game world.
const TICK_RATE: u32 = 60;
/// The maximum number of events to buffer per player.
const EVENT_BUFFER_SIZE: usize = 1024;
pub struct Game {
players: BTreeMap<PlayerId, PlayerData>,
receiver: mpsc::Receiver<Command>,
world: World,
executor: logic::Executor,
snapshots: SnapshotEncoder,
time: u32,
}
#[derive(Debug, Clone)]
struct PlayerData {
entity: Entity,
network_id: EntityId,
events: mpsc::Sender<Event>,
}
#[derive(Debug)]
pub struct PlayerHandle {
player: PlayerId,
events: mpsc::Receiver<Event>,
}
#[derive(Debug, Clone)]
pub struct GameHandle {
sender: mpsc::Sender<Command>,
}
#[derive(Debug)]
enum Command {
Request {
request: Request,
callback: Callback<Response>,
},
RegisterPlayer {
callback: Callback<PlayerHandle>,
},
DisconnectPlayer(PlayerId),
Snapshot {
callback: Callback<Snapshot>,
},
PerformAction {
action: Action,
player: PlayerId,
},
}
struct Callback<T> {
sender: oneshot::Sender<T>,
}
// We don't care what the callback contains; simply print the expected return type.
impl<T> Debug for Callback<T> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "Callback<{}>", std::any::type_name::<T>())
}
}
impl Game {
/// Create a new game alongside a handle to that game.
pub fn new() -> (Game, GameHandle) {
let (sender, receiver) = mpsc::channel(1024);
let world = logic::create_world(logic::WorldKind::WithObjects);
let schedule = logic::add_systems(Default::default(), logic::SystemSet::Everything);
let executor = logic::Executor::new(schedule);
let game = Game {
players: BTreeMap::new(),
receiver,
world,
executor,
snapshots: SnapshotEncoder::new(),
time: 0,
};
let handle = GameHandle { sender };
(game, handle)
}
/// Run the game to completion (either the handle is dropped or a fatal error occurs).
pub async fn run(&mut self) {
let mut timer = time::interval(time::Duration::from_secs(1) / TICK_RATE);
loop {
tokio::select! {
_ = timer.tick() => {
self.tick();
}
command = self.receiver.recv() => match command {
None => {
log::info!("game handle dropped");
break;
},
Some(command) => {
log::debug!("got command: {:?}", command);
self.execute_command(command);
}
}
};
}
}
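/// Advance the game world one tick: run all systems, refresh the snapshot mapping,
/// check win/lose conditions, and broadcast a fresh snapshot to every player.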
fn tick(&mut self) {
self.executor.tick(&mut self.world);
self.snapshots.update_mapping(&self.world);
self.check_win_condition();
let mut events = Vec::<EventKind>::new();
let snapshot = Arc::new(self.snapshot());
events.push(snapshot.into());
for event in events {
self.broadcast(event);
}
self.time = self.time.wrapping_add(1);
}
fn broadcast<T>(&mut self, kind: T)
where
T: Into<EventKind>,
{
let event = Event {
time: self.time,
kind: kind.into(),
};
let mut dead = Vec::new();
for (&id, player) in &mut self.players {
match player.events.try_send(event.clone()) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
log::warn!("player {}'s event buffer is full", id);
dead.push(id);
// TODO: request full client resync
}
Err(TrySendError::Closed(_)) => {
log::info!("player {} stopped listening for events", id);
dead.push(id);
// TODO: stop attempting to send events to this player, and potentially
// disconnect them.
}
}
}
for player in dead {
self.remove_player(player);
}
}
fn remove_player(&mut self, player: PlayerId) -> Option<PlayerData> {
let data = self.players.remove(&player)?;
self.world.delete(data.entity);
self.world
.resources
.get_mut::<DeadEntities>()
.unwrap()
.entities
.push(data.network_id);
Some(data)
}
/// Check if any player has won or lost.
fn check_win_condition(&mut self) {
let dead = self.world.resources.get::<DeadEntities>().unwrap();
let mut losers = Vec::new();
for (&player, data) in &self.players {
if dead.entities.contains(&data.network_id) {
losers.push(player);
}
}
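// Release the immutable borrow of the world's resources before mutating
// `self.players` and the world below.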
drop(dead);
for loser in losers {
let mut player = self.players.remove(&loser).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Loser),
};
tokio::spawn(async move { player.events.send(event).await });
if self.players.len() == 1 {
let winner = *self.players.keys().next().unwrap();
let mut player = self.remove_player(winner).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Winner),
};
tokio::spawn(async move { player.events.send(event).await });
}
}
}
/// Execute a command.
fn execute_command(&mut self, command: Command) {
match command {
Command::RegisterPlayer { callback } => {
callback.send(self.register_player());
}
Command::DisconnectPlayer(player) => {
self.remove_player(player);
}
Command::Request { callback, request } => {
let message = self.handle_request(request);
callback.send(message);
}
Command::Snapshot { callback } => {
let snapshot = self.snapshot();
callback.send(snapshot);
}
Command::PerformAction { action, player } => self.perform_action(action, player),
}
}
/// Create and register a new player
fn register_player(&mut self) -> PlayerHandle {
let player = self.next_player_id();
let entity = logic::add_player(&mut self.world, player);
let (sender, receiver) = mpsc::channel(EVENT_BUFFER_SIZE);
let network_id = *self.world.get_component::<EntityId>(entity).unwrap();
let data = PlayerData {
network_id,
entity,
events: sender,
};
self.players.insert(player, data);
PlayerHandle {
player,
events: receiver,
}
}
/// Find the next available player id
fn next_player_id(&self) -> PlayerId {
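// `players` is a `BTreeMap`, so keys are visited in ascending order;
// this finds the smallest positive id not yet taken.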
let mut id = 1;
for player in self.players.keys() {
if player.0 == id {
id += 1;
} else {
break;
}
}
PlayerId(id)
}
/// Perform the request and return the result in a message
fn handle_request(&mut self, request: Request) -> Response {
let kind = match request.kind {
RequestKind::Ping => protocol::Pong.into(),
RequestKind::Init => {
let error = "Requested 'Init' on already initialized player";
ResponseKind::Error(error.into())
}
};
Response {
channel: request.channel,
kind,
}
}
/// Get a snapshot of the current game state.
fn snapshot(&self) -> Snapshot {
self.snapshots.make_snapshot(&self.world)
}
/// Perform an action for a player.
fn perform_action(&mut self, action: Action, player: PlayerId) {
match action.kind {
ActionKind::Move(new) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let mut movement = self.world.get_component_mut::<Movement>(data.entity)?;
movement.direction = new.direction;
Some(())
}();
}
ActionKind::Break(breaking) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let breaking = breaking
.entity
.and_then(|breaking| self.snapshots.lookup(breaking));
self.world
.get_component_mut::<WorldInteraction>(data.entity)?
.breaking = breaking;
Some(())
}();
}
ActionKind::Throw(throwing) => {
if let Some(data) = self.players.get(&player) {
logic::events::throw(&mut self.world, data.entity, throwing.target);
}
}
}
}
}
impl GameHandle {
/// Register a new client and return its id.
pub async fn register_player(&mut self) -> crate::Result<PlayerHandle> {
self.send_with(|callback| Command::RegisterPlayer { callback })
.await
}
/// Remove a player from the game.
pub async fn disconnect_player(&mut self, player: PlayerId) -> crate::Result<()> {
self.sender.send(Command::DisconnectPlayer(player)).await?;
Ok(())
}
/// Handle a request made by a player.
pub async fn handle_request(&mut self, request: Request) -> crate::Result<Response> {
self.send_with(move |callback| Command::Request { request, callback })
.await
}
/// Get a snapshot of the current game state.
pub async fn | (&mut self) -> crate::Result<Snapshot> {
self.send_with(|callback| Command::Snapshot { callback })
.await
}
/// Handle an action performed by a player
pub async fn handle_action(&mut self, action: Action, player: PlayerId) -> crate::Result<()> {
self.sender
.send(Command::PerformAction { action, player })
.await?;
Ok(())
}
/// Send a command to the game with the specified callback and then return the value passed into
/// the callback.
async fn send_with<F, O>(&mut self, to_command: F) -> crate::Result<O>
where
F: FnOnce(Callback<O>) -> Command,
{
let (callback, value) = Callback::new();
let command = to_command(callback);
self.sender.send(command).await?;
value.await.map_err(Into::into)
}
}
impl PlayerHandle {
/// Get the id of this player
pub fn id(&self) -> PlayerId {
self.player
}
pub async fn poll_event(&mut self) -> Option<Event> {
self.events.recv().await
}
}
impl<T> Callback<T> {
/// Create a new callback
pub fn new() -> (Callback<T>, oneshot::Receiver<T>) {
let (sender, receiver) = oneshot::channel();
(Callback { sender }, receiver)
}
/// Attempt to send the value, returning false if the receiver was closed.
pub fn send(self, value: T) -> bool {
match self.sender.send(value) {
Ok(()) => true,
Err(_) => false,
}
}
}
| snapshot | identifier_name |
game.rs | use std::collections::BTreeMap;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
use tokio::sync::{
mpsc::{self, error::TrySendError},
oneshot,
};
use tokio::time;
use logic::components::{Movement, WorldInteraction};
use logic::legion::prelude::{Entity, World};
use logic::resources::DeadEntities;
use logic::snapshot::SnapshotEncoder;
use protocol::{
Action, ActionKind, EntityId, Event, EventKind, GameOver, PlayerId, Request, RequestKind,
Response, ResponseKind, Snapshot,
};
/// How many times per second to update the game world.
const TICK_RATE: u32 = 60;
/// The maximum number of events to buffer per player.
const EVENT_BUFFER_SIZE: usize = 1024;
pub struct Game {
players: BTreeMap<PlayerId, PlayerData>,
receiver: mpsc::Receiver<Command>,
world: World,
executor: logic::Executor,
snapshots: SnapshotEncoder,
time: u32,
}
#[derive(Debug, Clone)]
struct PlayerData {
entity: Entity,
network_id: EntityId,
events: mpsc::Sender<Event>,
}
#[derive(Debug)]
pub struct PlayerHandle {
player: PlayerId,
events: mpsc::Receiver<Event>,
}
#[derive(Debug, Clone)] | #[derive(Debug)]
enum Command {
Request {
request: Request,
callback: Callback<Response>,
},
RegisterPlayer {
callback: Callback<PlayerHandle>,
},
DisconnectPlayer(PlayerId),
Snapshot {
callback: Callback<Snapshot>,
},
PerformAction {
action: Action,
player: PlayerId,
},
}
struct Callback<T> {
sender: oneshot::Sender<T>,
}
// We don't care what the callback contains; simply print the expected return type.
impl<T> Debug for Callback<T> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "Callback<{}>", std::any::type_name::<T>())
}
}
impl Game {
/// Create a new game alongside a handle to that game.
pub fn new() -> (Game, GameHandle) {
let (sender, receiver) = mpsc::channel(1024);
let world = logic::create_world(logic::WorldKind::WithObjects);
let schedule = logic::add_systems(Default::default(), logic::SystemSet::Everything);
let executor = logic::Executor::new(schedule);
let game = Game {
players: BTreeMap::new(),
receiver,
world,
executor,
snapshots: SnapshotEncoder::new(),
time: 0,
};
let handle = GameHandle { sender };
(game, handle)
}
/// Run the game to completion (either the handle is dropped or a fatal error occurs).
pub async fn run(&mut self) {
let mut timer = time::interval(time::Duration::from_secs(1) / TICK_RATE);
loop {
tokio::select! {
_ = timer.tick() => {
self.tick();
}
command = self.receiver.recv() => match command {
None => {
log::info!("game handle dropped");
break;
},
Some(command) => {
log::debug!("got command: {:?}", command);
self.execute_command(command);
}
}
};
}
}
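/// Advance the game world one tick: run all systems, refresh the snapshot mapping,
/// check win/lose conditions, and broadcast a fresh snapshot to every player.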
fn tick(&mut self) {
self.executor.tick(&mut self.world);
self.snapshots.update_mapping(&self.world);
self.check_win_condition();
let mut events = Vec::<EventKind>::new();
let snapshot = Arc::new(self.snapshot());
events.push(snapshot.into());
for event in events {
self.broadcast(event);
}
self.time = self.time.wrapping_add(1);
}
fn broadcast<T>(&mut self, kind: T)
where
T: Into<EventKind>,
{
let event = Event {
time: self.time,
kind: kind.into(),
};
let mut dead = Vec::new();
for (&id, player) in &mut self.players {
match player.events.try_send(event.clone()) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
log::warn!("player {}'s event buffer is full", id);
dead.push(id);
// TODO: request full client resync
}
Err(TrySendError::Closed(_)) => {
log::info!("player {} stopped listening for events", id);
dead.push(id);
// TODO: stop attempting to send events to this player, and potentially
// disconnect them.
}
}
}
for player in dead {
self.remove_player(player);
}
}
fn remove_player(&mut self, player: PlayerId) -> Option<PlayerData> {
let data = self.players.remove(&player)?;
self.world.delete(data.entity);
self.world
.resources
.get_mut::<DeadEntities>()
.unwrap()
.entities
.push(data.network_id);
Some(data)
}
/// Check if any player has won or lost.
fn check_win_condition(&mut self) {
let dead = self.world.resources.get::<DeadEntities>().unwrap();
let mut losers = Vec::new();
for (&player, data) in &self.players {
if dead.entities.contains(&data.network_id) {
losers.push(player);
}
}
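// Release the immutable borrow of the world's resources before mutating
// `self.players` and the world below.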
drop(dead);
for loser in losers {
let mut player = self.players.remove(&loser).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Loser),
};
tokio::spawn(async move { player.events.send(event).await });
if self.players.len() == 1 {
let winner = *self.players.keys().next().unwrap();
let mut player = self.remove_player(winner).unwrap();
let event = Event {
time: self.time,
kind: EventKind::GameOver(GameOver::Winner),
};
tokio::spawn(async move { player.events.send(event).await });
}
}
}
/// Execute a command.
fn execute_command(&mut self, command: Command) {
match command {
Command::RegisterPlayer { callback } => {
callback.send(self.register_player());
}
Command::DisconnectPlayer(player) => {
self.remove_player(player);
}
Command::Request { callback, request } => {
let message = self.handle_request(request);
callback.send(message);
}
Command::Snapshot { callback } => {
let snapshot = self.snapshot();
callback.send(snapshot);
}
Command::PerformAction { action, player } => self.perform_action(action, player),
}
}
/// Create and register a new player
fn register_player(&mut self) -> PlayerHandle {
let player = self.next_player_id();
let entity = logic::add_player(&mut self.world, player);
let (sender, receiver) = mpsc::channel(EVENT_BUFFER_SIZE);
let network_id = *self.world.get_component::<EntityId>(entity).unwrap();
let data = PlayerData {
network_id,
entity,
events: sender,
};
self.players.insert(player, data);
PlayerHandle {
player,
events: receiver,
}
}
/// Find the next available player id
fn next_player_id(&self) -> PlayerId {
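// `players` is a `BTreeMap`, so keys are visited in ascending order;
// this finds the smallest positive id not yet taken.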
let mut id = 1;
for player in self.players.keys() {
if player.0 == id {
id += 1;
} else {
break;
}
}
PlayerId(id)
}
/// Perform the request and return the result in a message
fn handle_request(&mut self, request: Request) -> Response {
let kind = match request.kind {
RequestKind::Ping => protocol::Pong.into(),
RequestKind::Init => {
let error = "Requested 'Init' on already initialized player";
ResponseKind::Error(error.into())
}
};
Response {
channel: request.channel,
kind,
}
}
/// Get a snapshot of the current game state.
fn snapshot(&self) -> Snapshot {
self.snapshots.make_snapshot(&self.world)
}
/// Perform an action for a player.
fn perform_action(&mut self, action: Action, player: PlayerId) {
match action.kind {
ActionKind::Move(new) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let mut movement = self.world.get_component_mut::<Movement>(data.entity)?;
movement.direction = new.direction;
Some(())
}();
}
ActionKind::Break(breaking) => {
|| -> Option<()> {
let data = self.players.get(&player)?;
let breaking = breaking
.entity
.and_then(|breaking| self.snapshots.lookup(breaking));
self.world
.get_component_mut::<WorldInteraction>(data.entity)?
.breaking = breaking;
Some(())
}();
}
ActionKind::Throw(throwing) => {
if let Some(data) = self.players.get(&player) {
logic::events::throw(&mut self.world, data.entity, throwing.target);
}
}
}
}
}
impl GameHandle {
/// Register a new client and return its id.
pub async fn register_player(&mut self) -> crate::Result<PlayerHandle> {
self.send_with(|callback| Command::RegisterPlayer { callback })
.await
}
/// Remove a player from the game.
pub async fn disconnect_player(&mut self, player: PlayerId) -> crate::Result<()> {
self.sender.send(Command::DisconnectPlayer(player)).await?;
Ok(())
}
/// Handle a request made by a player.
pub async fn handle_request(&mut self, request: Request) -> crate::Result<Response> {
self.send_with(move |callback| Command::Request { request, callback })
.await
}
/// Get a snapshot of the current game state.
pub async fn snapshot(&mut self) -> crate::Result<Snapshot> {
self.send_with(|callback| Command::Snapshot { callback })
.await
}
/// Handle an action performed by a player
pub async fn handle_action(&mut self, action: Action, player: PlayerId) -> crate::Result<()> {
self.sender
.send(Command::PerformAction { action, player })
.await?;
Ok(())
}
/// Send a command to the game with the specified callback and then return the value passed into
/// the callback.
async fn send_with<F, O>(&mut self, to_command: F) -> crate::Result<O>
where
F: FnOnce(Callback<O>) -> Command,
{
let (callback, value) = Callback::new();
let command = to_command(callback);
self.sender.send(command).await?;
value.await.map_err(Into::into)
}
}
impl PlayerHandle {
/// Get the id of this player
pub fn id(&self) -> PlayerId {
self.player
}
pub async fn poll_event(&mut self) -> Option<Event> {
self.events.recv().await
}
}
impl<T> Callback<T> {
/// Create a new callback
pub fn new() -> (Callback<T>, oneshot::Receiver<T>) {
let (sender, receiver) = oneshot::channel();
(Callback { sender }, receiver)
}
/// Attempt to send the value, returning false if the receiver was closed.
pub fn send(self, value: T) -> bool {
match self.sender.send(value) {
Ok(()) => true,
Err(_) => false,
}
}
} | pub struct GameHandle {
sender: mpsc::Sender<Command>,
}
| random_line_split |
par_granges.rs | YTE * channel_size_modifier) * threads / size_of(R::P)
channel_size_modifier: f64,
/// The rayon threadpool to operate in
pool: rayon::ThreadPool,
/// The implementation of [RegionProcessor] that will be used to process regions
processor: R,
}
impl<R: RegionProcessor + Send + Sync> ParGranges<R> {
/// Create a ParGranges object
///
/// # Arguments
///
/// * `reads`- path to an indexed BAM/CRAM
/// * `ref_fasta`- path to an indexed reference file for CRAM
/// * `regions_bed`- Optional BED file path restricting the regions to be examined
/// * `regions_bcf`- Optional BCF/VCF file path restricting the regions to be examined
/// * `threads`- Optional threads to restrict the number of threads this process will use, defaults to all
/// * `chunksize`- optional argument to change the default chunksize of 1_000_000. `chunksize` determines the number of bases
/// each worker will get to work on at one time.
/// * `channel_size_modifier`- Optional argument to modify the default size ratio of the channel that `R::P` is sent on.
/// formula is: ((BYTES_INA_GIGABYTE * channel_size_modifier) * threads) / size_of(R::P)
/// * `processor`- Something that implements [`RegionProcessor`](RegionProcessor)
pub fn new(
reads: PathBuf,
ref_fasta: Option<PathBuf>,
regions_bed: Option<PathBuf>,
regions_bcf: Option<PathBuf>,
threads: Option<usize>,
chunksize: Option<u32>,
channel_size_modifier: Option<f64>,
processor: R,
) -> Self {
let threads = if let Some(threads) = threads {
threads
} else {
num_cpus::get()
};
// Keep two around for main thread and thread running the pool
let threads = std::cmp::max(threads.checked_sub(2).unwrap_or(0), 1);
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(threads)
.build()
.unwrap();
info!("Using {} worker threads.", threads);
Self {
reads,
ref_fasta,
regions_bed,
regions_bcf,
threads,
chunksize: chunksize.unwrap_or(CHUNKSIZE),
channel_size_modifier: channel_size_modifier.unwrap_or(CHANNEL_SIZE_MODIFIER),
pool,
processor,
}
}
/// Process each region.
///
/// This method splits the sequences in the BAM/CRAM header into `chunksize` * `self.threads` regions (aka 'super chunks').
/// It then queries that 'super chunk' against the intervals (either the BED file, or the whole genome broken up into `chunksize`
/// regions). The results of that query are then processed by a pool of workers that apply `process_region` to each interval to
/// do per-base analysis on. The collected result for each region is then sent back over the returned `Receiver<R::P>` channel
/// for the caller to use. The results will be returned in order according to the order of the intervals used to drive this method.
///
/// While one 'super chunk' is being worked on by all workers, the last 'super chunk's' results are being printed either to
/// a file or to STDOUT, in order.
///
/// Note, a common use case of this will be to fetch a region and do a pileup. The bounds of bases being looked at should still be
/// checked since a fetch will pull all reads that overlap the region in question.
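///
/// A minimal usage sketch (`MyProcessor` here is a hypothetical [`RegionProcessor`] implementation, not part of this crate):
///
/// ```ignore
/// let par_granges = ParGranges::new(
///     "test.bam".into(), // reads
///     None, None, None,  // ref_fasta, regions_bed, regions_bcf
///     None, None, None,  // threads, chunksize, channel_size_modifier
///     MyProcessor::default(),
/// );
/// let receiver = par_granges.process()?;
/// // Results arrive in the order of the intervals that drove the processing.
/// for result in receiver.into_iter() { /* write `result` */ }
/// ```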
pub fn process(self) -> Result<Receiver<R::P>> {
let channel_size: usize = ((BYTES_INA_GIGABYTE as f64 * self.channel_size_modifier).floor()
as usize
/ std::mem::size_of::<R::P>())
* self.threads;
info!(
"Creating channel of length {:?} (* 120 bytes to get mem)",
channel_size
);
let (snd, rxv) = bounded(channel_size);
thread::spawn(move || {
self.pool.install(|| {
info!("Reading from {:?}", self.reads);
let mut reader = IndexedReader::from_path(&self.reads).expect("Indexed BAM/CRAM");
// If passed add ref_fasta
if let Some(ref_fasta) = &self.ref_fasta {
reader.set_reference(ref_fasta).expect("Set ref");
}
// Get a copy of the header
let header = reader.header().to_owned();
// Work out if we are restricted to a subset of sites
let bed_intervals = if let Some(regions_bed) = &self.regions_bed {
Some(
Self::bed_to_intervals(&header, regions_bed)
.expect("Parsed BED to intervals"),
)
} else {
None
};
let bcf_intervals = if let Some(regions_bcf) = &self.regions_bcf | else {
None
};
let restricted_ivs = match (bed_intervals, bcf_intervals) {
(Some(bed_ivs), Some(bcf_ivs)) => Some(Self::merge_intervals(bed_ivs, bcf_ivs)),
(Some(bed_ivs), None) => Some(bed_ivs),
(None, Some(bcf_ivs)) => Some(bcf_ivs),
(None, None) => None,
};
let intervals = if let Some(restricted) = restricted_ivs {
restricted
} else {
Self::header_to_intervals(&header, self.chunksize)
.expect("Parsed BAM/CRAM header to intervals")
};
// The number of positions to try to process in one batch
let serial_step_size = self
.chunksize
.checked_mul(self.threads as u32)
.unwrap_or(u32::MAX); // aka superchunk
for (tid, intervals) in intervals.into_iter().enumerate() {
let tid: u32 = tid as u32;
let tid_end: u32 = header.target_len(tid).unwrap().try_into().unwrap();
// Result holds the processed positions to be sent to writer
let mut result = vec![];
for chunk_start in (0..tid_end).step_by(serial_step_size as usize) {
let tid_name = std::str::from_utf8(header.tid2name(tid)).unwrap();
let chunk_end =
std::cmp::min(chunk_start as u32 + serial_step_size, tid_end);
info!(
"Batch Processing {}:{}-{}",
tid_name, chunk_start, chunk_end
);
let (r, _) = rayon::join(
|| {
// Must be a vec so that par_iter works and results stay in order
let ivs: Vec<Interval<u32, ()>> =
Lapper::<u32, ()>::find(&intervals, chunk_start, chunk_end)
// Truncate intervals that extend forward or backward of chunk in question
.map(|iv| Interval {
start: std::cmp::max(iv.start, chunk_start),
stop: std::cmp::min(iv.stop, chunk_end),
val: (),
})
.collect();
ivs.into_par_iter()
.flat_map(|iv| {
info!("Processing {}:{}-{}", tid_name, iv.start, iv.stop);
self.processor.process_region(tid, iv.start, iv.stop)
})
.collect()
},
|| {
result.into_iter().for_each(|p| {
snd.send(p).expect("Sent a serializable to writer")
})
},
);
result = r;
}
// Send final set of results
result
.into_iter()
.for_each(|p| snd.send(p).expect("Sent a serializable to writer"));
}
});
});
Ok(rxv)
}
// Convert the header into intervals of equally sized chunks. The last interval may be short.
fn header_to_intervals(header: &HeaderView, chunksize: u32) -> Result<Vec<Lapper<u32, ()>>> {
let mut intervals = vec![vec![]; header.target_count() as usize];
for tid in 0..(header.target_count()) {
let tid_len: u32 = header.target_len(tid).unwrap().try_into().unwrap();
for start in (0..tid_len).step_by(chunksize as usize) {
let stop = std::cmp::min(start as u32 + chunksize, tid_len);
intervals[tid as usize].push(Interval {
start: start as u32,
stop: stop,
val: (),
});
}
}
Ok(intervals.into_iter().map(|ivs| Lapper::new(ivs)).collect())
}
/// Read a bed file into a vector of lappers with the index representing the TID
// TODO add a proper error message
fn bed_to_intervals(header: &HeaderView, bed_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bed_reader = bed::Reader::from_file(bed_file)?;
let mut intervals = vec![vec![]; header.target_count() as usize];
for record in bed_reader.records() {
let record = record?;
let tid = header
.tid(record.chrom().as_bytes())
.expect("Chromosome not found in BAM/CRAM header");
intervals[tid as usize].push(Interval {
start: record.start().try_into().unwrap(),
stop: record.end().try_into().unwrap(),
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
/// Read a BCF/VCF file into a vector of lappers with index representing the TID
fn bcf_to_intervals(header: &HeaderView, bcf_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bcf_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header = bcf_header_reader.header();
let mut intervals = vec![vec![]; header.target_count() as usize];
// TODO: validate the headers against each other
for record in bcf_reader.records() {
let record = record?;
let record_rid = bcf_header.rid2name(record.rid().unwrap()).unwrap();
let tid = header
.tid(record_rid)
.expect("Chromosome not found in BAM/CRAM header");
let pos: u32 = record
.pos()
.try_into()
.expect("Got a negative value for pos");
intervals[tid as usize].push(Interval {
start: pos,
stop: pos + 1,
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
/// Merge two sets of restriction intervals together
fn merge_intervals(
a_ivs: Vec<Lapper<u32, ()>>,
b_ivs: Vec<Lapper<u32, ()>>,
) -> Vec<Lapper<u32, ()>> {
let mut intervals = vec![vec![]; a_ivs.len()];
for (i, (a_lapper, b_lapper)) in a_ivs.into_iter().zip(b_ivs.into_iter()).enumerate() {
intervals[i] = a_lapper.into_iter().chain(b_lapper.into_iter()).collect();
}
intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect()
}
}
#[cfg(test)]
mod test {
use super::*;
use bio::io::bed;
use num_cpus;
use proptest::prelude::*;
use rust_htslib::{bam, bcf};
use rust_lapper::{Interval, Lapper};
use std::collections::{HashMap, HashSet};
use tempfile::tempdir;
// The purpose of these tests is to demonstrate that positions are covered once under a variety of circumstances
prop_compose! {
fn arb_iv_start(max_iv: u64)(start in 0..max_iv/2) -> u64 { start }
}
prop_compose! {
fn arb_iv_size(max_iv: u64)(size in 1..max_iv/2) -> u64 { size }
}
prop_compose! {
// Create an arbitrary interval where the min size == max_iv / 2
fn arb_iv(max_iv: u64)(start in arb_iv_start(max_iv), size in arb_iv_size(max_iv)) -> Interval<u64, ()> {
Interval {start, stop: start + size, val: ()}
}
}
// Create an arbitrary number of intervals along with the expected number of positions they cover
fn arb_ivs(
max_iv: u64, // max iv size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = (Vec<Interval<u64, ()>>, u64, u64)> {
prop::collection::vec(arb_iv(max_iv), 0..max_ivs).prop_map(|vec| {
let mut furthest_right = 0;
let lapper = Lapper::new(vec.clone());
let expected = lapper.cov();
for iv in vec.iter() {
if iv.stop > furthest_right {
furthest_right = iv.stop;
}
}
(vec, expected, furthest_right)
})
}
// Create arbitrary number of contigs with arbitrary intervals each
fn arb_chrs(
max_chr: usize, // number of chromosomes to use
max_iv: u64, // max interval size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = Vec<(Vec<Interval<u64, ()>>, u64, u64)>> {
prop::collection::vec(arb_ivs(max_iv, max_ivs), 0..max_chr)
}
// An empty BAM with correct header
// A BED file with the randomly generated intervals (with expected number of positions)
// proptest generates a random chunksize and cpu count
proptest! {
#[test]
// add random chunksize and random cpus
// NB: using any larger numbers for this tends to blow up the test runtime
fn interval_set(chromosomes in arb_chrs(4, 10_000, 1_000), chunksize in any::<u32>(), cpus in 0..num_cpus::get(), use_bed in any::<bool>(), use_vcf in any::<bool>()) {
let tempdir = tempdir().unwrap();
let bam_path = tempdir.path().join("test.bam");
let bed_path = tempdir.path().join("test.bed");
let vcf_path = tempdir.path().join("test.vcf");
// Build a BAM
let mut header = bam::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
let mut chr_rec = bam::header::HeaderRecord::new(b"SQ");
chr_rec.push_tag(b"SN", &i.to_string());
chr_rec.push_tag(b"LN", &chr.2.to_string()); // set len as max observed
header.push_record(&chr_rec);
}
let writer = bam::Writer::from_path(&bam_path, &header, bam::Format::BAM).expect("Opened test.bam for writing");
drop(writer); // force flush the writer so the header info is written
bam::index::build(&bam_path, None, bam::index::Type::BAI, 1).unwrap();
// Build a bed
let mut writer = bed::Writer::to_file(&bed_path).expect("Opened test.bed for writing");
for (i, chr) in chromosomes.iter().enumerate() {
for iv in chr.0.iter() {
let mut record = bed::Record::new();
record.set_start(iv.start);
record.set_end(iv.stop);
record.set_chrom(&i.to_string());
record.set_score(&0.to_string());
writer.write(&record).expect("Wrote to test.bed");
} | {
Some(
Self::bcf_to_intervals(&header, regions_bcf)
.expect("Parsed BCF/VCF to intervals"),
)
} | conditional_block |
par_granges.rs | YTE * channel_size_modifier) * threads / size_of(R::P)
channel_size_modifier: f64,
/// The rayon threadpool to operate in
pool: rayon::ThreadPool,
/// The implementation of [RegionProcessor] that will be used to process regions
processor: R,
}
impl<R: RegionProcessor + Send + Sync> ParGranges<R> {
/// Create a ParGranges object
///
/// # Arguments
///
/// * `reads`- path to an indexed BAM/CRAM
/// * `ref_fasta`- path to an indexed reference file for CRAM
/// * `regions_bed`- Optional BED file path restricting the regions to be examined
/// * `regions_bcf`- Optional BCF/VCF file path restricting the regions to be examined
/// * `threads`- Optional threads to restrict the number of threads this process will use, defaults to all
/// * `chunksize`- optional argument to change the default chunksize of 1_000_000. `chunksize` determines the number of bases
/// each worker will get to work on at one time.
/// * `channel_size_modifier`- Optional argument to modify the default size ratio of the channel that `R::P` is sent on.
/// formula is: ((BYTES_INA_GIGABYTE * channel_size_modifier) * threads) / size_of(R::P)
/// * `processor`- Something that implements [`RegionProcessor`](RegionProcessor)
pub fn new(
reads: PathBuf,
ref_fasta: Option<PathBuf>,
regions_bed: Option<PathBuf>,
regions_bcf: Option<PathBuf>,
threads: Option<usize>,
chunksize: Option<u32>,
channel_size_modifier: Option<f64>,
processor: R,
) -> Self {
let threads = if let Some(threads) = threads {
threads
} else {
num_cpus::get()
};
// Keep two around for main thread and thread running the pool
let threads = std::cmp::max(threads.checked_sub(2).unwrap_or(0), 1);
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(threads)
.build()
.unwrap();
info!("Using {} worker threads.", threads);
Self {
reads,
ref_fasta,
regions_bed,
regions_bcf,
threads,
chunksize: chunksize.unwrap_or(CHUNKSIZE),
channel_size_modifier: channel_size_modifier.unwrap_or(CHANNEL_SIZE_MODIFIER),
pool,
processor,
}
}
/// Process each region.
///
/// This method splits the sequences in the BAM/CRAM header into `chunksize` * `self.threads` regions (aka 'super chunks').
/// It then queries that 'super chunk' against the intervals (either the BED file, or the whole genome broken up into `chunksize`
/// regions). The results of that query are then processed by a pool of workers that apply `process_region` to each interval to
/// do per-base analysis on. The collected result for each region is then sent back over the returned `Receiver<R::P>` channel
/// for the caller to use. The results will be returned in order according to the order of the intervals used to drive this method.
///
/// While one 'super chunk' is being worked on by all workers, the last 'super chunk's' results are being printed either to
/// a file or to STDOUT, in order.
///
/// Note, a common use case of this will be to fetch a region and do a pileup. The bounds of bases being looked at should still be
/// checked since a fetch will pull all reads that overlap the region in question.
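///
/// A minimal usage sketch (`MyProcessor` here is a hypothetical [`RegionProcessor`] implementation, not part of this crate):
///
/// ```ignore
/// let par_granges = ParGranges::new(
///     "test.bam".into(), // reads
///     None, None, None,  // ref_fasta, regions_bed, regions_bcf
///     None, None, None,  // threads, chunksize, channel_size_modifier
///     MyProcessor::default(),
/// );
/// let receiver = par_granges.process()?;
/// // Results arrive in the order of the intervals that drove the processing.
/// for result in receiver.into_iter() { /* write `result` */ }
/// ```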
pub fn process(self) -> Result<Receiver<R::P>> {
let channel_size: usize = ((BYTES_INA_GIGABYTE as f64 * self.channel_size_modifier).floor()
as usize
/ std::mem::size_of::<R::P>())
* self.threads;
info!(
"Creating channel of length {:?} (* 120 bytes to get mem)",
channel_size
);
let (snd, rxv) = bounded(channel_size);
thread::spawn(move || {
self.pool.install(|| {
info!("Reading from {:?}", self.reads);
let mut reader = IndexedReader::from_path(&self.reads).expect("Indexed BAM/CRAM");
// If passed add ref_fasta
if let Some(ref_fasta) = &self.ref_fasta {
reader.set_reference(ref_fasta).expect("Set ref");
}
// Get a copy of the header
let header = reader.header().to_owned();
// Work out if we are restricted to a subset of sites
let bed_intervals = if let Some(regions_bed) = &self.regions_bed {
Some(
Self::bed_to_intervals(&header, regions_bed)
.expect("Parsed BED to intervals"),
)
} else {
None
};
let bcf_intervals = if let Some(regions_bcf) = &self.regions_bcf {
Some(
Self::bcf_to_intervals(&header, regions_bcf)
.expect("Parsed BCF/VCF to intervals"),
)
} else {
None
};
let restricted_ivs = match (bed_intervals, bcf_intervals) {
(Some(bed_ivs), Some(bcf_ivs)) => Some(Self::merge_intervals(bed_ivs, bcf_ivs)),
(Some(bed_ivs), None) => Some(bed_ivs),
(None, Some(bcf_ivs)) => Some(bcf_ivs),
(None, None) => None,
};
let intervals = if let Some(restricted) = restricted_ivs {
restricted
} else {
Self::header_to_intervals(&header, self.chunksize)
.expect("Parsed BAM/CRAM header to intervals")
};
// The number of positions to try to process in one batch
let serial_step_size = self
.chunksize
.checked_mul(self.threads as u32)
.unwrap_or(u32::MAX); // aka superchunk
for (tid, intervals) in intervals.into_iter().enumerate() {
let tid: u32 = tid as u32;
let tid_end: u32 = header.target_len(tid).unwrap().try_into().unwrap();
// Result holds the processed positions to be sent to writer
let mut result = vec![];
for chunk_start in (0..tid_end).step_by(serial_step_size as usize) {
let tid_name = std::str::from_utf8(header.tid2name(tid)).unwrap();
let chunk_end =
std::cmp::min(chunk_start as u32 + serial_step_size, tid_end);
info!(
"Batch Processing {}:{}-{}",
tid_name, chunk_start, chunk_end
);
let (r, _) = rayon::join(
|| {
// Must be a vec so that par_iter works and results stay in order
let ivs: Vec<Interval<u32, ()>> =
Lapper::<u32, ()>::find(&intervals, chunk_start, chunk_end)
// Truncate intervals that extend forward or backward of chunk in question
.map(|iv| Interval {
start: std::cmp::max(iv.start, chunk_start),
stop: std::cmp::min(iv.stop, chunk_end),
val: (),
})
.collect();
ivs.into_par_iter()
.flat_map(|iv| {
info!("Processing {}:{}-{}", tid_name, iv.start, iv.stop);
self.processor.process_region(tid, iv.start, iv.stop)
})
.collect()
},
|| {
result.into_iter().for_each(|p| {
snd.send(p).expect("Sent a serializable to writer")
})
},
);
result = r;
}
// Send final set of results
result
.into_iter()
.for_each(|p| snd.send(p).expect("Sent a serializable to writer"));
}
});
});
Ok(rxv)
}
// Convert the header into intervals of equally sized chunks. The last interval may be short.
fn header_to_intervals(header: &HeaderView, chunksize: u32) -> Result<Vec<Lapper<u32, ()>>> {
let mut intervals = vec![vec![]; header.target_count() as usize];
for tid in 0..(header.target_count()) {
let tid_len: u32 = header.target_len(tid).unwrap().try_into().unwrap();
for start in (0..tid_len).step_by(chunksize as usize) {
let stop = std::cmp::min(start as u32 + chunksize, tid_len);
intervals[tid as usize].push(Interval {
start: start as u32,
stop: stop,
val: (),
});
}
}
Ok(intervals.into_iter().map(|ivs| Lapper::new(ivs)).collect())
}
/// Read a bed file into a vector of lappers with the index representing the TID
// TODO add a proper error message
fn bed_to_intervals(header: &HeaderView, bed_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bed_reader = bed::Reader::from_file(bed_file)?;
let mut intervals = vec![vec![]; header.target_count() as usize];
for record in bed_reader.records() {
let record = record?;
let tid = header
.tid(record.chrom().as_bytes())
.expect("Chromosome not found in BAM/CRAM header");
intervals[tid as usize].push(Interval {
start: record.start().try_into().unwrap(),
stop: record.end().try_into().unwrap(),
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
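// For example, the BED line `chr1 100 250` becomes Interval { start: 100,
// stop: 250, val: () } in the lapper at chr1's TID; merge_overlaps collapses
// overlapping BED records into single intervals.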
/// Read a BCF/VCF file into a vector of lappers with index representing the TID
fn bcf_to_intervals(header: &HeaderView, bcf_file: &PathBuf) -> Result<Vec<Lapper<u32, ()>>> {
let mut bcf_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header_reader = Reader::from_path(bcf_file).expect("Error opening BCF/VCF file.");
let bcf_header = bcf_header_reader.header();
let mut intervals = vec![vec![]; header.target_count() as usize];
// TODO: validate the headers against each other
for record in bcf_reader.records() {
let record = record?;
let record_rid = bcf_header.rid2name(record.rid().unwrap()).unwrap();
let tid = header
.tid(record_rid)
.expect("Chromosome not found in BAM/CRAM header");
let pos: u32 = record
.pos()
.try_into()
.expect("Got a negative value for pos");
intervals[tid as usize].push(Interval {
start: pos,
stop: pos + 1,
val: (),
});
}
Ok(intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect())
}
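// Each BCF/VCF record becomes the single-base interval [pos, pos + 1), so
// merge_overlaps reduces duplicate records at the same site to one interval.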
/// Merge two sets of restriction intervals together
fn merge_intervals(
a_ivs: Vec<Lapper<u32, ()>>,
b_ivs: Vec<Lapper<u32, ()>>,
) -> Vec<Lapper<u32, ()>> {
let mut intervals = vec![vec![]; a_ivs.len()];
for (i, (a_lapper, b_lapper)) in a_ivs.into_iter().zip(b_ivs.into_iter()).enumerate() {
intervals[i] = a_lapper.into_iter().chain(b_lapper.into_iter()).collect();
}
intervals
.into_iter()
.map(|ivs| {
let mut lapper = Lapper::new(ivs);
lapper.merge_overlaps();
lapper
})
.collect()
}
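// For example, a BED interval [0, 10) and a VCF site at position 5 ([5, 6))
// on the same contig merge into the single restriction interval [0, 10).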
}
#[cfg(test)]
mod test {
use super::*;
use bio::io::bed;
use num_cpus;
use proptest::prelude::*;
use rust_htslib::{bam, bcf};
use rust_lapper::{Interval, Lapper};
use std::collections::{HashMap, HashSet};
use tempfile::tempdir;
// The purpose of these tests is to demonstrate that positions are covered exactly once under a variety of circumstances
prop_compose! {
fn arb_iv_start(max_iv: u64)(start in 0..max_iv/2) -> u64 { start }
}
prop_compose! {
fn arb_iv_size(max_iv: u64)(size in 1..max_iv/2) -> u64 { size }
}
prop_compose! {
// Create an arbitrary interval; the start and size are each at most max_iv / 2
fn arb_iv(max_iv: u64)(start in arb_iv_start(max_iv), size in arb_iv_size(max_iv)) -> Interval<u64, ()> {
Interval {start, stop: start + size, val: ()}
}
}
// Create an arbitrary number of intervals along with the expected number of positions they cover
fn arb_ivs(
max_iv: u64, // max iv size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = (Vec<Interval<u64, ()>>, u64, u64)> {
prop::collection::vec(arb_iv(max_iv), 0..max_ivs).prop_map(|vec| {
let mut furthest_right = 0;
let lapper = Lapper::new(vec.clone());
let expected = lapper.cov();
for iv in vec.iter() {
if iv.stop > furthest_right {
furthest_right = iv.stop;
}
}
(vec, expected, furthest_right)
})
}
// Create arbitrary number of contigs with arbitrary intervals each
fn arb_chrs(
max_chr: usize, // number of chromosomes to use
max_iv: u64, // max interval size
max_ivs: usize, // max number of intervals
) -> impl Strategy<Value = Vec<(Vec<Interval<u64, ()>>, u64, u64)>> {
prop::collection::vec(arb_ivs(max_iv, max_ivs), 0..max_chr)
}
// An empty BAM with correct header
// A BED file with the randomly generated intervals (with expected number of positions)
// proptest generate random chunksize, cpus
proptest! {
#[test]
// add random chunksize and random cpus
// NB: using any larger numbers for this tends to blow up the test runtime
fn interval_set(chromosomes in arb_chrs(4, 10_000, 1_000), chunksize in any::<u32>(), cpus in 0..num_cpus::get(), use_bed in any::<bool>(), use_vcf in any::<bool>()) {
let tempdir = tempdir().unwrap();
let bam_path = tempdir.path().join("test.bam");
let bed_path = tempdir.path().join("test.bed");
let vcf_path = tempdir.path().join("test.vcf");
// Build a BAM
let mut header = bam::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
let mut chr_rec = bam::header::HeaderRecord::new(b"SQ");
chr_rec.push_tag(b"SN", &i.to_string());
chr_rec.push_tag(b"LN", &chr.2.to_string()); // set len as max observed
header.push_record(&chr_rec);
}
let writer = bam::Writer::from_path(&bam_path, &header, bam::Format::BAM).expect("Opened test.bam for writing");
drop(writer); // force flush the writer so the header info is written
bam::index::build(&bam_path, None, bam::index::Type::BAI, 1).unwrap();
// Build a bed
let mut writer = bed::Writer::to_file(&bed_path).expect("Opened test.bed for writing");
for (i, chr) in chromosomes.iter().enumerate() {
for iv in chr.0.iter() {
let mut record = bed::Record::new();
record.set_start(iv.start);
record.set_end(iv.stop);
record.set_chrom(&i.to_string());
record.set_score(&0.to_string());
writer.write(&record).expect("Wrote to test.bed");
}
}
drop(writer); // force flush
// Build a VCF file
let mut vcf_truth = HashMap::new();
let mut header = bcf::header::Header::new();
for (i,chr) in chromosomes.iter().enumerate() {
header.push_record(format!("##contig=<ID={},length={}>", &i.to_string(), &chr.2.to_string()).as_bytes());
}
let mut writer = bcf::Writer::from_path(&vcf_path, &header, true, bcf::Format::VCF).expect("Failed to open test.vcf for writing");
let mut record = writer.empty_record();
for (i, chr) in chromosomes.iter().enumerate() {
record.set_rid(Some(i as u32));
let counter = vcf_truth.entry(i).or_insert(0);
let mut seen = HashSet::new();
for iv in chr.0.iter() {
if !seen.contains(&iv.start) {
*counter += 1;
seen.insert(iv.start);
}
record.set_pos(iv.start as i64);
writer.write(&record).expect("Failed to write to test.vcf")
}
}
drop(writer); // force flush
// Create the processor with a dumb impl of processing that just returns positions with no counting
let test_processor = TestProcessor {};
let par_granges_runner = ParGranges::new(
bam_path,
None,
if use_bed { Some(bed_path) } else { None }, // do one with regions
if use_vcf { Some(vcf_path) } else { None }, // do one with vcf regions
Some(cpus),
Some(chunksize),
Some(0.002),
test_processor
);
let receiver = par_granges_runner.process().expect("Launch ParGranges Process");
let mut chrom_counts = HashMap::new();
receiver.into_iter().for_each(|p: PileupPosition| {
let positions = chrom_counts.entry(p.ref_seq.parse::<usize>().expect("parsed chr")).or_insert(0u64);
*positions += 1
});
// Validate that for each chr we get the expected number of bases
for (chrom, positions) in chrom_counts.iter() {
if use_bed && !use_vcf {
// with bed restrictions only, expect the bed coverage (.1)
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_bed && use_vcf {
// with both bed and vcf, the vcf positions fall inside the bed intervals, so expect the bed coverage (.1)
prop_assert_eq!(chromosomes[*chrom].1, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else if use_vcf && !use_bed {
// total positions should equal the number of distinct vcf records for that chr
prop_assert_eq!(vcf_truth.get(chrom).unwrap(), positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].1, positions);
} else {
// bam only: expect every base up to the rightmost interval position
prop_assert_eq!(chromosomes[*chrom].2, *positions, "chr: {}, expected: {}, found: {}", chrom, chromosomes[*chrom].2, positions);
}
}
}
}
use crate::position::{pileup_position::PileupPosition, Position};
use smartstring::SmartString;
struct TestProcessor {}
impl RegionProcessor for TestProcessor {
type P = PileupPosition;
fn process_region(&self, tid: u32, start: u32, stop: u32) -> Vec<Self::P> {
// A deliberately simple implementation: emit one position per base in the
// region, with no pileup counting (a sketch of the elided body).
(start..stop)
.map(|p| PileupPosition::new(SmartString::from(tid.to_string().as_str()), p))
.collect()
}
}
}

// medrs.rs
extern crate config;
extern crate mediawiki;
extern crate papers;
extern crate regex;
extern crate wikibase;
#[macro_use]
extern crate lazy_static;
/*
use papers::crossref2wikidata::Crossref2Wikidata;
use papers::orcid2wikidata::Orcid2Wikidata;
use papers::pubmed2wikidata::Pubmed2Wikidata;
use papers::semanticscholar2wikidata::Semanticscholar2Wikidata;
*/
use docopt::Docopt;
use mediawiki::api::Api;
use papers::wikidata_papers::WikidataPapers;
use papers::*;
use regex::Regex;
use serde::Deserialize;
use std::str;
use std::{
fs::File,
io::{prelude::*, BufReader},
};
use urlencoding;
fn lines_from_file(filename: &str) -> Vec<String> {
if filename.is_empty() {
return vec![];
}
let file = File::open(filename).expect(format!("no such file: {}", filename).as_str());
let buf = BufReader::new(file);
buf.lines()
.map(|l| l.expect("Could not parse line"))
.collect()
}
fn read_file_to_string(filename: &str) -> String {
let mut file = match File::open(filename) {
Ok(file) => file,
Err(_) => panic!("no such file: {}", filename),
};
let mut file_contents = String::new();
file.read_to_string(&mut file_contents)
.expect("failed to read file");
file_contents
}
fn replace_sparql_placeholder(pattern: &str, sparql: &str, lines: &[String]) -> String {
let rep: String = if lines.is_empty() {
"".to_string()
} else {
"wd:".to_string() + &lines.join(" wd:")
};
sparql.replace(pattern, &rep)
}
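// For example, with pattern "%%TOPICS%%" and lines ["Q12136", "Q18123741"]
// (illustrative item IDs), every occurrence of %%TOPICS%% in the SPARQL
// template becomes "wd:Q12136 wd:Q18123741"; an empty list yields "".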
fn output_sparql_result_items(sparql: &String) {
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
let result = api.sparql_query(&sparql).expect("SPARQL query failed");
let varname = result["head"]["vars"][0]
.as_str()
.expect("Can't find first variable name in SPARQL result");
let entities = api.entities_from_sparql_result(&result, &varname);
println!("{}", entities.join("\n"));
}
/*
fn get_all_from_stdin() -> String {
let mut payload = Vec::new();
io::stdin().read_to_end(&mut payload).unwrap();
let s = match str::from_utf8(&payload) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
s.to_string()
}
*/
fn command_query(args: &Args) {
if args.arg_query.is_empty() {
panic!("Requires SPARQL query");
}
let sparql = &args.arg_query;
output_sparql_result_items(&sparql);
}
fn command_run(args: &Args) {
let articles = lines_from_file(&args.flag_articles);
let reviews = lines_from_file(&args.flag_reviews);
let topics = lines_from_file(&args.flag_topics);
let journals = lines_from_file(&args.flag_journals);
let publishers = lines_from_file(&args.flag_publishers);
let mut sparql = read_file_to_string(&args.flag_sparql);
sparql = replace_sparql_placeholder("%%ARTICLES%%", &sparql, &articles);
sparql = replace_sparql_placeholder("%%REVIEWS%%", &sparql, &reviews);
sparql = replace_sparql_placeholder("%%TOPICS%%", &sparql, &topics);
sparql = replace_sparql_placeholder("%%JOURNALS%%", &sparql, &journals);
sparql = replace_sparql_placeholder("%%PUBLISHERS%%", &sparql, &publishers);
output_sparql_result_items(&sparql);
}
fn get_api_url_for_wiki(wiki: &String) -> Option<String> {
// Get site matrix from wikidata
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
let params = api.params_into(&vec![("action", "sitematrix")]);
let site_matrix = api
.get_query_api_json(¶ms)
.expect("Can't load sitematrix from wikidata API");
//println!("{:#?}", &site_matrix);
// Go through the "normal" objects
let mut ret: Option<String> = None;
site_matrix["sitematrix"]
.as_object()
.expect("sitematrix is not an object")
.iter()
.for_each(|(_, data)| {
match data["site"]
.as_array()
.unwrap_or(&vec![])
.iter()
.filter_map(|x| {
if x["dbname"].as_str().unwrap_or("") == wiki {
x["url"].as_str()
} else {
None
}
})
.next()
{
Some(url) => {
ret = Some(url.to_string() + "/w/api.php");
}
None => {}
}
});
// Try the "specials"
site_matrix["sitematrix"]["specials"]
.as_array()
.unwrap_or(&vec![])
.iter()
.for_each(|x| {
if x["dbname"].as_str().unwrap_or("") == wiki {
ret = Some(x["url"].as_str().unwrap_or("").to_string() + "/w/api.php");
}
});
ret
}
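// For example, get_api_url_for_wiki(&"enwiki".to_string()) should return
// Some("https://en.wikipedia.org/w/api.php"), assuming the sitematrix lists
// enwiki under its usual URL.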
fn get_external_urls(api_url: &String, title: &String) -> Vec<String> {
let api = Api::new(&api_url).expect(&format!("Can't connect to {}", &api_url));
let params = api.params_into(&vec![ | ("action", "query"),
("prop", "extlinks"),
("ellimit", "500"),
("titles", title.as_str()),
]);
let result = api
.get_query_api_json_all(¶ms)
.expect("query.extlinks failed");
let mut urls: Vec<String> = vec![];
result["query"]["pages"]
.as_object()
.expect("query.pages in result not an object")
.iter()
.for_each(|(_page_id, data)| {
data["extlinks"]
.as_array()
.expect("extlinks not an array")
.iter()
.for_each(|x| urls.push(x["*"].as_str().expect("* not a string").to_string()));
});
urls
}
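// For a page such as enwiki's "Aspirin" (illustrative), this returns every
// external link on the page, including the doi.org/PubMed/PMC reference URLs
// that command_refs matches against below.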
fn get_paper_q(api: &Api, id: &GenericWorkIdentifier) -> Option<String> {
let wdp = WikidataPapers::new();
match &id.work_type {
GenericWorkType::Property(prop) => {
let result = wdp.search_external_id(&prop, &id.id, api);
result.get(0).map(|s| s.to_owned()) // First one will do
}
_ => None,
}
/*
wdp.add_adapter(Box::new(Pubmed2Wikidata::new()));
wdp.add_adapter(Box::new(Crossref2Wikidata::new()));
wdp.add_adapter(Box::new(Semanticscholar2Wikidata::new()));
wdp.add_adapter(Box::new(Orcid2Wikidata::new()));
let ids = vec![id.to_owned()];
let ids = wdp.update_from_paper_ids(&ids);
let q = ids
.iter()
.filter_map(|x| match x.work_type {
GenericWorkType::Item => Some(x.id.to_owned()),
_ => None,
})
.next();
q*/
}
fn command_refs(args: &Args) {
if args.arg_wiki.is_empty() {
panic!("wiki code (e.g. 'enwiki') is required");
}
if args.arg_title.is_empty() {
panic!("article title is required");
}
let wiki = &args.arg_wiki;
let title = &args.arg_title;
// Get the API URL for the wiki
let api_url = match get_api_url_for_wiki(&wiki) {
Some(url) => url,
None => panic!("Can't find API URL for {}", &wiki),
};
// Get all external URLs from that page, on that wiki
let urls = get_external_urls(&api_url, &title);
//println!("{:#?}", &urls);
lazy_static! {
// `^.*?` tolerates an optional scheme (http:, https:) ahead of the
// protocol-relative URL; literal dots are escaped.
static ref RE_DOI: Regex = Regex::new(r#"^.*?//doi\.org/(.+)$"#).unwrap();
static ref RE_PMID: Regex =
Regex::new(r#"^.*?//www\.ncbi\.nlm\.nih\.gov/pubmed/(\d+)$"#).unwrap();
static ref RE_PMCID: Regex =
Regex::new(r#"^.*?//www\.ncbi\.nlm\.nih\.gov/pmc/articles/PMC(\d+)$"#).unwrap();
}
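// Illustrative URLs these patterns capture (IDs are made up):
// //doi.org/10.1000/182 -> DOI "10.1000/182"
// //www.ncbi.nlm.nih.gov/pubmed/12345678 -> PMID "12345678"
// //www.ncbi.nlm.nih.gov/pmc/articles/PMC1234567 -> PMCID "1234567"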
let mut ids: Vec<GenericWorkIdentifier> = vec![];
for url in urls {
match RE_DOI.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
match urlencoding::decode(&id) {
Ok(id) => {
ids.push(GenericWorkIdentifier::new_prop(PROP_DOI, &id));
}
_ => {}
}
}
None => {}
}
match RE_PMID.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
ids.push(GenericWorkIdentifier::new_prop(PROP_PMID, id));
}
None => {}
}
match RE_PMCID.captures(&url) {
Some(caps) => {
let id = caps.get(1).unwrap().as_str();
ids.push(GenericWorkIdentifier::new_prop(PROP_PMCID, id));
}
None => {}
}
}
let api = Api::new("https://www.wikidata.org/w/api.php").expect("Can't connect to Wikidata");
for id in ids {
match get_paper_q(&api, &id) {
Some(q) => {
println!("{}", &q);
}
None => {
/*
/TODO
let prop = match &id.work_type {
GenericWorkType::Property(p) => p,
_ => continue,
};
println!("No item for https://www.wikidata.org/w/index.php?search=&search=haswbstatement%3A{}={}&title=Special%3ASearch&go=Go&ns0=1&ns120=1", &prop,&id.id);
*/
}
}
}
}
const USAGE: &'static str = "
MEDRS
Usage:
medrs run [--articles=<file>] [--reviews=<file>] [--topics=<file>] [--journals=<file>] [--publishers=<file>] [--sparql=<file>]
medrs query <query>
medrs refs <wiki> <title>
medrs (-h | --help)
medrs --version
Options:
-h --help Show this screen.
--version Show version.
--articles=<file> Input article list
--reviews=<file> Deprecated reviews (article blacklist)
--topics=<file> Topical whitelist
--journals=<file> OA exceptions (journal whitelist)
--publishers=<file> Beall's list (publisher blacklist)
--sparql=<file> SPARQL pattern
";
#[derive(Debug, Deserialize)]
struct Args {
flag_articles: String,
flag_reviews: String,
flag_topics: String,
flag_journals: String,
flag_publishers: String,
flag_sparql: String,
arg_query: String,
arg_title: String,
arg_wiki: String,
cmd_run: bool,
cmd_query: bool,
cmd_refs: bool,
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
//println!("{:?}", args);
if args.cmd_query {
command_query(&args);
}
if args.cmd_run {
command_run(&args);
}
if args.cmd_refs {
command_refs(&args);
}
}
// main.rs
/*
Authors: Prof. John Lindsay
Created: 15/08/2023 (originally in Whitebox Toolset Extension)
Last Modified: 15/08/2023
License: MIT
*/
use rstar::primitives::GeomWithData;
use rstar::RTree;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::ops::Index;
use std::path;
use std::str;
use std::time::Instant;
use std::collections::VecDeque;
use whitebox_common::structures::Point2D;
use whitebox_common::utils::{
get_formatted_elapsed_time,
wrapped_print
};
use whitebox_vector::{
// AttributeField,
// FieldData,
// FieldDataType,
Shapefile,
ShapefileGeometry,
ShapeType
};
const EPSILON: f64 = std::f64::EPSILON;
/// This tool can be used to resolve many of the topological errors and inconsistencies associated with
/// manually digitized vector stream networks, i.e. hydrography data. A properly structured stream network
/// should consist of a series of stream segments that connect a channel head to a downstream confluence,
/// or an upstream confluence to a downstream confluence/outlet. This tool will join vector arcs that
/// connect at arbitrary, non-confluence points along stream segments. It also splits an arc where
/// a tributary stream connects at a mid-point, thereby creating a proper confluence where two upstream
/// tributaries converge into a downstream segment. The tool also handles non-connecting tributaries
/// caused by dangling arcs, i.e. overshoots and undershoots.
///
///
///
/// The user may optionally specify the name of the input vector stream network (`--input`) and the output file
/// (`--output`). Note that if an input file is not specified by the user, the tool will search for all vector
/// files (*.shp) contained within the current working directory. This feature can be very useful when
/// you need to process a large number of stream files contained within a single directory. The tool will
/// process the files in parallel in this batch mode.
///
/// A distance threshold for snapping dangling arcs (`--snap`) must be specified by the user. This distance
/// is in the input layer's x-y units. The tool works best on projected input
/// data; however, if the input is in geographic coordinates (latitude and longitude), then specifying a
/// small-valued snap distance is advisable.
///
/// Notice that the attributes of the input layer will not be
/// carried over to the output file because there is not a one-for-one feature correspondence between the
/// two files due to the joins and splits of stream segments. Instead the output attribute table will
/// only contain a feature ID (FID) entry.
///
/// > Note: this tool should be used to pre-process vector streams that are input to the
/// > `VectorStreamNetworkAnalysis` tool.
///
/// # See Also
/// `VectorStreamNetworkAnalysis`, `FixDanglingArcs`
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() <= 1 || args[1].trim() == "help" {
        // print help
        help();
    } else if args[1].trim() == "run" {
        match run(&args) {
            Ok(_) => {}
            Err(e) => panic!("{:?}", e),
        }
    } else if args[1].trim() == "version" {
        // print version information
        version();
    }
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("correct_stream_vector_direction{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
    This tool corrects the flow direction of arcs in a digitized vector stream network so that arcs point downstream toward an outlet.
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
    -i, --input     Name of the input streams vector file.
    --outlet        Name of the input outlets (points) vector file.
    -o, --output    Name of the output streams vector file.
    --snap, --dist  Snap distance, in the layer's xy units, used to match outlets to stream endnodes.
| working directory contained in the WhiteboxTools settings.json file.
Example Usage:
    >>.*EXE_NAME run -i=streams.shp --outlet=outlets.shp -o=streams_corrected.shp --snap=1.0
Note: Use of this tool requires a valid license. To obtain a license,
contact Whitebox Geospatial Inc. ([email protected]).
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"correct_stream_vector_direction v{} by Dr. John B. Lindsay (c) 2023.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("CorrectStreamVectorDirection") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
    if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
// read the arguments
let mut input_file = String::new();
let mut outlet_file = String::new();
let mut output_file: String = String::new();
let mut snap_dist = 1.0;
if args.len() <= 1 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with too few parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-outlet" {
outlet_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-snap" || flag_val == "-dist" {
snap_dist = if keyval {
vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
};
}
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
// let mut progress: usize;
let mut old_progress: usize = 1;
let start = Instant::now();
    if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") {
        input_file = format!("{}{}", working_directory, input_file);
    }
    if !outlet_file.contains(path::MAIN_SEPARATOR) && !outlet_file.contains("/") {
        outlet_file = format!("{}{}", working_directory, outlet_file);
    }
if output_file.is_empty() {
output_file = input_file
.clone()
.replace(".shp", "_corrected.shp")
.replace(".SHP", "_corrected.shp");
}
    if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if snap_dist <= 0f64 {
if configurations.verbose_mode {
wrapped_print("Error: The snap distance must be greater than 0.0.", 50);
}
}
let input = Shapefile::read(&input_file).expect("Error reading file"); //?;
// Make sure the input vector file is of polyline type
    if input.header.shape_type.base_shape_type() != ShapeType::PolyLine {
// return Err(Error::new(
// ErrorKind::InvalidInput,
// "The vector data must be of PolyLine base shape type.",
// ));
panic!("The vector stream data must be of PolyLine base shape type.");
}
let outlets = Shapefile::read(&outlet_file).expect("Error reading file"); //?;
    // Make sure the outlets vector file is of point type
    if outlets.header.shape_type.base_shape_type() != ShapeType::Point {
panic!("The vector outlets data must be of POINT base shape type.");
}
let mut progress: usize;
// Read each line segment into an rtree.
type Location = GeomWithData<[f64; 2], (usize, bool)>;
let mut end_nodes = vec![];
let (mut part_start, mut part_end): (usize, usize);
let mut fid = 0usize; // fid is unique to each part in the vector
let mut polylines = vec![];
for record_num in 0..input.num_records {
let record = input.get_record(record_num);
for part in 0..record.num_parts as usize {
part_start = record.parts[part] as usize;
part_end = if part < record.num_parts as usize - 1 {
record.parts[part + 1] as usize - 1
} else {
record.num_points as usize - 1
};
polylines.push(
Polyline::new(
&record.points[part_start..=part_end],
record_num
)
);
end_nodes.push(Location::new(
[record.points[part_start].x, record.points[part_start].y],
(fid, true)
));
end_nodes.push(Location::new(
[record.points[part_end].x, record.points[part_end].y],
(fid, false)
));
fid += 1;
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / input.num_records as f64) as usize;
            if progress != old_progress {
println!("Reading vector: {}%", progress);
old_progress = progress;
}
}
}
let num_polylines = polylines.len(); // will be updated after the joins.
let snap_dist_sq = snap_dist * snap_dist;
let endnode_tree = RTree::bulk_load(end_nodes);
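    // Editor's note: rstar's `locate_within_distance` takes the *squared*
    // search radius for 2-D float points, which is why the squared
    // `snap_dist_sq` above is passed to it rather than `snap_dist` itself.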
let precision = EPSILON * 10f64;
let mut outlet_pt: Point2D;
let mut p1: Point2D;
let mut visited = vec![false; num_polylines];
let mut reverse = vec![false; num_polylines];
for record_num in 0..outlets.num_records {
let record = outlets.get_record(record_num);
        if record.shape_type != ShapeType::Null {
for p in 0..record.points.len() {
outlet_pt = record.points[p];
let ret = endnode_tree.locate_within_distance([outlet_pt.x, outlet_pt.y], snap_dist_sq);
for pt in ret {
let (fid, is_start) = pt.data;
                    if !visited[fid] {
                        visited[fid] = true;
                        // now find all connected line segments
                        let mut queue: VecDeque<(usize, bool)> = VecDeque::with_capacity(num_polylines);
                        queue.push_back((fid, is_start));
                        while !queue.is_empty() {
                            let (fid2, is_start2) = queue.pop_front().unwrap();
                            // Find the point associated with the other end of this polyline
                            p1 = if !is_start2 {
polylines[fid2].get_first_node()
} else {
// To get here means that you first encountered the beginning of the polyline, which
// shouldn't happen if it is correctly directed, since we are doing a bottom-up
// scan of the network. Therefore, reverse the line in the output.
reverse[fid2] = true;
polylines[fid2].get_last_node()
};
// Find the neighbouring endnodes of p1
let ret2 = endnode_tree.locate_within_distance([p1.x, p1.y], precision);
for pt2 in ret2 {
let (fid_n, is_start_n) = pt2.data;
                                if fid_n != fid2 && !visited[fid_n] {
// Add this newly encountered polyline to the queue
queue.push_back((fid_n, is_start_n));
visited[fid_n] = true;
}
}
}
}
}
}
}
if configurations.verbose_mode {
progress = (100.0_f64 * (fid + 1) as f64 / num_polylines as f64) as usize;
            if progress != old_progress {
println!("Looking for reverse-oriented arcs: {}%", progress);
old_progress = progress;
}
}
}
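    // Editor's note (illustration, assuming an exactly-noded network): an arc
    // whose *last* vertex touches the junction currently being expanded is
    // already directed downstream, so the walk simply continues from its first
    // vertex. If its *first* vertex is met instead, the arc points away from
    // the outlet and is flagged in `reverse` for flipping below.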
let mut num_reversed = 0;
for fid in 0..polylines.len() {
if reverse[fid] {
let mut line = polylines[fid].vertices.clone();
line.reverse();
polylines[fid].vertices = line;
num_reversed += 1;
}
}
println!("num. reversed arcs: {num_reversed}");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file.replace(".shp", "_reversed_arcs.shp"), &input, ShapeType::PolyLine, true).expect("Error creating output file");
let mut sfg: ShapefileGeometry;
for fid in 0..polylines.len() {
if reverse[fid] {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[fid].vertices);
output.add_record(sfg);
let record_num = polylines[fid].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
}
}
output.write().expect("Error writing file.");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file, &input, ShapeType::PolyLine, true).expect("Error creating output file"); //?;
// add the attributes
// let in_atts = input.attributes.get_fields();
let mut sfg: ShapefileGeometry;
for poly_id in 0..polylines.len() {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[poly_id].vertices);
output.add_record(sfg);
let record_num = polylines[poly_id].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
if configurations.verbose_mode {
progress = (100.0_f64 * (poly_id + 1) as f64 / polylines.len() as f64) as usize;
            if progress != old_progress {
                println!("Writing output: {}%", progress);
old_progress = progress;
}
}
}
if configurations.verbose_mode {
println!("Saving data...")
};
output.write().expect("Error writing file.");
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!(
"\n{}",
&format!("Elapsed Time (Including I/O): {}", elapsed_time)
);
}
Ok(())
}
#[derive(Default, Clone, Debug)]
struct Polyline {
vertices: Vec<Point2D>,
id: usize,
}
impl Index<usize> for Polyline {
type Output = Point2D;
fn index<'a>(&'a self, index: usize) -> &'a Point2D {
&self.vertices[index]
}
}
impl Polyline {
// Creates a new Polyline from vertices
fn new(vertices: &[Point2D], id: usize) -> Self {
Polyline {
            vertices: vertices.to_vec(),
id,
}
}
fn get_first_node(&self) -> Point2D {
self[0]
}
fn get_last_node(&self) -> Point2D {
self[self.vertices.len() - 1]
}
} | Input/output file names can be fully qualified, or can rely on the | random_line_split |
main.rs | /*
Authors: Prof. John Lindsay
Created: 15/08/2023 (originally in Whitebox Toolset Extension)
Last Modified: 15/08/2023
License: MIT
*/
use rstar::primitives::GeomWithData;
use rstar::RTree;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::ops::Index;
use std::path;
use std::str;
use std::time::Instant;
use std::collections::VecDeque;
use whitebox_common::structures::Point2D;
use whitebox_common::utils::{
get_formatted_elapsed_time,
wrapped_print
};
use whitebox_vector::{
// AttributeField,
// FieldData,
// FieldDataType,
Shapefile,
ShapefileGeometry,
ShapeType
};
const EPSILON: f64 = std::f64::EPSILON;
/// This tool can be used to resolve many of the topological errors and inconsistencies associated with
/// manually digitized vector stream networks, i.e. hydrography data. A properly structured stream network
/// should consist of a series of stream segments that connect a channel head to a downstream confluence,
/// or an upstream confluence to a downstream confluence/outlet. This tool will join vector arcs that
/// connect at arbitrary, non-confluence points along stream segments. It also splits an arc where
/// a tributary stream connects at a mid-point, thereby creating a proper confluence where two upstream
/// tributaries converge into a downstream segment. The tool also handles non-connecting tributaries
/// caused by dangling arcs, i.e. overshoots and undershoots.
///
/// The user may optionally specify the name of the input vector stream network (`--input`) and the output file
/// (`--output`). Note that if an input file is not specified by the user, the tool will search for all vector
/// files (*.shp) contained within the current working directory. This feature can be very useful when
/// you need to process a large number of stream files contained within a single directory. The tool will
/// process the files in parallel in this batch mode.
///
/// A distance threshold for snapping dangling arcs (`--snap`) must be specified by the user. This distance
/// is in the input layer's x-y units. The tool works best on projected input
/// data, however, if the input are in geographic coordinates (latitude and longitude), then specifying a
/// small valued snap distance is advisable.
///
/// Notice that the attributes of the input layer will not be
/// carried over to the output file because there is not a one-for-one feature correspondence between the
/// two files due to the joins and splits of stream segments. Instead the output attribute table will
/// only contain a feature ID (FID) entry.
///
/// > Note: this tool should be used to pre-process vector streams that are input to the
/// > `VectorStreamNetworkAnalysis` tool.
///
/// # See Also
/// `VectorStreamNetworkAnalysis`, `FixDanglingArcs`
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() <= 1 || args[1].trim() == "help" {
        // print help
        help();
    } else if args[1].trim() == "run" {
        match run(&args) {
            Ok(_) => {}
            Err(e) => panic!("{:?}", e),
        }
    } else if args[1].trim() == "version" {
        // print version information
        version();
    }
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("correct_stream_vector_direction{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
    This tool corrects the flow direction of arcs in a digitized vector stream network so that arcs point downstream toward an outlet.
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
    -i, --input     Name of the input streams vector file.
    --outlet        Name of the input outlets (points) vector file.
    -o, --output    Name of the output streams vector file.
    --snap, --dist  Snap distance, in the layer's xy units, used to match outlets to stream endnodes.
Input/output file names can be fully qualified, or can rely on the
working directory contained in the WhiteboxTools settings.json file.
Example Usage:
    >>.*EXE_NAME run -i=streams.shp --outlet=outlets.shp -o=streams_corrected.shp --snap=1.0
Note: Use of this tool requires a valid license. To obtain a license,
contact Whitebox Geospatial Inc. ([email protected]).
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"correct_stream_vector_direction v{} by Dr. John B. Lindsay (c) 2023.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("CorrectStreamVectorDirection") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
    if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
// read the arguments
let mut input_file = String::new();
let mut outlet_file = String::new();
let mut output_file: String = String::new();
let mut snap_dist = 1.0;
if args.len() <= 1 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with too few parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" | else if flag_val == "-outlet" {
outlet_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-snap" || flag_val == "-dist" {
snap_dist = if keyval {
vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
};
}
}
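    // Editor's note: the loop above accepts both argument forms --
    // "--snap=1.0" (split on '=') and "--snap 1.0" (value read from the next
    // element of `args`); a leading "--" is normalized to "-" before matching.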
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
// let mut progress: usize;
let mut old_progress: usize = 1;
let start = Instant::now();
    if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") {
        input_file = format!("{}{}", working_directory, input_file);
    }
    if !outlet_file.contains(path::MAIN_SEPARATOR) && !outlet_file.contains("/") {
        outlet_file = format!("{}{}", working_directory, outlet_file);
    }
if output_file.is_empty() {
output_file = input_file
.clone()
.replace(".shp", "_corrected.shp")
.replace(".SHP", "_corrected.shp");
}
    if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if snap_dist <= 0f64 {
if configurations.verbose_mode {
wrapped_print("Error: The snap distance must be greater than 0.0.", 50);
}
}
let input = Shapefile::read(&input_file).expect("Error reading file"); //?;
// Make sure the input vector file is of polyline type
    if input.header.shape_type.base_shape_type() != ShapeType::PolyLine {
// return Err(Error::new(
// ErrorKind::InvalidInput,
// "The vector data must be of PolyLine base shape type.",
// ));
panic!("The vector stream data must be of PolyLine base shape type.");
}
let outlets = Shapefile::read(&outlet_file).expect("Error reading file"); //?;
    // Make sure the outlets vector file is of point type
    if outlets.header.shape_type.base_shape_type() != ShapeType::Point {
panic!("The vector outlets data must be of POINT base shape type.");
}
let mut progress: usize;
// Read each line segment into an rtree.
type Location = GeomWithData<[f64; 2], (usize, bool)>;
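    // Editor's note: each R-tree entry carries a (fid, is_start) payload, so a
    // spatial hit identifies both the polyline part and which of its two
    // endnodes was matched.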
let mut end_nodes = vec![];
let (mut part_start, mut part_end): (usize, usize);
let mut fid = 0usize; // fid is unique to each part in the vector
let mut polylines = vec![];
for record_num in 0..input.num_records {
let record = input.get_record(record_num);
for part in 0..record.num_parts as usize {
part_start = record.parts[part] as usize;
part_end = if part < record.num_parts as usize - 1 {
record.parts[part + 1] as usize - 1
} else {
record.num_points as usize - 1
};
polylines.push(
Polyline::new(
&record.points[part_start..=part_end],
record_num
)
);
end_nodes.push(Location::new(
[record.points[part_start].x, record.points[part_start].y],
(fid, true)
));
end_nodes.push(Location::new(
[record.points[part_end].x, record.points[part_end].y],
(fid, false)
));
fid += 1;
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / input.num_records as f64) as usize;
            if progress != old_progress {
println!("Reading vector: {}%", progress);
old_progress = progress;
}
}
}
let num_polylines = polylines.len(); // will be updated after the joins.
let snap_dist_sq = snap_dist * snap_dist;
let endnode_tree = RTree::bulk_load(end_nodes);
let precision = EPSILON * 10f64;
let mut outlet_pt: Point2D;
let mut p1: Point2D;
let mut visited = vec![false; num_polylines];
let mut reverse = vec![false; num_polylines];
for record_num in 0..outlets.num_records {
let record = outlets.get_record(record_num);
        if record.shape_type != ShapeType::Null {
for p in 0..record.points.len() {
outlet_pt = record.points[p];
let ret = endnode_tree.locate_within_distance([outlet_pt.x, outlet_pt.y], snap_dist_sq);
for pt in ret {
let (fid, is_start) = pt.data;
                    if !visited[fid] {
                        visited[fid] = true;
                        // now find all connected line segments
                        let mut queue: VecDeque<(usize, bool)> = VecDeque::with_capacity(num_polylines);
                        queue.push_back((fid, is_start));
                        while !queue.is_empty() {
                            let (fid2, is_start2) = queue.pop_front().unwrap();
                            // Find the point associated with the other end of this polyline
                            p1 = if !is_start2 {
polylines[fid2].get_first_node()
} else {
// To get here means that you first encountered the beginning of the polyline, which
// shouldn't happen if it is correctly directed, since we are doing a bottom-up
// scan of the network. Therefore, reverse the line in the output.
reverse[fid2] = true;
polylines[fid2].get_last_node()
};
// Find the neighbouring endnodes of p1
let ret2 = endnode_tree.locate_within_distance([p1.x, p1.y], precision);
for pt2 in ret2 {
let (fid_n, is_start_n) = pt2.data;
                                if fid_n != fid2 && !visited[fid_n] {
// Add this newly encountered polyline to the queue
queue.push_back((fid_n, is_start_n));
visited[fid_n] = true;
}
}
}
}
}
}
}
if configurations.verbose_mode {
progress = (100.0_f64 * (fid + 1) as f64 / num_polylines as f64) as usize;
            if progress != old_progress {
println!("Looking for reverse-oriented arcs: {}%", progress);
old_progress = progress;
}
}
}
let mut num_reversed = 0;
for fid in 0..polylines.len() {
if reverse[fid] {
let mut line = polylines[fid].vertices.clone();
line.reverse();
polylines[fid].vertices = line;
num_reversed += 1;
}
}
println!("num. reversed arcs: {num_reversed}");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file.replace(".shp", "_reversed_arcs.shp"), &input, ShapeType::PolyLine, true).expect("Error creating output file");
let mut sfg: ShapefileGeometry;
for fid in 0..polylines.len() {
if reverse[fid] {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[fid].vertices);
output.add_record(sfg);
let record_num = polylines[fid].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
}
}
output.write().expect("Error writing file.");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file, &input, ShapeType::PolyLine, true).expect("Error creating output file"); //?;
// add the attributes
// let in_atts = input.attributes.get_fields();
let mut sfg: ShapefileGeometry;
for poly_id in 0..polylines.len() {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[poly_id].vertices);
output.add_record(sfg);
let record_num = polylines[poly_id].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
if configurations.verbose_mode {
progress = (100.0_f64 * (poly_id + 1) as f64 / polylines.len() as f64) as usize;
            if progress != old_progress {
                println!("Writing output: {}%", progress);
old_progress = progress;
}
}
}
if configurations.verbose_mode {
println!("Saving data...")
};
output.write().expect("Error writing file.");
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!(
"\n{}",
&format!("Elapsed Time (Including I/O): {}", elapsed_time)
);
}
Ok(())
}
#[derive(Default, Clone, Debug)]
struct Polyline {
vertices: Vec<Point2D>,
id: usize,
}
impl Index<usize> for Polyline {
type Output = Point2D;
fn index<'a>(&'a self, index: usize) -> &'a Point2D {
&self.vertices[index]
}
}
impl Polyline {
// Creates a new Polyline from vertices
fn new(vertices: &[Point2D], id: usize) -> Self {
Polyline {
            vertices: vertices.to_vec(),
id,
}
}
fn get_first_node(&self) -> Point2D {
self[0]
}
fn get_last_node(&self) -> Point2D {
self[self.vertices.len() - 1]
}
} | {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} | conditional_block |
main.rs | /*
Authors: Prof. John Lindsay
Created: 15/08/2023 (originally in Whitebox Toolset Extension)
Last Modified: 15/08/2023
License: MIT
*/
use rstar::primitives::GeomWithData;
use rstar::RTree;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::ops::Index;
use std::path;
use std::str;
use std::time::Instant;
use std::collections::VecDeque;
use whitebox_common::structures::Point2D;
use whitebox_common::utils::{
get_formatted_elapsed_time,
wrapped_print
};
use whitebox_vector::{
// AttributeField,
// FieldData,
// FieldDataType,
Shapefile,
ShapefileGeometry,
ShapeType
};
const EPSILON: f64 = std::f64::EPSILON;
/// This tool can be used to resolve many of the topological errors and inconsistencies associated with
/// manually digitized vector stream networks, i.e. hydrography data. A properly structured stream network
/// should consist of a series of stream segments that connect a channel head to a downstream confluence,
/// or an upstream confluence to a downstream confluence/outlet. This tool will join vector arcs that
/// connect at arbitrary, non-confluence points along stream segments. It also splits an arc where
/// a tributary stream connects at a mid-point, thereby creating a proper confluence where two upstream
/// tributaries converge into a downstream segment. The tool also handles non-connecting tributaries
/// caused by dangling arcs, i.e. overshoots and undershoots.
///
/// The user may optionally specify the name of the input vector stream network (`--input`) and the output file
/// (`--output`). Note that if an input file is not specified by the user, the tool will search for all vector
/// files (*.shp) contained within the current working directory. This feature can be very useful when
/// you need to process a large number of stream files contained within a single directory. The tool will
/// process the files in parallel in this batch mode.
///
/// A distance threshold for snapping dangling arcs (`--snap`) must be specified by the user. This distance
/// is in the input layer's x-y units. The tool works best on projected input
/// data, however, if the input are in geographic coordinates (latitude and longitude), then specifying a
/// small valued snap distance is advisable.
///
/// Notice that the attributes of the input layer will not be
/// carried over to the output file because there is not a one-for-one feature correspondence between the
/// two files due to the joins and splits of stream segments. Instead the output attribute table will
/// only contain a feature ID (FID) entry.
///
/// > Note: this tool should be used to pre-process vector streams that are input to the
/// > `VectorStreamNetworkAnalysis` tool.
///
/// # See Also
/// `VectorStreamNetworkAnalysis`, `FixDanglingArcs`
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() <= 1 || args[1].trim() == "help" {
        // print help
        help();
    } else if args[1].trim() == "run" {
        match run(&args) {
            Ok(_) => {}
            Err(e) => panic!("{:?}", e),
        }
    } else if args[1].trim() == "version" {
        // print version information
        version();
    }
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("correct_stream_vector_direction{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
    This tool corrects the flow direction of arcs in a digitized vector stream network so that arcs point downstream toward an outlet.
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
    -i, --input     Name of the input streams vector file.
    --outlet        Name of the input outlets (points) vector file.
    -o, --output    Name of the output streams vector file.
    --snap, --dist  Snap distance, in the layer's xy units, used to match outlets to stream endnodes.
Input/output file names can be fully qualified, or can rely on the
working directory contained in the WhiteboxTools settings.json file.
Example Usage:
    >>.*EXE_NAME run -i=streams.shp --outlet=outlets.shp -o=streams_corrected.shp --snap=1.0
Note: Use of this tool requires a valid license. To obtain a license,
contact Whitebox Geospatial Inc. ([email protected]).
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() |
fn get_tool_name() -> String {
String::from("CorrectStreamVectorDirection") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
    if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
// read the arguments
let mut input_file = String::new();
let mut outlet_file = String::new();
let mut output_file: String = String::new();
let mut snap_dist = 1.0;
if args.len() <= 1 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with too few parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-outlet" {
outlet_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-snap" || flag_val == "-dist" {
snap_dist = if keyval {
vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
};
}
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
// let mut progress: usize;
let mut old_progress: usize = 1;
let start = Instant::now();
    if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") {
        input_file = format!("{}{}", working_directory, input_file);
    }
    if !outlet_file.contains(path::MAIN_SEPARATOR) && !outlet_file.contains("/") {
        outlet_file = format!("{}{}", working_directory, outlet_file);
    }
if output_file.is_empty() {
output_file = input_file
.clone()
.replace(".shp", "_corrected.shp")
.replace(".SHP", "_corrected.shp");
}
    if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if snap_dist <= 0f64 {
if configurations.verbose_mode {
wrapped_print("Error: The snap distance must be greater than 0.0.", 50);
}
}
let input = Shapefile::read(&input_file).expect("Error reading file"); //?;
// Make sure the input vector file is of polyline type
    if input.header.shape_type.base_shape_type() != ShapeType::PolyLine {
// return Err(Error::new(
// ErrorKind::InvalidInput,
// "The vector data must be of PolyLine base shape type.",
// ));
panic!("The vector stream data must be of PolyLine base shape type.");
}
let outlets = Shapefile::read(&outlet_file).expect("Error reading file"); //?;
    // Make sure the outlets vector file is of point type
    if outlets.header.shape_type.base_shape_type() != ShapeType::Point {
panic!("The vector outlets data must be of POINT base shape type.");
}
let mut progress: usize;
// Read each line segment into an rtree.
type Location = GeomWithData<[f64; 2], (usize, bool)>;
let mut end_nodes = vec![];
let (mut part_start, mut part_end): (usize, usize);
let mut fid = 0usize; // fid is unique to each part in the vector
let mut polylines = vec![];
for record_num in 0..input.num_records {
let record = input.get_record(record_num);
for part in 0..record.num_parts as usize {
part_start = record.parts[part] as usize;
part_end = if part < record.num_parts as usize - 1 {
record.parts[part + 1] as usize - 1
} else {
record.num_points as usize - 1
};
polylines.push(
Polyline::new(
&record.points[part_start..=part_end],
record_num
)
);
end_nodes.push(Location::new(
[record.points[part_start].x, record.points[part_start].y],
(fid, true)
));
end_nodes.push(Location::new(
[record.points[part_end].x, record.points[part_end].y],
(fid, false)
));
fid += 1;
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / input.num_records as f64) as usize;
            if progress != old_progress {
println!("Reading vector: {}%", progress);
old_progress = progress;
}
}
}
let num_polylines = polylines.len(); // will be updated after the joins.
let snap_dist_sq = snap_dist * snap_dist;
let endnode_tree = RTree::bulk_load(end_nodes);
let precision = EPSILON * 10f64;
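    // Editor's note: `precision` is on the order of 1e-15 in xy units, so
    // shared junctions are found only where endnode coordinates coincide
    // essentially exactly; imperfectly noded networks would need a larger
    // matching tolerance here.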
let mut outlet_pt: Point2D;
let mut p1: Point2D;
let mut visited = vec![false; num_polylines];
let mut reverse = vec![false; num_polylines];
for record_num in 0..outlets.num_records {
let record = outlets.get_record(record_num);
        if record.shape_type != ShapeType::Null {
for p in 0..record.points.len() {
outlet_pt = record.points[p];
let ret = endnode_tree.locate_within_distance([outlet_pt.x, outlet_pt.y], snap_dist_sq);
for pt in ret {
let (fid, is_start) = pt.data;
                    if !visited[fid] {
                        visited[fid] = true;
                        // now find all connected line segments
                        let mut queue: VecDeque<(usize, bool)> = VecDeque::with_capacity(num_polylines);
                        queue.push_back((fid, is_start));
                        while !queue.is_empty() {
                            let (fid2, is_start2) = queue.pop_front().unwrap();
                            // Find the point associated with the other end of this polyline
                            p1 = if !is_start2 {
polylines[fid2].get_first_node()
} else {
// To get here means that you first encountered the beginning of the polyline, which
// shouldn't happen if it is correctly directed, since we are doing a bottom-up
// scan of the network. Therefore, reverse the line in the output.
reverse[fid2] = true;
polylines[fid2].get_last_node()
};
// Find the neighbouring endnodes of p1
let ret2 = endnode_tree.locate_within_distance([p1.x, p1.y], precision);
for pt2 in ret2 {
let (fid_n, is_start_n) = pt2.data;
                                if fid_n != fid2 && !visited[fid_n] {
// Add this newly encountered polyline to the queue
queue.push_back((fid_n, is_start_n));
visited[fid_n] = true;
}
}
}
}
}
}
}
if configurations.verbose_mode {
progress = (100.0_f64 * (fid + 1) as f64 / num_polylines as f64) as usize;
            if progress != old_progress {
println!("Looking for reverse-oriented arcs: {}%", progress);
old_progress = progress;
}
}
}
let mut num_reversed = 0;
for fid in 0..polylines.len() {
if reverse[fid] {
let mut line = polylines[fid].vertices.clone();
line.reverse();
polylines[fid].vertices = line;
num_reversed += 1;
}
}
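    // Editor's note: the clone-reverse-assign above could be written as an
    // in-place `polylines[fid].vertices.reverse()`; the result is identical.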
println!("num. reversed arcs: {num_reversed}");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file.replace(".shp", "_reversed_arcs.shp"), &input, ShapeType::PolyLine, true).expect("Error creating output file");
let mut sfg: ShapefileGeometry;
for fid in 0..polylines.len() {
if reverse[fid] {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[fid].vertices);
output.add_record(sfg);
let record_num = polylines[fid].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
}
}
output.write().expect("Error writing file.");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file, &input, ShapeType::PolyLine, true).expect("Error creating output file"); //?;
// add the attributes
// let in_atts = input.attributes.get_fields();
let mut sfg: ShapefileGeometry;
for poly_id in 0..polylines.len() {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[poly_id].vertices);
output.add_record(sfg);
let record_num = polylines[poly_id].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
if configurations.verbose_mode {
progress = (100.0_f64 * (poly_id + 1) as f64 / polylines.len() as f64) as usize;
            if progress != old_progress {
                println!("Writing output: {}%", progress);
old_progress = progress;
}
}
}
if configurations.verbose_mode {
println!("Saving data...")
};
output.write().expect("Error writing file.");
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!(
"\n{}",
&format!("Elapsed Time (Including I/O): {}", elapsed_time)
);
}
Ok(())
}
#[derive(Default, Clone, Debug)]
struct Polyline {
vertices: Vec<Point2D>,
id: usize,
}
impl Index<usize> for Polyline {
type Output = Point2D;
fn index<'a>(&'a self, index: usize) -> &'a Point2D {
&self.vertices[index]
}
}
impl Polyline {
// Creates a new Polyline from vertices
fn new(vertices: &[Point2D], id: usize) -> Self {
Polyline {
            vertices: vertices.to_vec(),
id,
}
}
fn get_first_node(&self) -> Point2D {
self[0]
}
fn get_last_node(&self) -> Point2D {
self[self.vertices.len() - 1]
}
} | {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"correct_stream_vector_direction v{} by Dr. John B. Lindsay (c) 2023.",
VERSION.unwrap_or("Unknown version")
);
} | identifier_body |
main.rs | /*
Authors: Prof. John Lindsay
Created: 15/08/2023 (originally in Whitebox Toolset Extension)
Last Modified: 15/08/2023
License: MIT
*/
use rstar::primitives::GeomWithData;
use rstar::RTree;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::ops::Index;
use std::path;
use std::str;
use std::time::Instant;
use std::collections::VecDeque;
use whitebox_common::structures::Point2D;
use whitebox_common::utils::{
get_formatted_elapsed_time,
wrapped_print
};
use whitebox_vector::{
// AttributeField,
// FieldData,
// FieldDataType,
Shapefile,
ShapefileGeometry,
ShapeType
};
const EPSILON: f64 = std::f64::EPSILON;
/// This tool can be used to resolve many of the topological errors and inconsistencies associated with
/// manually digitized vector stream networks, i.e. hydrography data. A properly structured stream network
/// should consist of a series of stream segments that connect a channel head to a downstream confluence,
/// or an upstream confluence to a downstream confluence/outlet. This tool will join vector arcs that
/// connect at arbitrary, non-confluence points along stream segments. It also splits an arc where
/// a tributary stream connects at a mid-point, thereby creating a proper confluence where two upstream
/// tributaries converge into a downstream segment. The tool also handles non-connecting tributaries
/// caused by dangling arcs, i.e. overshoots and undershoots.
///
/// The user may optionally specify the name of the input vector stream network (`--input`) and the output file
/// (`--output`). Note that if an input file is not specified by the user, the tool will search for all vector
/// files (*.shp) contained within the current working directory. This feature can be very useful when
/// you need to process a large number of stream files contained within a single directory. The tool will
/// process the files in parallel in this batch mode.
///
/// A distance threshold for snapping dangling arcs (`--snap`) must be specified by the user. This distance
/// is in the input layer's x-y units. The tool works best on projected input
/// data, however, if the input are in geographic coordinates (latitude and longitude), then specifying a
/// small valued snap distance is advisable.
///
/// Notice that the attributes of the input layer will not be
/// carried over to the output file because there is not a one-for-one feature correspondence between the
/// two files due to the joins and splits of stream segments. Instead the output attribute table will
/// only contain a feature ID (FID) entry.
///
/// > Note: this tool should be used to pre-process vector streams that are input to the
/// > `VectorStreamNetworkAnalysis` tool.
///
/// # See Also
/// `VectorStreamNetworkAnalysis`, `FixDanglingArcs`
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() <= 1 || args[1].trim() == "help" {
        // print help
        help();
    } else if args[1].trim() == "run" {
        match run(&args) {
            Ok(_) => {}
            Err(e) => panic!("{:?}", e),
        }
    } else if args[1].trim() == "version" {
        // print version information
        version();
    }
}
fn help() {
let mut ext = "";
if cfg!(target_os = "windows") {
ext = ".exe";
}
let exe_name = &format!("correct_stream_vector_direction{}", ext);
let sep: String = path::MAIN_SEPARATOR.to_string();
let s = r#"
    This tool corrects the flow direction of arcs in a digitized vector stream network so that arcs point downstream toward an outlet.
The following commands are recognized:
help Prints help information.
run Runs the tool.
version Prints the tool version information.
The following flags can be used with the 'run' command:
    -i, --input     Name of the input streams vector file.
    --outlet        Name of the input outlets (points) vector file.
    -o, --output    Name of the output streams vector file.
    --snap, --dist  Snap distance, in the layer's xy units, used to match outlets to stream endnodes.
Input/output file names can be fully qualified, or can rely on the
working directory contained in the WhiteboxTools settings.json file.
Example Usage:
    >>.*EXE_NAME run -i=streams.shp --outlet=outlets.shp -o=streams_corrected.shp --snap=1.0
Note: Use of this tool requires a valid license. To obtain a license,
contact Whitebox Geospatial Inc. ([email protected]).
"#
.replace("*", &sep)
.replace("EXE_NAME", exe_name);
println!("{}", s);
}
fn version() {
const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
println!(
"correct_stream_vector_direction v{} by Dr. John B. Lindsay (c) 2023.",
VERSION.unwrap_or("Unknown version")
);
}
fn get_tool_name() -> String {
String::from("CorrectStreamVectorDirection") // This should be camel case and is a reference to the tool name.
}
fn run(args: &Vec<String>) -> Result<(), std::io::Error> {
let tool_name = get_tool_name();
let sep: String = path::MAIN_SEPARATOR.to_string();
// Read in the environment variables and get the necessary values
let configurations = whitebox_common::configs::get_configs()?;
let mut working_directory = configurations.working_directory.clone();
    if !working_directory.is_empty() && !working_directory.ends_with(&sep) {
working_directory += &sep;
}
// read the arguments
let mut input_file = String::new();
let mut outlet_file = String::new();
let mut output_file: String = String::new();
let mut snap_dist = 1.0;
if args.len() <= 1 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with too few parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-outlet" {
outlet_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-snap" || flag_val == "-dist" {
snap_dist = if keyval {
vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val))
};
}
}
if configurations.verbose_mode {
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
// let mut progress: usize;
let mut old_progress: usize = 1;
let start = Instant::now();
    if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") {
        input_file = format!("{}{}", working_directory, input_file);
    }
    if !outlet_file.contains(path::MAIN_SEPARATOR) && !outlet_file.contains("/") {
        outlet_file = format!("{}{}", working_directory, outlet_file);
    }
if output_file.is_empty() {
output_file = input_file
.clone()
.replace(".shp", "_corrected.shp")
.replace(".SHP", "_corrected.shp");
}
    if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if snap_dist <= 0f64 {
if configurations.verbose_mode {
wrapped_print("Error: The snap distance must be greater than 0.0.", 50);
}
}
let input = Shapefile::read(&input_file).expect("Error reading file"); //?;
// Make sure the input vector file is of polyline type
    if input.header.shape_type.base_shape_type() != ShapeType::PolyLine {
// return Err(Error::new(
// ErrorKind::InvalidInput,
// "The vector data must be of PolyLine base shape type.",
// ));
panic!("The vector stream data must be of PolyLine base shape type.");
}
let outlets = Shapefile::read(&outlet_file).expect("Error reading file"); //?;
    // Make sure the outlets vector file is of point type
    if outlets.header.shape_type.base_shape_type() != ShapeType::Point {
panic!("The vector outlets data must be of POINT base shape type.");
}
let mut progress: usize;
// Read each line segment into an rtree.
type Location = GeomWithData<[f64; 2], (usize, bool)>;
let mut end_nodes = vec![];
let (mut part_start, mut part_end): (usize, usize);
let mut fid = 0usize; // fid is unique to each part in the vector
let mut polylines = vec![];
for record_num in 0..input.num_records {
let record = input.get_record(record_num);
for part in 0..record.num_parts as usize {
part_start = record.parts[part] as usize;
part_end = if part < record.num_parts as usize - 1 {
record.parts[part + 1] as usize - 1
} else {
record.num_points as usize - 1
};
polylines.push(
Polyline::new(
&record.points[part_start..=part_end],
record_num
)
);
end_nodes.push(Location::new(
[record.points[part_start].x, record.points[part_start].y],
(fid, true)
));
end_nodes.push(Location::new(
[record.points[part_end].x, record.points[part_end].y],
(fid, false)
));
fid += 1;
}
if configurations.verbose_mode {
progress = (100.0_f64 * (record_num + 1) as f64 / input.num_records as f64) as usize;
            if progress != old_progress {
println!("Reading vector: {}%", progress);
old_progress = progress;
}
}
}
let num_polylines = polylines.len(); // will be updated after the joins.
let snap_dist_sq = snap_dist * snap_dist;
let endnode_tree = RTree::bulk_load(end_nodes);
let precision = EPSILON * 10f64;
let mut outlet_pt: Point2D;
let mut p1: Point2D;
let mut visited = vec![false; num_polylines];
let mut reverse = vec![false; num_polylines];
for record_num in 0..outlets.num_records {
let record = outlets.get_record(record_num);
        if record.shape_type != ShapeType::Null {
for p in 0..record.points.len() {
outlet_pt = record.points[p];
let ret = endnode_tree.locate_within_distance([outlet_pt.x, outlet_pt.y], snap_dist_sq);
for pt in ret {
let (fid, is_start) = pt.data;
                    if !visited[fid] {
                        visited[fid] = true;
                        // now find all connected line segments
                        let mut queue: VecDeque<(usize, bool)> = VecDeque::with_capacity(num_polylines);
                        queue.push_back((fid, is_start));
                        while !queue.is_empty() {
                            let (fid2, is_start2) = queue.pop_front().unwrap();
                            // Find the point associated with the other end of this polyline
                            p1 = if !is_start2 {
polylines[fid2].get_first_node()
} else {
// To get here means that you first encountered the beginning of the polyline, which
// shouldn't happen if it is correctly directed, since we are doing a bottom-up
// scan of the network. Therefore, reverse the line in the output.
reverse[fid2] = true;
polylines[fid2].get_last_node()
};
// Find the neighbouring endnodes of p1
let ret2 = endnode_tree.locate_within_distance([p1.x, p1.y], precision);
for pt2 in ret2 {
let (fid_n, is_start_n) = pt2.data;
                                if fid_n != fid2 && !visited[fid_n] {
// Add this newly encountered polyline to the queue
queue.push_back((fid_n, is_start_n));
visited[fid_n] = true;
}
}
}
}
}
}
}
if configurations.verbose_mode {
progress = (100.0_f64 * (fid + 1) as f64 / num_polylines as f64) as usize;
            if progress != old_progress {
println!("Looking for reverse-oriented arcs: {}%", progress);
old_progress = progress;
}
}
}
let mut num_reversed = 0;
for fid in 0..polylines.len() {
if reverse[fid] {
let mut line = polylines[fid].vertices.clone();
line.reverse();
polylines[fid].vertices = line;
num_reversed += 1;
}
}
println!("num. reversed arcs: {num_reversed}");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file.replace(".shp", "_reversed_arcs.shp"), &input, ShapeType::PolyLine, true).expect("Error creating output file");
let mut sfg: ShapefileGeometry;
for fid in 0..polylines.len() {
if reverse[fid] {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[fid].vertices);
output.add_record(sfg);
let record_num = polylines[fid].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
}
}
output.write().expect("Error writing file.");
// create output file
let mut output = Shapefile::initialize_using_file(&output_file, &input, ShapeType::PolyLine, true).expect("Error creating output file"); //?;
// add the attributes
// let in_atts = input.attributes.get_fields();
let mut sfg: ShapefileGeometry;
for poly_id in 0..polylines.len() {
sfg = ShapefileGeometry::new(ShapeType::PolyLine);
sfg.add_part(&polylines[poly_id].vertices);
output.add_record(sfg);
let record_num = polylines[poly_id].id;
let att_data = input.attributes.get_record(record_num);
output.attributes.add_record(att_data.clone(), false);
if configurations.verbose_mode {
progress = (100.0_f64 * (poly_id + 1) as f64 / polylines.len() as f64) as usize;
            if progress != old_progress {
                println!("Writing output: {}%", progress);
old_progress = progress;
}
}
}
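    // Editor's note: attributes are copied per *part* from the parent record
    // (`polylines[poly_id].id`), so every part of a multipart polyline
    // inherits that record's attribute row in the output table.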
if configurations.verbose_mode {
println!("Saving data...")
};
output.write().expect("Error writing file.");
let elapsed_time = get_formatted_elapsed_time(start);
if configurations.verbose_mode {
println!(
"\n{}",
&format!("Elapsed Time (Including I/O): {}", elapsed_time)
);
}
Ok(())
}
#[derive(Default, Clone, Debug)]
struct Polyline {
vertices: Vec<Point2D>,
id: usize,
}
impl Index<usize> for Polyline {
type Output = Point2D;
fn index<'a>(&'a self, index: usize) -> &'a Point2D {
&self.vertices[index]
}
}
impl Polyline {
// Creates a new Polyline from vertices
fn new(vertices: &[Point2D], id: usize) -> Self {
Polyline {
            vertices: vertices.to_vec(),
id,
}
}
fn | (&self) -> Point2D {
self[0]
}
fn get_last_node(&self) -> Point2D {
self[self.vertices.len() - 1]
}
} | get_first_node | identifier_name |
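// Editor's sketch (illustrative only; assumes `Point2D::new(x, y)` exists in
// whitebox_common and is not part of the dataset row above): how the Polyline
// helper is typically used.
//
//     let pl = Polyline::new(&[Point2D::new(0.0, 0.0), Point2D::new(3.0, 4.0)], 0);
//     assert_eq!(pl.get_first_node(), pl[0]);
//     assert_eq!(pl.get_last_node(), pl[pl.vertices.len() - 1]);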
spmc.rs | pub use super::{
NoRecv,
RecvErr::{self, *},
};
use incin::Pause;
use owned_alloc::OwnedAlloc;
use ptr::{bypass_null, check_null_align};
use removable::Removable;
use std::{
fmt,
ptr::{null_mut, NonNull},
sync::{
atomic::{AtomicPtr, Ordering::*},
Arc,
},
};
/// Creates an asynchronous lock-free Single-Producer-Multi-Consumer (SPMC)
/// channel. In order to allow multiple consumers, [`Receiver`] is clonable and
/// does not require mutability.
pub fn create<T>() -> (Sender<T>, Receiver<T>) {
with_incin(SharedIncin::new())
}
/// Same as [`create`], but use a passed incinerator instead of creating a new
/// one.
pub fn with_incin<T>(incin: SharedIncin<T>) -> (Sender<T>, Receiver<T>) {
check_null_align::<Node<T>>();
// First we create a single node shared between two ends.
let alloc = OwnedAlloc::new(Node {
message: Removable::empty(),
next: AtomicPtr::new(null_mut()),
});
let single_node = alloc.into_raw();
// Then put it on back and on the front.
let sender = Sender { back: single_node };
let receiver = Receiver {
inner: Arc::new(ReceiverInner {
front: AtomicPtr::new(single_node.as_ptr()),
incin,
}),
};
(sender, receiver)
}
/// The [`Sender`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function.
pub struct | <T> {
back: NonNull<Node<T>>,
}
impl<T> Sender<T> {
/// Sends a message and if the receiver disconnected, an error is returned.
pub fn send(&mut self, message: T) -> Result<(), NoRecv<T>> {
// First we allocate the node for our message.
let alloc = OwnedAlloc::new(Node {
message: Removable::new(message),
next: AtomicPtr::new(null_mut()),
});
let nnptr = alloc.into_raw();
// This dereferral is safe because the queue has at least one node. We
// possess a single node in the back, and if the queue has just one
// node, it is stored in the back (and in the front). Also, we are the
// only ones with access to the back.
let res = unsafe {
// We try to update the back's next pointer. We want to catch any
// bit marking here. A marked lower bit means the receiver
// disconnected.
self.back.as_ref().next.compare_exchange(
null_mut(),
nnptr.as_ptr(),
Release,
Relaxed,
)
};
if res.is_ok() {
// If we succeeded, let's update the back so we keep the invariant
// "the back has a single node".
self.back = nnptr;
Ok(())
} else {
// If we failed, receiver disconnected. It is safe to dealloc
// because this is the node we just allocated, and we did not share
// it with anyone (cas failed).
let mut alloc = unsafe { OwnedAlloc::from_raw(nnptr) };
let message = alloc.message.replace(None).unwrap();
Err(NoRecv { message })
}
}
/// Tests if there are any [`Receiver`]s still connected. There are no
/// guarantees that [`send`](Sender::send) will succeed if this method
/// returns `true` because the [`Receiver`] may disconnect meanwhile.
pub fn is_connected(&self) -> bool {
// Safe because we always have at least one node, which is only dropped
// in the last side to disconnect's drop.
let back = unsafe { self.back.as_ref() };
back.next.load(Relaxed).is_null()
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// This dereferral is safe because the queue always have at least one
// node. This single node is only dropped when the last side to
// disconnect drops.
let res = unsafe {
// Let's try to mark next's bit so that receiver will see we
// disconnected, if it hasn't disconnected by itself. It is ok to
// just swap, since we have only two possible values (null and
        // null | 1) and everyone will be setting to the same value
// (null | 1).
self.back
.as_ref()
.next
.swap((null_mut::<Node<T>>() as usize | 1) as *mut _, Relaxed)
};
// If the previously stored value was not null, receiver has already
// disconnected. It is safe to drop because we are the only ones that
// have a pointer to the node.
        if !res.is_null() {
unsafe { OwnedAlloc::from_raw(self.back) };
}
}
}
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
fmtr.write_str("spmc::Sender")
}
}
unsafe impl<T> Send for Sender<T> where T: Send {}
unsafe impl<T> Sync for Sender<T> where T: Send {}
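// Editor's sketch (illustrative usage of this module's public API; the exact
// re-export path is an assumption):
//
//     let (mut tx, rx) = create::<u32>();
//     tx.send(1).expect("receiver still alive");
//     let rx2 = rx.clone(); // any number of consumers, no mutability needed
//     assert_eq!(rx2.recv().ok(), Some(1));
//     drop(tx); // marks the back node's next pointer with the low bit
//     assert!(rx.recv().is_err()); // NoSender once the buffer drains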
/// The [`Receiver`] handle of a SPMC channel. Created by [`create`] or
/// [`with_incin`] function. It is clonable and does not require mutability.
pub struct Receiver<T> {
inner: Arc<ReceiverInner<T>>,
}
impl<T> Receiver<T> {
/// Tries to receive a message. If no message is available,
/// [`Err`]`(`[`RecvErr::NoMessage`]`)` is returned. If the sender
/// disconnected, [`Err`]`(`[`RecvErr::NoSender`]`)` is returned.
#[allow(unused_must_use)]
pub fn recv(&self) -> Result<T, RecvErr> {
// We have to pause the incinerator due to ABA problem. This channel
// suffers from it, yeah.
let pause = self.inner.incin.inner.pause();
// Bypassing null check is safe because we never store null in
// the front.
let mut front_nnptr = unsafe {
// First we load pointer stored in the front.
bypass_null(self.inner.front.load(Relaxed))
};
loop {
// Let's remove the node logically first. Safe to derefer this
// pointer because we paused the incinerator and we only
// delete nodes via incinerator.
match unsafe { front_nnptr.as_ref().message.take(AcqRel) } {
Some(val) => {
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are
// passing.
unsafe { self.try_clear_first(front_nnptr, &pause) };
break Ok(val);
},
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are passing.
None => unsafe {
front_nnptr = self.try_clear_first(front_nnptr, &pause)?;
},
}
}
}
/// Tests if there are any [`Sender`]s still connected. There are no
/// guarantees that [`recv`](Receiver::recv) will succeed if this method
/// returns `true` because the [`Sender`] may disconnect meanwhile.
/// This method may also return `true` if the [`Sender`] disconnected
/// but there are messages pending in the buffer. Note that another
/// [`Receiver`] may pop out the pending messages after this method was
/// called.
pub fn is_connected(&self) -> bool {
// We need this pause because of use-after-free.
let _pause = self.inner.incin.inner.pause();
// Safe to dereference this pointer because we paused the incinerator and we
// only delete nodes via incinerator.
let front = unsafe { &*self.inner.front.load(Relaxed) };
front.message.is_present(Relaxed)
|| front.next.load(Relaxed) as usize & 1 == 0
}
/// The shared incinerator used by this [`Receiver`].
pub fn incin(&self) -> SharedIncin<T> {
self.inner.incin.clone()
}
// This function is unsafe because passing the wrong pointer will lead to
// undefined behavior. The pointer must have been loaded from the front
// during the passed pause.
unsafe fn try_clear_first(
&self,
expected: NonNull<Node<T>>,
pause: &Pause<OwnedAlloc<Node<T>>>,
) -> Result<NonNull<Node<T>>, RecvErr> {
let next = expected.as_ref().next.load(Acquire);
if next as usize & 1 == 1 {
// If the next is bit flagged, sender disconnected, no more messages
// ever.
Err(RecvErr::NoSender)
} else if next.is_null() {
// No bit flag means sender is still there but we have no message.
Err(RecvErr::NoMessage)
} else {
let ptr = expected.as_ptr();
// We are not obliged to succeed. This is just cleanup and some other
// thread might do it.
let next = match self
.inner
.front
.compare_exchange(ptr, next, Relaxed, Relaxed)
{
Ok(_) => {
// Only deleting nodes via incinerator due to ABA
// problem and use-after-frees.
pause.add_to_incin(OwnedAlloc::from_raw(expected));
next
},
Err(found) => found,
};
// Safe to by-pass the check since we only store non-null
// pointers on the front.
Ok(bypass_null(next))
}
}
}
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "spmc::Receiver {} ptr: {:p} {}", '{', self.inner, '}')
}
}
unsafe impl<T> Send for Receiver<T> where T: Send {}
unsafe impl<T> Sync for Receiver<T> where T: Send {}
struct ReceiverInner<T> {
// never null
front: AtomicPtr<Node<T>>,
incin: SharedIncin<T>,
}
impl<T> Drop for ReceiverInner<T> {
fn drop(&mut self) {
let front = self.front.get_mut();
loop {
// This null-check-by-pass is safe because we never store null in
// the front.
let front_nnptr = unsafe { bypass_null(*front) };
// This is safe because we are the only receiver left and the list
// will always have at least one node, even in the drop. Of course,
// unless we are the last side to drop (then we do drop it all).
let res = unsafe {
// Let's try to mark the next (which means we disconnected). We
// might fail because either this is not the last node or the
// sender already disconnected and marked this pointer.
front_nnptr.as_ref().next.compare_exchange(
null_mut(),
(null_mut::<Node<T>>() as usize | 1) as *mut _,
AcqRel,
Acquire,
)
};
match res {
// If it succeeded, we are the first side to disconnect and we
// must keep at least one node in the queue.
Ok(_) => break,
Err(next) => {
// Ok, safe to deallocate the front now. We already loaded
// the next field and it is not null.
// Either the queue won't be empty or the
// sender disconnected.
unsafe { OwnedAlloc::from_raw(front_nnptr) };
// This means the sender disconnected and we reached the end of
// the queue.
if next as usize & 1 == 1 {
break;
}
// Now let's keep going until the list is empty.
*front = next;
},
}
}
}
}
#[repr(align(/* at least */ 2))]
struct Node<T> {
message: Removable<T>,
// lower bit is 1 if the other side disconnected, 0 means nothing
next: AtomicPtr<Node<T>>,
}
make_shared_incin! {
{ "`spmc::Receiver`" }
pub SharedIncin<T> of OwnedAlloc<Node<T>>
}
#[cfg(test)]
mod test {
use channel::spmc;
use std::{
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
thread,
};
#[test]
fn correct_numbers() {
const THREADS: usize = 8;
const MSGS: usize = 512;
let mut done = Vec::with_capacity(MSGS);
for _ in 0..MSGS {
done.push(AtomicBool::new(false));
}
let done = Arc::<[AtomicBool]>::from(done);
let (mut sender, receiver) = spmc::create::<usize>();
let mut threads = Vec::with_capacity(THREADS);
for _ in 0..THREADS {
let done = done.clone();
let receiver = receiver.clone();
threads.push(thread::spawn(move || loop {
match receiver.recv() {
Ok(i) => assert!(!done[i].swap(true, AcqRel)),
Err(spmc::NoSender) => break,
Err(spmc::NoMessage) => (),
}
}))
}
for i in 0..MSGS {
sender.send(i).unwrap();
}
drop(sender);
for thread in threads {
thread.join().unwrap();
}
for status in done.iter() {
assert!(status.load(Relaxed));
}
}
}
| Sender | identifier_name |
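// [Editor's sketch] A minimal round trip over the SPMC channel above, using
// only the `create`/`send`/`recv` API shown in the source. The module path
// `channel::spmc` is an assumption borrowed from the test imports.
fn spmc_round_trip_sketch() {
    use channel::spmc::{self, RecvErr};
    let (mut sender, receiver) = spmc::create::<u32>();
    sender.send(7).expect("receiver still connected");
    // `recv` is non-blocking, so spin until the message becomes visible.
    let value = loop {
        match receiver.recv() {
            Ok(v) => break v,
            Err(RecvErr::NoMessage) => continue,
            Err(RecvErr::NoSender) => unreachable!("sender still alive"),
        }
    };
    assert_eq!(value, 7);
}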
spmc.rs | pub use super::{
NoRecv,
RecvErr::{self, *},
};
use incin::Pause;
use owned_alloc::OwnedAlloc;
use ptr::{bypass_null, check_null_align};
use removable::Removable;
use std::{
fmt,
ptr::{null_mut, NonNull},
sync::{
atomic::{AtomicPtr, Ordering::*},
Arc,
},
};
/// Creates an asynchronous lock-free Single-Producer-Multi-Consumer (SPMC)
/// channel. In order to allow multiple consumers, [`Receiver`] is clonable and
/// does not require mutability.
pub fn create<T>() -> (Sender<T>, Receiver<T>) {
with_incin(SharedIncin::new())
}
/// Same as [`create`], but uses a passed incinerator instead of creating a new
/// one.
pub fn with_incin<T>(incin: SharedIncin<T>) -> (Sender<T>, Receiver<T>) {
check_null_align::<Node<T>>();
// First we create a single node shared between two ends.
let alloc = OwnedAlloc::new(Node {
message: Removable::empty(),
next: AtomicPtr::new(null_mut()),
});
let single_node = alloc.into_raw();
// Then put it on back and on the front.
let sender = Sender { back: single_node };
let receiver = Receiver {
inner: Arc::new(ReceiverInner {
front: AtomicPtr::new(single_node.as_ptr()),
incin,
}),
};
(sender, receiver)
}
/// The [`Sender`] handle of an SPMC channel. Created by [`create`] or
/// [`with_incin`] function.
pub struct Sender<T> {
back: NonNull<Node<T>>,
}
impl<T> Sender<T> {
/// Sends a message; if the receiver has disconnected, an error is returned.
pub fn send(&mut self, message: T) -> Result<(), NoRecv<T>> {
// First we allocate the node for our message.
let alloc = OwnedAlloc::new(Node {
message: Removable::new(message),
next: AtomicPtr::new(null_mut()),
});
let nnptr = alloc.into_raw();
// This dereference is safe because the queue has at least one node. We
// possess a single node in the back, and if the queue has just one
// node, it is stored in the back (and in the front). Also, we are the
// only ones with access to the back.
let res = unsafe {
// We try to update the back's next pointer. We want to catch any
// bit marking here. A marked lower bit means the receiver
// disconnected.
self.back.as_ref().next.compare_exchange(
null_mut(),
nnptr.as_ptr(),
Release,
Relaxed,
)
};
if res.is_ok() {
// If we succeeded, let's update the back so we keep the invariant
// "the back has a single node".
self.back = nnptr;
Ok(())
} else {
// If we failed, the receiver disconnected. It is safe to deallocate
// because this is the node we just allocated, and we did not share
// it with anyone (the CAS failed).
let mut alloc = unsafe { OwnedAlloc::from_raw(nnptr) };
let message = alloc.message.replace(None).unwrap();
Err(NoRecv { message })
}
}
/// Tests if there are any [`Receiver`]s still connected. There are no
/// guarantees that [`send`](Sender::send) will succeed if this method
/// returns `true` because the [`Receiver`] may disconnect meanwhile.
pub fn is_connected(&self) -> bool |
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// This dereference is safe because the queue always has at least one
// node. This single node is only dropped when the last side to
// disconnect drops.
let res = unsafe {
// Let's try to mark next's bit so that the receiver will see we
// disconnected, if it hasn't disconnected by itself. It is ok to
// just swap, since we have only two possible values (null and
// null | 1) and everyone will be setting it to the same value
// (null | 1).
self.back
.as_ref()
.next
.swap((null_mut::<Node<T>>() as usize | 1) as *mut _, Relaxed)
};
// If the previously stored value was not null, receiver has already
// disconnected. It is safe to drop because we are the only ones that
// have a pointer to the node.
if !res.is_null() {
unsafe { OwnedAlloc::from_raw(self.back) };
}
}
}
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
fmtr.write_str("spmc::Sender")
}
}
unsafe impl<T> Send for Sender<T> where T: Send {}
unsafe impl<T> Sync for Sender<T> where T: Send {}
/// The [`Receiver`] handle of an SPMC channel. Created by [`create`] or
/// [`with_incin`] function. It is clonable and does not require mutability.
pub struct Receiver<T> {
inner: Arc<ReceiverInner<T>>,
}
impl<T> Receiver<T> {
/// Tries to receive a message. If no message is available,
/// [`Err`]`(`[`RecvErr::NoMessage`]`)` is returned. If the sender
/// disconnected, [`Err`]`(`[`RecvErr::NoSender`]`)` is returned.
#[allow(unused_must_use)]
pub fn recv(&self) -> Result<T, RecvErr> {
// We have to pause the incinerator due to ABA problem. This channel
// suffers from it, yeah.
let pause = self.inner.incin.inner.pause();
// Bypassing null check is safe because we never store null in
// the front.
let mut front_nnptr = unsafe {
// First we load pointer stored in the front.
bypass_null(self.inner.front.load(Relaxed))
};
loop {
// Let's remove the node logically first. Safe to dereference this
// pointer because we paused the incinerator and we only
// delete nodes via incinerator.
match unsafe { front_nnptr.as_ref().message.take(AcqRel) } {
Some(val) => {
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are
// passing.
unsafe { self.try_clear_first(front_nnptr, &pause) };
break Ok(val);
},
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are passing.
None => unsafe {
front_nnptr = self.try_clear_first(front_nnptr, &pause)?;
},
}
}
}
/// Tests if there are any [`Sender`]s still connected. There are no
/// guarantees that [`recv`](Receiver::recv) will succeed if this method
/// returns `true` because the [`Sender`] may disconnect meanwhile.
/// This method may also return `true` if the [`Sender`] disconnected
/// but there are messages pending in the buffer. Note that another
/// [`Receiver`] may pop out the pending messages after this method was
/// called.
pub fn is_connected(&self) -> bool {
// We need this pause because of use-after-free.
let _pause = self.inner.incin.inner.pause();
// Safe to dereference this pointer because we paused the incinerator and we
// only delete nodes via incinerator.
let front = unsafe { &*self.inner.front.load(Relaxed) };
front.message.is_present(Relaxed)
|| front.next.load(Relaxed) as usize & 1 == 0
}
/// The shared incinerator used by this [`Receiver`].
pub fn incin(&self) -> SharedIncin<T> {
self.inner.incin.clone()
}
// This function is unsafe because passing the wrong pointer will lead to
// undefined behavior. The pointer must have been loaded from the front
// during the passed pause.
unsafe fn try_clear_first(
&self,
expected: NonNull<Node<T>>,
pause: &Pause<OwnedAlloc<Node<T>>>,
) -> Result<NonNull<Node<T>>, RecvErr> {
let next = expected.as_ref().next.load(Acquire);
if next as usize & 1 == 1 {
// If the next is bit flagged, sender disconnected, no more messages
// ever.
Err(RecvErr::NoSender)
} else if next.is_null() {
// No bit flag means sender is still there but we have no message.
Err(RecvErr::NoMessage)
} else {
let ptr = expected.as_ptr();
// We are not obliged to succeed. This is just cleanup and some other
// thread might do it.
let next = match self
.inner
.front
.compare_exchange(ptr, next, Relaxed, Relaxed)
{
Ok(_) => {
// Only deleting nodes via incinerator due to ABA
// problem and use-after-frees.
pause.add_to_incin(OwnedAlloc::from_raw(expected));
next
},
Err(found) => found,
};
// Safe to by-pass the check since we only store non-null
// pointers on the front.
Ok(bypass_null(next))
}
}
}
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "spmc::Receiver {} ptr: {:p} {}", '{', self.inner, '}')
}
}
unsafe impl<T> Send for Receiver<T> where T: Send {}
unsafe impl<T> Sync for Receiver<T> where T: Send {}
struct ReceiverInner<T> {
// never null
front: AtomicPtr<Node<T>>,
incin: SharedIncin<T>,
}
impl<T> Drop for ReceiverInner<T> {
fn drop(&mut self) {
let front = self.front.get_mut();
loop {
// This null-check-by-pass is safe because we never store null in
// the front.
let front_nnptr = unsafe { bypass_null(*front) };
// This is safe because we are the only receiver left and the list
// will always have at least one node, even in the drop. Of course,
// unless we are the last side to drop (then we do drop it all).
let res = unsafe {
// Let's try to mark the next (which means we disconnected). We
// might fail because either this is not the last node or the
// sender already disconnected and marked this pointer.
front_nnptr.as_ref().next.compare_exchange(
null_mut(),
(null_mut::<Node<T>>() as usize | 1) as *mut _,
AcqRel,
Acquire,
)
};
match res {
// If it succeeded, we are the first side to disconnect and we
// must keep at least one node in the queue.
Ok(_) => break,
Err(next) => {
// Ok, safe to deallocate the front now. We already loaded
// the next field and it is not null.
// Either the queue won't be empty or the
// sender disconnected.
unsafe { OwnedAlloc::from_raw(front_nnptr) };
// This means the sender disconnected and we reached the end of
// the queue.
if next as usize & 1 == 1 {
break;
}
// Now let's keep going until the list is empty.
*front = next;
},
}
}
}
}
#[repr(align(/* at least */ 2))]
struct Node<T> {
message: Removable<T>,
// lower bit is 1 if the other side disconnected, 0 means nothing
next: AtomicPtr<Node<T>>,
}
make_shared_incin! {
{ "`spmc::Receiver`" }
pub SharedIncin<T> of OwnedAlloc<Node<T>>
}
#[cfg(test)]
mod test {
use channel::spmc;
use std::{
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
thread,
};
#[test]
fn correct_numbers() {
const THREADS: usize = 8;
const MSGS: usize = 512;
let mut done = Vec::with_capacity(MSGS);
for _ in 0..MSGS {
done.push(AtomicBool::new(false));
}
let done = Arc::<[AtomicBool]>::from(done);
let (mut sender, receiver) = spmc::create::<usize>();
let mut threads = Vec::with_capacity(THREADS);
for _ in 0..THREADS {
let done = done.clone();
let receiver = receiver.clone();
threads.push(thread::spawn(move || loop {
match receiver.recv() {
Ok(i) => assert!(!done[i].swap(true, AcqRel)),
Err(spmc::NoSender) => break,
Err(spmc::NoMessage) => (),
}
}))
}
for i in 0..MSGS {
sender.send(i).unwrap();
}
drop(sender);
for thread in threads {
thread.join().unwrap();
}
for status in done.iter() {
assert!(status.load(Relaxed));
}
}
}
| {
// Safe because we always have at least one node, which is only dropped
// in the last side to disconnect's drop.
let back = unsafe { self.back.as_ref() };
back.next.load(Relaxed).is_null()
} | identifier_body |
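// [Editor's sketch] The disconnect protocol above relies on low-bit pointer
// tagging: `Node` is aligned to at least 2 bytes, so bit 0 of any real node
// address is 0 and can carry the "other side hung up" flag. A standalone
// illustration of the same trick (the `u64` payload is an arbitrary choice):
fn tag_bit_demo() {
    use std::ptr::null_mut;
    use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};
    let next: AtomicPtr<u64> = AtomicPtr::new(null_mut());
    // Mark the low bit, exactly like `Sender::drop` / `ReceiverInner::drop`.
    next.store((null_mut::<u64>() as usize | 1) as *mut u64, Relaxed);
    let raw = next.load(Relaxed) as usize;
    assert_eq!(raw & 1, 1); // flag observed: the peer disconnected
    assert!(((raw & !1) as *mut u64).is_null()); // stripping the tag recovers the pointer
}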
spmc.rs | pub use super::{
NoRecv,
RecvErr::{self, *},
};
use incin::Pause;
use owned_alloc::OwnedAlloc;
use ptr::{bypass_null, check_null_align};
use removable::Removable;
use std::{
fmt,
ptr::{null_mut, NonNull},
sync::{
atomic::{AtomicPtr, Ordering::*},
Arc,
},
};
/// Creates an asynchronous lock-free Single-Producer-Multi-Consumer (SPMC)
/// channel. In order to allow multiple consumers, [`Receiver`] is clonable and
/// does not require mutability.
pub fn create<T>() -> (Sender<T>, Receiver<T>) {
with_incin(SharedIncin::new())
}
/// Same as [`create`], but uses a passed incinerator instead of creating a new
/// one.
pub fn with_incin<T>(incin: SharedIncin<T>) -> (Sender<T>, Receiver<T>) {
check_null_align::<Node<T>>();
// First we create a single node shared between two ends.
let alloc = OwnedAlloc::new(Node {
message: Removable::empty(),
next: AtomicPtr::new(null_mut()),
});
let single_node = alloc.into_raw();
// Then put it on back and on the front.
let sender = Sender { back: single_node };
let receiver = Receiver {
inner: Arc::new(ReceiverInner {
front: AtomicPtr::new(single_node.as_ptr()),
incin,
}),
};
(sender, receiver)
}
/// The [`Sender`] handle of an SPMC channel. Created by [`create`] or
/// [`with_incin`] function.
pub struct Sender<T> {
back: NonNull<Node<T>>,
}
impl<T> Sender<T> {
/// Sends a message; if the receiver has disconnected, an error is returned.
pub fn send(&mut self, message: T) -> Result<(), NoRecv<T>> {
// First we allocate the node for our message.
let alloc = OwnedAlloc::new(Node {
message: Removable::new(message),
next: AtomicPtr::new(null_mut()),
});
let nnptr = alloc.into_raw();
// This dereference is safe because the queue has at least one node. We
// possess a single node in the back, and if the queue has just one
// node, it is stored in the back (and in the front). Also, we are the
// only ones with access to the back.
let res = unsafe {
// We try to update the back's next pointer. We want to catch any
// bit marking here. A marked lower bit means the receiver
// disconnected.
self.back.as_ref().next.compare_exchange(
null_mut(),
nnptr.as_ptr(),
Release,
Relaxed,
)
};
if res.is_ok() {
// If we succeeded, let's update the back so we keep the invariant
// "the back has a single node".
self.back = nnptr;
Ok(())
} else {
// If we failed, the receiver disconnected. It is safe to deallocate
// because this is the node we just allocated, and we did not share
// it with anyone (the CAS failed).
let mut alloc = unsafe { OwnedAlloc::from_raw(nnptr) };
let message = alloc.message.replace(None).unwrap();
Err(NoRecv { message })
}
}
/// Tests if there are any [`Receiver`]s still connected. There are no
/// guarantees that [`send`](Sender::send) will succeed if this method
/// returns `true` because the [`Receiver`] may disconnect meanwhile.
pub fn is_connected(&self) -> bool {
// Safe because we always have at least one node, which is only dropped
// in the last side to disconnect's drop.
let back = unsafe { self.back.as_ref() };
back.next.load(Relaxed).is_null()
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// This dereference is safe because the queue always has at least one
// node. This single node is only dropped when the last side to
// disconnect drops.
let res = unsafe {
// Let's try to mark next's bit so that the receiver will see we
// disconnected, if it hasn't disconnected by itself. It is ok to
// just swap, since we have only two possible values (null and
// null | 1) and everyone will be setting it to the same value
// (null | 1).
self.back
.as_ref()
.next
.swap((null_mut::<Node<T>>() as usize | 1) as *mut _, Relaxed)
};
// If the previously stored value was not null, receiver has already
// disconnected. It is safe to drop because we are the only ones that
// have a pointer to the node.
if !res.is_null() {
unsafe { OwnedAlloc::from_raw(self.back) };
}
}
}
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
fmtr.write_str("spmc::Sender")
}
}
unsafe impl<T> Send for Sender<T> where T: Send {}
unsafe impl<T> Sync for Sender<T> where T: Send {}
/// The [`Receiver`] handle of an SPMC channel. Created by [`create`] or
/// [`with_incin`] function. It is clonable and does not require mutability.
pub struct Receiver<T> {
inner: Arc<ReceiverInner<T>>,
}
impl<T> Receiver<T> {
/// Tries to receive a message. If no message is available,
/// [`Err`]`(`[`RecvErr::NoMessage`]`)` is returned. If the sender
/// disconnected, [`Err`]`(`[`RecvErr::NoSender`]`)` is returned.
#[allow(unused_must_use)]
pub fn recv(&self) -> Result<T, RecvErr> {
// We have to pause the incinerator due to ABA problem. This channel
// suffers from it, yeah.
let pause = self.inner.incin.inner.pause();
// Bypassing null check is safe because we never store null in
// the front.
let mut front_nnptr = unsafe {
// First we load pointer stored in the front.
bypass_null(self.inner.front.load(Relaxed))
};
loop {
// Let's remove the node logically first. Safe to dereference this
// pointer because we paused the incinerator and we only
// delete nodes via incinerator.
match unsafe { front_nnptr.as_ref().message.take(AcqRel) } {
Some(val) => {
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are
// passing.
unsafe { self.try_clear_first(front_nnptr, &pause) };
break Ok(val);
},
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are passing.
None => unsafe {
front_nnptr = self.try_clear_first(front_nnptr, &pause)?;
},
}
}
}
/// Tests if there are any [`Sender`]s still connected. There are no
/// guarantees that [`recv`](Receiver::recv) will succeed if this method
/// returns `true` because the [`Sender`] may disconnect meanwhile.
/// This method may also return `true` if the [`Sender`] disconnected
/// but there are messages pending in the buffer. Note that another
/// [`Receiver`] may pop out the pending messages after this method was
/// called.
pub fn is_connected(&self) -> bool {
// We need this pause because of use-after-free.
let _pause = self.inner.incin.inner.pause();
// Safe to dereference this pointer because we paused the incinerator and we
// only delete nodes via incinerator.
let front = unsafe { &*self.inner.front.load(Relaxed) };
front.message.is_present(Relaxed)
|| front.next.load(Relaxed) as usize & 1 == 0
}
/// The shared incinerator used by this [`Receiver`].
pub fn incin(&self) -> SharedIncin<T> {
self.inner.incin.clone()
}
// This function is unsafe because passing the wrong pointer will lead to
// undefined behavior. The pointer must have been loaded from the front
// during the passed pause.
unsafe fn try_clear_first(
&self,
expected: NonNull<Node<T>>,
pause: &Pause<OwnedAlloc<Node<T>>>,
) -> Result<NonNull<Node<T>>, RecvErr> {
let next = expected.as_ref().next.load(Acquire);
if next as usize & 1 == 1 {
// If the next is bit flagged, sender disconnected, no more messages
// ever.
Err(RecvErr::NoSender)
} else if next.is_null() {
// No bit flag means sender is still there but we have no message.
Err(RecvErr::NoMessage)
} else | // Safe to by-pass the check since we only store non-null
// pointers on the front.
Ok(bypass_null(next))
}
}
}
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "spmc::Receiver {} ptr: {:p} {}", '{', self.inner, '}')
}
}
unsafe impl<T> Send for Receiver<T> where T: Send {}
unsafe impl<T> Sync for Receiver<T> where T: Send {}
struct ReceiverInner<T> {
// never null
front: AtomicPtr<Node<T>>,
incin: SharedIncin<T>,
}
impl<T> Drop for ReceiverInner<T> {
fn drop(&mut self) {
let front = self.front.get_mut();
loop {
// This null-check-by-pass is safe because we never store null in
// the front.
let front_nnptr = unsafe { bypass_null(*front) };
// This is safe because we are the only receiver left and the list
// will always have at least one node, even in the drop. Of course,
// unless we are the last side to drop (then we do drop it all).
let res = unsafe {
// Let's try to mark the next (which means we disconnected). We
// might fail because either this is not the last node or the
// sender already disconnected and marked this pointer.
front_nnptr.as_ref().next.compare_exchange(
null_mut(),
(null_mut::<Node<T>>() as usize | 1) as *mut _,
AcqRel,
Acquire,
)
};
match res {
// If it succeeded, we are the first side to disconnect and we
// must keep at least one node in the queue.
Ok(_) => break,
Err(next) => {
// Ok, safe to deallocate the front now. We already loaded
// the next field and it is not null.
// Either the queue won't be empty or the
// sender disconnected.
unsafe { OwnedAlloc::from_raw(front_nnptr) };
// This means the sender disconnected and we reached the end of
// the queue.
if next as usize & 1 == 1 {
break;
}
// Now let's keep going until the list is empty.
*front = next;
},
}
}
}
}
#[repr(align(/* at least */ 2))]
struct Node<T> {
message: Removable<T>,
// lower bit is 1 if the other side disconnected, 0 means nothing
next: AtomicPtr<Node<T>>,
}
make_shared_incin! {
{ "`spmc::Receiver`" }
pub SharedIncin<T> of OwnedAlloc<Node<T>>
}
#[cfg(test)]
mod test {
use channel::spmc;
use std::{
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
thread,
};
#[test]
fn correct_numbers() {
const THREADS: usize = 8;
const MSGS: usize = 512;
let mut done = Vec::with_capacity(MSGS);
for _ in 0..MSGS {
done.push(AtomicBool::new(false));
}
let done = Arc::<[AtomicBool]>::from(done);
let (mut sender, receiver) = spmc::create::<usize>();
let mut threads = Vec::with_capacity(THREADS);
for _ in 0..THREADS {
let done = done.clone();
let receiver = receiver.clone();
threads.push(thread::spawn(move || loop {
match receiver.recv() {
Ok(i) => assert!(!done[i].swap(true, AcqRel)),
Err(spmc::NoSender) => break,
Err(spmc::NoMessage) => (),
}
}))
}
for i in 0..MSGS {
sender.send(i).unwrap();
}
drop(sender);
for thread in threads {
thread.join().unwrap();
}
for status in done.iter() {
assert!(status.load(Relaxed));
}
}
}
| {
let ptr = expected.as_ptr();
// We are not obliged to succeed. This is just cleanup and some other
// thread might do it.
let next = match self
.inner
.front
.compare_exchange(ptr, next, Relaxed, Relaxed)
{
Ok(_) => {
// Only deleting nodes via incinerator due to ABA
// problem and use-after-frees.
pause.add_to_incin(OwnedAlloc::from_raw(expected));
next
},
Err(found) => found,
};
| conditional_block |
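// [Editor's sketch] Why removed nodes go through the incinerator instead of
// being freed inline: another receiver may still hold a pointer it loaded
// during its own pause, so reclamation must wait until no pause is active.
// A toy, single-threaded analogue of that shape (not the real `incin` API):
struct DeferList<T> {
    pending: Vec<Box<T>>, // retired nodes, unlinked but not yet freed
}

impl<T> DeferList<T> {
    fn retire(&mut self, node: Box<T>) {
        // The node left the queue, but concurrent readers may still see it.
        self.pending.push(node);
    }
    fn quiescent(&mut self) {
        // Once no reader can hold an old pointer, dropping becomes safe.
        self.pending.clear();
    }
}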
spmc.rs | pub use super::{
NoRecv,
RecvErr::{self, *},
};
use incin::Pause;
use owned_alloc::OwnedAlloc;
use ptr::{bypass_null, check_null_align};
use removable::Removable;
use std::{
fmt,
ptr::{null_mut, NonNull},
sync::{
atomic::{AtomicPtr, Ordering::*},
Arc,
},
};
/// Creates an asynchronous lock-free Single-Producer-Multi-Consumer (SPMC)
/// channel. In order to allow multiple consumers, [`Receiver`] is clonable and
/// does not require mutability.
pub fn create<T>() -> (Sender<T>, Receiver<T>) {
with_incin(SharedIncin::new())
}
/// Same as [`create`], but uses a passed incinerator instead of creating a new
/// one.
pub fn with_incin<T>(incin: SharedIncin<T>) -> (Sender<T>, Receiver<T>) {
check_null_align::<Node<T>>();
// First we create a single node shared between two ends.
let alloc = OwnedAlloc::new(Node {
message: Removable::empty(),
next: AtomicPtr::new(null_mut()),
});
let single_node = alloc.into_raw();
// Then put it on back and on the front.
let sender = Sender { back: single_node };
let receiver = Receiver {
inner: Arc::new(ReceiverInner {
front: AtomicPtr::new(single_node.as_ptr()),
incin,
}),
};
(sender, receiver)
}
/// The [`Sender`] handle of an SPMC channel. Created by [`create`] or
/// [`with_incin`] function.
pub struct Sender<T> {
back: NonNull<Node<T>>,
}
impl<T> Sender<T> {
/// Sends a message; if the receiver has disconnected, an error is returned.
pub fn send(&mut self, message: T) -> Result<(), NoRecv<T>> {
// First we allocate the node for our message.
let alloc = OwnedAlloc::new(Node {
message: Removable::new(message),
next: AtomicPtr::new(null_mut()),
});
let nnptr = alloc.into_raw();
// This dereference is safe because the queue has at least one node. We
// possess a single node in the back, and if the queue has just one
// node, it is stored in the back (and in the front). Also, we are the
// only ones with access to the back.
let res = unsafe {
// We try to update the back's next pointer. We want to catch any
// bit marking here. A marked lower bit means the receiver
// disconnected.
self.back.as_ref().next.compare_exchange(
null_mut(),
nnptr.as_ptr(),
Release,
Relaxed,
)
};
if res.is_ok() {
// If we succeeded, let's update the back so we keep the invariant
// "the back has a single node".
self.back = nnptr;
Ok(())
} else {
// If we failed, the receiver disconnected. It is safe to deallocate
// because this is the node we just allocated, and we did not share
// it with anyone (the CAS failed).
let mut alloc = unsafe { OwnedAlloc::from_raw(nnptr) };
let message = alloc.message.replace(None).unwrap();
Err(NoRecv { message })
}
}
/// Tests if there are any [`Receiver`]s still connected. There are no
/// guarantees that [`send`](Sender::send) will succeed if this method
/// returns `true` because the [`Receiver`] may disconnect meanwhile.
pub fn is_connected(&self) -> bool {
// Safe because we always have at least one node, which is only dropped
// in the last side to disconnect's drop.
let back = unsafe { self.back.as_ref() };
back.next.load(Relaxed).is_null()
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// This dereference is safe because the queue always has at least one
// node. This single node is only dropped when the last side to
// disconnect drops.
let res = unsafe {
// Let's try to mark next's bit so that the receiver will see we
// disconnected, if it hasn't disconnected by itself. It is ok to
// just swap, since we have only two possible values (null and
// null | 1) and everyone will be setting it to the same value
// (null | 1).
self.back
.as_ref()
.next
.swap((null_mut::<Node<T>>() as usize | 1) as *mut _, Relaxed)
};
// If the previously stored value was not null, receiver has already
// disconnected. It is safe to drop because we are the only ones that
// have a pointer to the node.
if !res.is_null() {
unsafe { OwnedAlloc::from_raw(self.back) };
}
}
}
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
fmtr.write_str("spmc::Sender")
}
}
unsafe impl<T> Send for Sender<T> where T: Send {}
unsafe impl<T> Sync for Sender<T> where T: Send {}
/// The [`Receiver`] handle of an SPMC channel. Created by [`create`] or
/// [`with_incin`] function. It is clonable and does not require mutability.
pub struct Receiver<T> {
inner: Arc<ReceiverInner<T>>,
}
impl<T> Receiver<T> {
/// Tries to receive a message. If no message is available,
/// [`Err`]`(`[`RecvErr::NoMessage`]`)` is returned. If the sender
/// disconnected, [`Err`]`(`[`RecvErr::NoSender`]`)` is returned.
#[allow(unused_must_use)]
pub fn recv(&self) -> Result<T, RecvErr> {
// We have to pause the incinerator due to ABA problem. This channel | // the front.
let mut front_nnptr = unsafe {
// First we load pointer stored in the front.
bypass_null(self.inner.front.load(Relaxed))
};
loop {
// Let's remove the node logically first. Safe to dereference this
// pointer because we paused the incinerator and we only
// delete nodes via incinerator.
match unsafe { front_nnptr.as_ref().message.take(AcqRel) } {
Some(val) => {
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are
// passing.
unsafe { self.try_clear_first(front_nnptr, &pause) };
break Ok(val);
},
// Safe to call because we passed a pointer from the front
// which was loaded during the very same pause we are passing.
None => unsafe {
front_nnptr = self.try_clear_first(front_nnptr, &pause)?;
},
}
}
}
/// Tests if there are any [`Sender`]s still connected. There are no
/// guarantees that [`recv`](Receiver::recv) will succeed if this method
/// returns `true` because the [`Sender`] may disconnect meanwhile.
/// This method may also return `true` if the [`Sender`] disconnected
/// but there are messages pending in the buffer. Note that another
/// [`Receiver`] may pop out the pending messages after this method was
/// called.
pub fn is_connected(&self) -> bool {
// We need this pause because of use-after-free.
let _pause = self.inner.incin.inner.pause();
// Safe to dereference this pointer because we paused the incinerator and we
// only delete nodes via incinerator.
let front = unsafe { &*self.inner.front.load(Relaxed) };
front.message.is_present(Relaxed)
|| front.next.load(Relaxed) as usize & 1 == 0
}
/// The shared incinerator used by this [`Receiver`].
pub fn incin(&self) -> SharedIncin<T> {
self.inner.incin.clone()
}
// This function is unsafe because passing the wrong pointer will lead to
// undefined behavior. The pointer must have been loaded from the front
// during the passed pause.
unsafe fn try_clear_first(
&self,
expected: NonNull<Node<T>>,
pause: &Pause<OwnedAlloc<Node<T>>>,
) -> Result<NonNull<Node<T>>, RecvErr> {
let next = expected.as_ref().next.load(Acquire);
if next as usize & 1 == 1 {
// If the next is bit flagged, sender disconnected, no more messages
// ever.
Err(RecvErr::NoSender)
} else if next.is_null() {
// No bit flag means sender is still there but we have no message.
Err(RecvErr::NoMessage)
} else {
let ptr = expected.as_ptr();
// We are not obliged to succeed. This is just cleanup and some other
// thread might do it.
let next = match self
.inner
.front
.compare_exchange(ptr, next, Relaxed, Relaxed)
{
Ok(_) => {
// Only deleting nodes via incinerator due to ABA
// problem and use-after-frees.
pause.add_to_incin(OwnedAlloc::from_raw(expected));
next
},
Err(found) => found,
};
// Safe to by-pass the check since we only store non-null
// pointers on the front.
Ok(bypass_null(next))
}
}
}
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
write!(fmtr, "spmc::Receiver {} ptr: {:p} {}", '{', self.inner, '}')
}
}
unsafe impl<T> Send for Receiver<T> where T: Send {}
unsafe impl<T> Sync for Receiver<T> where T: Send {}
struct ReceiverInner<T> {
// never null
front: AtomicPtr<Node<T>>,
incin: SharedIncin<T>,
}
impl<T> Drop for ReceiverInner<T> {
fn drop(&mut self) {
let front = self.front.get_mut();
loop {
// This null-check-by-pass is safe because we never store null in
// the front.
let front_nnptr = unsafe { bypass_null(*front) };
// This is safe because we are the only receiver left and the list
// will always have at least one node, even in the drop. Of course,
// unless we are the last side to drop (then we do drop it all).
let res = unsafe {
// Let's try to mark the next (which means we disconnected). We
// might fail because either this is not the last node or the
// sender already disconnected and marked this pointer.
front_nnptr.as_ref().next.compare_exchange(
null_mut(),
(null_mut::<Node<T>>() as usize | 1) as *mut _,
AcqRel,
Acquire,
)
};
match res {
// If it succeeded, we are the first side to disconnect and we
// must keep at least one node in the queue.
Ok(_) => break,
Err(next) => {
// Ok, safe to deallocate the front now. We already loaded
// the next field and it is not null.
// Either the queue won't be empty or the
// sender disconnected.
unsafe { OwnedAlloc::from_raw(front_nnptr) };
// This means the sender disconnected and we reached the end of
// the queue.
if next as usize & 1 == 1 {
break;
}
// Now let's keep going until the list is empty.
*front = next;
},
}
}
}
}
#[repr(align(/* at least */ 2))]
struct Node<T> {
message: Removable<T>,
// lower bit is 1 if the other side disconnected, 0 means nothing
next: AtomicPtr<Node<T>>,
}
make_shared_incin! {
{ "`spmc::Receiver`" }
pub SharedIncin<T> of OwnedAlloc<Node<T>>
}
#[cfg(test)]
mod test {
use channel::spmc;
use std::{
sync::{
atomic::{AtomicBool, Ordering::*},
Arc,
},
thread,
};
#[test]
fn correct_numbers() {
const THREADS: usize = 8;
const MSGS: usize = 512;
let mut done = Vec::with_capacity(MSGS);
for _ in 0..MSGS {
done.push(AtomicBool::new(false));
}
let done = Arc::<[AtomicBool]>::from(done);
let (mut sender, receiver) = spmc::create::<usize>();
let mut threads = Vec::with_capacity(THREADS);
for _ in 0..THREADS {
let done = done.clone();
let receiver = receiver.clone();
threads.push(thread::spawn(move || loop {
match receiver.recv() {
Ok(i) => assert!(!done[i].swap(true, AcqRel)),
Err(spmc::NoSender) => break,
Err(spmc::NoMessage) => (),
}
}))
}
for i in 0..MSGS {
sender.send(i).unwrap();
}
drop(sender);
for thread in threads {
thread.join().unwrap();
}
for status in done.iter() {
assert!(status.load(Relaxed));
}
}
} | // suffers from it, yeah.
let pause = self.inner.incin.inner.pause();
// Bypassing null check is safe because we never store null in | random_line_split |
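// [Editor's sketch] The multi-consumer side in miniature, mirroring the
// `correct_numbers` test above: `Receiver` is `Clone + Send`, so each worker
// owns a handle and the sender's items are partitioned among them. The
// `channel::spmc` path is again assumed from the test imports.
fn fan_out_sketch() {
    use channel::spmc;
    use std::thread;
    let (mut sender, receiver) = spmc::create::<usize>();
    let workers: Vec<_> = (0..4)
        .map(|_| {
            let rx = receiver.clone();
            thread::spawn(move || {
                let mut seen = 0usize;
                loop {
                    match rx.recv() {
                        Ok(_) => seen += 1,
                        Err(spmc::NoSender) => break seen,
                        Err(spmc::NoMessage) => thread::yield_now(),
                    }
                }
            })
        })
        .collect();
    for i in 0..100 {
        sender.send(i).unwrap();
    }
    drop(sender); // sets the tag bit; workers observe `NoSender` and stop
    let total: usize = workers.into_iter().map(|h| h.join().unwrap()).sum();
    assert_eq!(total, 100);
}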
intcode.rs | //! This module implements an IntCode interpreter.
use std::convert::TryFrom;
// The following terminology notes are taken from day 2 part 2
// - memory: the list of integers used when interpreting
// - address/position: an index into memory
// - opcode: mark the beginning of an instruction and denote the instruction
// - parameters: the values after an instruction used by the instruction
// - instruction pointer: the address of the current instruction
#[derive(Debug, PartialEq)]
enum OpCode {
Add = 1, // *(pc+1) + *(pc+2) => *(pc+3)
Multiply = 2, // *(pc+1) * *(pc+2) => *(pc+3)
ReadIn = 3, // store input to *(pc+1)
WriteOut = 4, // print value of *(pc+1) to output
JmpIfTrue = 5, // jump if *(pc+1) != 0 => ip = *(pc+2)
JmpIfFalse = 6, // jump if *(pc+1) == 0 => ip = *(pc+2)
LessThan = 7, // if *(pc+1) < *(pc+2) => *(pc+3) = 1, else 0
Equals = 8, // if *(pc+1) == *(pc+2) => *(pc+3) = 1, else 0
Halt = 99,
}
impl TryFrom<isize> for OpCode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> {
match num {
1 => Ok(Self::Add),
2 => Ok(Self::Multiply),
3 => Ok(Self::ReadIn),
4 => Ok(Self::WriteOut),
5 => Ok(Self::JmpIfTrue),
6 => Ok(Self::JmpIfFalse),
7 => Ok(Self::LessThan),
8 => Ok(Self::Equals),
99 => Ok(Self::Halt),
_ => Err("invalid opcode value"),
}
}
}
#[derive(Debug, PartialEq)]
enum AddrMode {
Pos = 0,
Imm = 1,
}
impl TryFrom<isize> for AddrMode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> {
match num {
0 => Ok(Self::Pos),
1 => Ok(Self::Imm),
_ => Err("invalid address mode value"),
}
}
}
#[derive(Debug)]
enum IPChange {
Delta(isize),
New(usize),
Halt,
}
/// Parse instruction will take a full instruction, and split it into the original instruction
/// along with addressing modes for each argument.
fn parse_instruction(word: isize) -> Result<(OpCode, AddrMode, AddrMode, AddrMode), &'static str> {
if word <= 0 {
return Err("instruction word must be greater than zero");
}
Ok((
OpCode::try_from(word % 100)?, // first two digits are op
AddrMode::try_from(word / 100 % 10)?, // 100s place
AddrMode::try_from(word / 1000 % 10)?, // 1000s place
AddrMode::try_from(word / 10000 % 10)?, // 10000s place
))
}
/// Trait is used by interpret for reading information interactively
pub trait Input {
fn get_isize(&mut self) -> isize;
}
/// Trait is used by `interpret` for writing information interactively
pub trait Output {
fn write_isize(&mut self, val: isize) -> ();
}
// Implementations for Input trait
impl Input for () {
fn get_isize(&mut self) -> isize {
panic!("Program requested input, but input source was ()");
}
}
impl Input for isize {
fn get_isize(&mut self) -> isize {
*self
}
}
// Implementations for Output trait
impl Output for () {
fn write_isize(&mut self, _val: isize) -> () {
panic!("Program attempted to write value, but out was ()");
}
}
impl Output for &mut Vec<isize> {
fn write_isize(&mut self, val: isize) -> () {
self.push(val)
}
}
/// Interpret array as an IntCode program.
/// | /// `mem` is the initial machine memory state, it is modified during the run
///
/// Will panic if it encounters an unknown opcode
pub fn interpret(mut mem: &mut [isize], mut input: impl Input, mut output: impl Output) -> isize {
let mut ip: usize = 0;
loop {
match step(&mut mem, ip, &mut input, &mut output) {
IPChange::Delta(delta) => ip = (ip as isize + delta) as usize,
IPChange::New(new) => ip = new,
IPChange::Halt => break,
}
}
mem[0]
}
fn step(
mem: &mut [isize],
ip: usize,
input: &mut impl Input,
output: &mut impl Output,
) -> IPChange {
use AddrMode::*;
use OpCode::*;
// The third mode is unused here: write operands are always positional.
let (op, addr1, addr2, _addr3) = match parse_instruction(mem[ip]) {
Ok(val) => val,
Err(err) => {
println!(
"State:\n\tIP: {}\n\tVals: {:?}, {:?}, {:?}, {:?}",
ip,
mem.get(ip),
mem.get(ip + 1),
mem.get(ip + 2),
mem.get(ip + 3)
);
panic!("Encountered unrecoverable error: {}", err);
}
};
// placing Halt check here so that args can be extracted without duplicating their code all
// over the place
if op == Halt {
return IPChange::Halt;
}
// HACK: this whole block is a hack, need to wrap memory up in a new type and provide accessors
// that understand addressing modes
let arg1 = match addr1 {
Imm => mem.get(ip + 1),
Pos => mem.get(*mem.get(ip + 1).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
let arg2 = match addr2 {
Imm => mem.get(ip + 2),
Pos => mem.get(*mem.get(ip + 2).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
match op {
Add => {
mem[mem[ip + 3] as usize] = arg1 + arg2;
IPChange::Delta(4)
}
Multiply => {
mem[mem[ip + 3] as usize] = arg1 * arg2;
IPChange::Delta(4)
}
ReadIn => {
mem[mem[ip + 1] as usize] = input.get_isize();
IPChange::Delta(2)
}
WriteOut => {
output.write_isize(mem[mem[ip + 1] as usize]);
IPChange::Delta(2)
}
JmpIfTrue => {
if *arg1 != 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
JmpIfFalse => {
if *arg1 == 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
LessThan => {
mem[mem[ip + 3] as usize] = if arg1 < arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Equals => {
mem[mem[ip + 3] as usize] = if arg1 == arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Halt => unreachable!(),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn interpret_day2_examples() {
let mut programs = vec![
vec![1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50],
vec![1, 0, 0, 0, 99],
vec![2, 3, 0, 3, 99],
vec![2, 4, 4, 5, 99, 0],
vec![1, 1, 1, 4, 99, 5, 6, 0, 99],
];
let outputs = vec![
vec![3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50],
vec![2, 0, 0, 0, 99],
vec![2, 3, 0, 6, 99],
vec![2, 4, 4, 5, 99, 9801],
vec![30, 1, 1, 4, 2, 5, 6, 0, 99],
];
for i in 0..programs.len() {
assert_eq!(interpret(&mut programs[i], (), ()), outputs[i][0]);
assert_eq!(programs[i], outputs[i]);
}
}
#[test]
fn test_parse_instruction() {
use AddrMode::*;
use OpCode::*;
type Output = (OpCode, AddrMode, AddrMode, AddrMode);
fn eq(left: Output, right: Output) -> bool {
left.0 == right.0 && left.1 == right.1 && left.2 == right.2 && left.3 == right.3
}
// from day 5 examples
assert!(eq(
parse_instruction(1002).unwrap(),
(Multiply, Pos, Imm, Pos)
));
// synthetic
assert!(eq(parse_instruction(2).unwrap(), (Multiply, Pos, Pos, Pos)));
assert!(eq(parse_instruction(11101).unwrap(), (Add, Imm, Imm, Imm)));
assert!(eq(parse_instruction(10101).unwrap(), (Add, Imm, Pos, Imm)));
assert!(eq(
parse_instruction(104).unwrap(),
(WriteOut, Imm, Pos, Pos)
));
assert!(eq(
parse_instruction(10003).unwrap(),
(ReadIn, Pos, Pos, Imm)
));
}
#[test]
fn day5_snippets() {
// This tests immediate and positional addressing and negative immediate support
// Should: find (100 + -1), store result @4
let mut simple_prog = vec![1101, 100, -1, 4, 0];
interpret(&mut simple_prog, (), ());
assert_eq!(simple_prog[4], 99);
// This should save whatever it gets from input to @0, then print it back out
let arb_input = 10346;
let mut output = Vec::new();
let mut simple_io = vec![3, 0, 4, 0, 99];
interpret(&mut simple_io, arb_input, &mut output);
println!("{:?}", output[0]);
println!("{:?}", simple_io);
assert_eq!(simple_io[0], arb_input);
assert_eq!(output[0], arb_input);
}
#[test]
fn day5_jump_tests() {
// These programs compare the input to 8, outputting 1 if eq or lt, 0 otherwise
// they use different methods for each
// test eq
let progs_eq_to_eight = vec![
vec![3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], // positional
vec![3, 3, 1108, -1, 8, 3, 4, 3, 99], // immediate
];
for (input, exp_out) in vec![(0, 0), (8, 1), (-8, 0), (10, 0)] {
for i in 0..progs_eq_to_eight.len() {
let mut prog = progs_eq_to_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0]);
}
}
// test lt
let progs_lt_eight = vec![
vec![3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8], // lt positional
vec![3, 3, 1107, -1, 8, 3, 4, 3, 99], // lt immediate
];
for (input, exp_out) in vec![(0, 1), (-1, 1), (8, 0), (10, 0)] {
for i in 0..progs_lt_eight.len() {
let mut prog = progs_lt_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
// test jump
let jump_progs = vec![
vec![3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9], // positional
vec![3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1], // immediate
];
for (input, exp_out) in vec![(0, 0), (-1, 1), (8, 1), (10, 1)] {
for i in 0..jump_progs.len() {
let mut prog = jump_progs[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
#[test]
fn day5_large_test() {
let jmp_prog = vec![
3, 21, 1008, 21, 8, 20, 1005, 20, 22, 107, 8, 21, 20, 1006, 20, 31, 1106, 0, 36, 98, 0,
0, 1002, 21, 125, 20, 4, 20, 1105, 1, 46, 104, 999, 1105, 1, 46, 1101, 1000, 1, 20, 4,
20, 1105, 1, 46, 98, 99,
];
let in_outs = vec![
(-1, 999),
(0, 999),
(5, 999),
(8, 1000),
(9, 1001),
(14240, 1001),
];
let mut output = Vec::new();
for (input, exp_out) in in_outs.into_iter() {
println!("{:?}", input);
let mut prog = jmp_prog.clone();
output.clear();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
} | random_line_split |
|
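// [Editor's note] The digit arithmetic inside `parse_instruction`, worked by
// hand for the day-5 example word 1002 (same math as the source, no new
// assumptions):
//   1002 % 100        == 2 -> OpCode::Multiply
//   1002 / 100 % 10   == 0 -> AddrMode::Pos (first parameter)
//   1002 / 1000 % 10  == 1 -> AddrMode::Imm (second parameter)
//   1002 / 10000 % 10 == 0 -> AddrMode::Pos (third parameter)
fn decode_1002_sketch() {
    let word: isize = 1002;
    assert_eq!(word % 100, 2);
    assert_eq!(word / 100 % 10, 0);
    assert_eq!(word / 1000 % 10, 1);
    assert_eq!(word / 10000 % 10, 0);
}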
intcode.rs | //! This module implements an IntCode interpreter.
use std::convert::TryFrom;
// The following terminology notes are taken from day 2 part 2
// - memory: the list of integers used when interpreting
// - address/position: an index into memory
// - opcode: mark the beginning of an instruction and denote the instruction
// - parameters: the values after an instruction used by the instruction
// - instruction pointer: the address of the current instruction
#[derive(Debug, PartialEq)]
enum OpCode {
Add = 1, // *(pc+1) + *(pc+2) => *(pc+3)
Multiply = 2, // *(pc+1) * *(pc+2) => *(pc+3)
ReadIn = 3, // store input to *(pc+1)
WriteOut = 4, // print value of *(pc+1) to output
JmpIfTrue = 5, // jump if *(pc+1) != 0 => ip = *(pc+2)
JmpIfFalse = 6, // jump if *(pc+1) == 0 => ip = *(pc+2)
LessThan = 7, // if *(pc+1) < *(pc+2) => *(pc+3) = 1, else 0
Equals = 8, // if *(pc+1) == *(pc+2) => *(pc+3) = 1, else 0
Halt = 99,
}
impl TryFrom<isize> for OpCode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> |
}
#[derive(Debug, PartialEq)]
enum AddrMode {
Pos = 0,
Imm = 1,
}
impl TryFrom<isize> for AddrMode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> {
match num {
0 => Ok(Self::Pos),
1 => Ok(Self::Imm),
_ => Err("invalid address mode value"),
}
}
}
#[derive(Debug)]
enum IPChange {
Delta(isize),
New(usize),
Halt,
}
/// Parse instruction will take a full instruction, and split it into the original instruction
/// along with addressing modes for each argument.
fn parse_instruction(word: isize) -> Result<(OpCode, AddrMode, AddrMode, AddrMode), &'static str> {
if word <= 0 {
return Err("instruction word must be greater than zero");
}
Ok((
OpCode::try_from(word % 100)?, // first two digits are op
AddrMode::try_from(word / 100 % 10)?, // 100s place
AddrMode::try_from(word / 1000 % 10)?, // 1000s place
AddrMode::try_from(word / 10000 % 10)?, // 10000s place
))
}
/// Trait is used by interpret for reading information interactively
pub trait Input {
fn get_isize(&mut self) -> isize;
}
/// Trait is used by `interpret` for writing information interactively
pub trait Output {
fn write_isize(&mut self, val: isize) -> ();
}
// Implementations for Input trait
impl Input for () {
fn get_isize(&mut self) -> isize {
panic!("Program requested input, but input source was ()");
}
}
impl Input for isize {
fn get_isize(&mut self) -> isize {
*self
}
}
// Implementations for Output trait
impl Output for () {
fn write_isize(&mut self, _val: isize) -> () {
panic!("Program attempted to write value, but out was ()");
}
}
impl Output for &mut Vec<isize> {
fn write_isize(&mut self, val: isize) -> () {
self.push(val)
}
}
/// Interpret array as an IntCode program.
///
/// `mem` is the initial machine memory state, it is modified during the run
///
/// Will panic if it encounters an unknown opcode
pub fn interpret(mut mem: &mut [isize], mut input: impl Input, mut output: impl Output) -> isize {
let mut ip: usize = 0;
loop {
match step(&mut mem, ip, &mut input, &mut output) {
IPChange::Delta(delta) => ip = (ip as isize + delta) as usize,
IPChange::New(new) => ip = new,
IPChange::Halt => break,
}
}
mem[0]
}
fn step(
mem: &mut [isize],
ip: usize,
input: &mut impl Input,
output: &mut impl Output,
) -> IPChange {
use AddrMode::*;
use OpCode::*;
// The third mode is unused here: write operands are always positional.
let (op, addr1, addr2, _addr3) = match parse_instruction(mem[ip]) {
Ok(val) => val,
Err(err) => {
println!(
"State:\n\tIP: {}\n\tVals: {:?}, {:?}, {:?}, {:?}",
ip,
mem.get(ip),
mem.get(ip + 1),
mem.get(ip + 2),
mem.get(ip + 3)
);
panic!("Encountered unrecoverable error: {}", err);
}
};
// placing Halt check here so that args can be extracted without duplicating their code all
// over the place
if op == Halt {
return IPChange::Halt;
}
// HACK: this whole block is a hack, need to wrap memory up in a new type and provide accessors
// that understand addressing modes
let arg1 = match addr1 {
Imm => mem.get(ip + 1),
Pos => mem.get(*mem.get(ip + 1).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
let arg2 = match addr2 {
Imm => mem.get(ip + 2),
Pos => mem.get(*mem.get(ip + 2).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
match op {
Add => {
mem[mem[ip + 3] as usize] = arg1 + arg2;
IPChange::Delta(4)
}
Multiply => {
mem[mem[ip + 3] as usize] = arg1 * arg2;
IPChange::Delta(4)
}
ReadIn => {
mem[mem[ip + 1] as usize] = input.get_isize();
IPChange::Delta(2)
}
WriteOut => {
output.write_isize(mem[mem[ip + 1] as usize]);
IPChange::Delta(2)
}
JmpIfTrue => {
if *arg1 != 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
JmpIfFalse => {
if *arg1 == 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
LessThan => {
mem[mem[ip + 3] as usize] = if arg1 < arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Equals => {
mem[mem[ip + 3] as usize] = if arg1 == arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Halt => unreachable!(),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn interpret_day2_examples() {
let mut programs = vec![
vec![1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50],
vec![1, 0, 0, 0, 99],
vec![2, 3, 0, 3, 99],
vec![2, 4, 4, 5, 99, 0],
vec![1, 1, 1, 4, 99, 5, 6, 0, 99],
];
let outputs = vec![
vec![3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50],
vec![2, 0, 0, 0, 99],
vec![2, 3, 0, 6, 99],
vec![2, 4, 4, 5, 99, 9801],
vec![30, 1, 1, 4, 2, 5, 6, 0, 99],
];
for i in 0..programs.len() {
assert_eq!(interpret(&mut programs[i], (), ()), outputs[i][0]);
assert_eq!(programs[i], outputs[i]);
}
}
#[test]
fn test_parse_instruction() {
use AddrMode::*;
use OpCode::*;
type Output = (OpCode, AddrMode, AddrMode, AddrMode);
fn eq(left: Output, right: Output) -> bool {
left.0 == right.0 && left.1 == right.1 && left.2 == right.2 && left.3 == right.3
}
// from day 5 examples
assert!(eq(
parse_instruction(1002).unwrap(),
(Multiply, Pos, Imm, Pos)
));
// synthetic
assert!(eq(parse_instruction(2).unwrap(), (Multiply, Pos, Pos, Pos)));
assert!(eq(parse_instruction(11101).unwrap(), (Add, Imm, Imm, Imm)));
assert!(eq(parse_instruction(10101).unwrap(), (Add, Imm, Pos, Imm)));
assert!(eq(
parse_instruction(104).unwrap(),
(WriteOut, Imm, Pos, Pos)
));
assert!(eq(
parse_instruction(10003).unwrap(),
(ReadIn, Pos, Pos, Imm)
));
}
#[test]
fn day5_snippets() {
// This tests immediate and positional addressing and negative immediate support
// Should: find (100 + -1), store result @4
let mut simple_prog = vec![1101, 100, -1, 4, 0];
interpret(&mut simple_prog, (), ());
assert_eq!(simple_prog[4], 99);
// This should save whatever it gets from input to @0, then print it back out
let arb_input = 10346;
let mut output = Vec::new();
let mut simple_io = vec![3, 0, 4, 0, 99];
interpret(&mut simple_io, arb_input, &mut output);
println!("{:?}", output[0]);
println!("{:?}", simple_io);
assert_eq!(simple_io[0], arb_input);
assert_eq!(output[0], arb_input);
}
#[test]
fn day5_jump_tests() {
// These programs compare the input to 8, outputting 1 if eq or lt, 0 otherwise
// they use different methods for each
// test eq
let progs_eq_to_eight = vec![
vec![3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], // positional
vec![3, 3, 1108, -1, 8, 3, 4, 3, 99], // immediate
];
for (input, exp_out) in vec![(0, 0), (8, 1), (-8, 0), (10, 0)] {
for i in 0..progs_eq_to_eight.len() {
let mut prog = progs_eq_to_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0]);
}
}
// test lt
let progs_lt_eight = vec![
vec![3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8], // lt positional
vec![3, 3, 1107, -1, 8, 3, 4, 3, 99], // lt immediate
];
for (input, exp_out) in vec![(0, 1), (-1, 1), (8, 0), (10, 0)] {
for i in 0..progs_lt_eight.len() {
let mut prog = progs_lt_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
// test jump
let jump_progs = vec![
vec![3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9], // positional
vec![3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1], // immediate
];
for (input, exp_out) in vec![(0, 0), (-1, 1), (8, 1), (10, 1)] {
for i in 0..jump_progs.len() {
let mut prog = jump_progs[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
#[test]
fn day5_large_test() {
let jmp_prog = vec![
3, 21, 1008, 21, 8, 20, 1005, 20, 22, 107, 8, 21, 20, 1006, 20, 31, 1106, 0, 36, 98, 0,
0, 1002, 21, 125, 20, 4, 20, 1105, 1, 46, 104, 999, 1105, 1, 46, 1101, 1000, 1, 20, 4,
20, 1105, 1, 46, 98, 99,
];
let in_outs = vec![
(-1, 999),
(0, 999),
(5, 999),
(8, 1000),
(9, 1001),
(14240, 1001),
];
let mut output = Vec::new();
for (input, exp_out) in in_outs.into_iter() {
println!("{:?}", input);
let mut prog = jmp_prog.clone();
output.clear();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
| {
match num {
1 => Ok(Self::Add),
2 => Ok(Self::Multiply),
3 => Ok(Self::ReadIn),
4 => Ok(Self::WriteOut),
5 => Ok(Self::JmpIfTrue),
6 => Ok(Self::JmpIfFalse),
7 => Ok(Self::LessThan),
8 => Ok(Self::Equals),
99 => Ok(Self::Halt),
_ => Err("invalid opcode value"),
}
} | identifier_body |
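// [Editor's sketch] An end-to-end run of the five-word echo program from the
// tests, exercising only the `Input`/`Output` impls defined in this file
// (`isize` as the input source, `&mut Vec<isize>` as the sink):
fn echo_sketch() {
    let mut prog = vec![3, 0, 4, 0, 99]; // read -> @0, write @0, halt
    let mut out = Vec::new();
    interpret(&mut prog, 42isize, &mut out);
    assert_eq!(out, vec![42]); // the input value comes straight back out
}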
intcode.rs | //! This module implements an IntCode interpreter.
use std::convert::TryFrom;
// The following terminology notes are taken from day 2 part 2
// - memory: the list of integers used when interpreting
// - address/position: an index into memory
// - opcode: mark the beginning of an instruction and denote the instruction
// - parameters: the values after an instruction used by the instruction
// - instruction pointer: the address of the current instruction
#[derive(Debug, PartialEq)]
enum OpCode {
Add = 1, // *(pc+1) + *(pc+2) => *(pc+3)
Multiply = 2, // *(pc+1) * *(pc+2) => *(pc+3)
ReadIn = 3, // store input to *(pc+1)
WriteOut = 4, // print value of *(pc+1) to output
JmpIfTrue = 5, // jump if *(pc+1) != 0 => ip = *(pc+2)
JmpIfFalse = 6, // jump if *(pc+1) == 0 => ip = *(pc+2)
LessThan = 7, // if *(pc+1) < *(pc+2) => *(pc+3) = 1, else 0
Equals = 8, // if *(pc+1) == *(pc+2) => *(pc+3) = 1, else 0
Halt = 99,
}
impl TryFrom<isize> for OpCode {
type Error = &'static str;
fn | (num: isize) -> Result<Self, Self::Error> {
match num {
1 => Ok(Self::Add),
2 => Ok(Self::Multiply),
3 => Ok(Self::ReadIn),
4 => Ok(Self::WriteOut),
5 => Ok(Self::JmpIfTrue),
6 => Ok(Self::JmpIfFalse),
7 => Ok(Self::LessThan),
8 => Ok(Self::Equals),
99 => Ok(Self::Halt),
_ => Err("invalid opcode value"),
}
}
}
#[derive(Debug, PartialEq)]
enum AddrMode {
Pos = 0,
Imm = 1,
}
impl TryFrom<isize> for AddrMode {
type Error = &'static str;
fn try_from(num: isize) -> Result<Self, Self::Error> {
match num {
0 => Ok(Self::Pos),
1 => Ok(Self::Imm),
_ => Err("invalid address mode value"),
}
}
}
#[derive(Debug)]
enum IPChange {
Delta(isize),
New(usize),
Halt,
}
/// Parses a full instruction word, splitting it into the opcode
/// along with an addressing mode for each argument.
fn parse_instruction(word: isize) -> Result<(OpCode, AddrMode, AddrMode, AddrMode), &'static str> {
if word <= 0 {
return Err("instruction word must be greater than zero");
}
Ok((
OpCode::try_from(word % 100)?, // first two digits are op
AddrMode::try_from(word / 100 % 10)?, // 100s place
AddrMode::try_from(word / 1000 % 10)?, // 1000s place
AddrMode::try_from(word / 10000 % 10)?, // 10000s place
))
}
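// Worked example (mirroring the day 5 notes): the word 1002 decomposes as
// 1002 % 100 == 2 (Multiply), 1002 / 100 % 10 == 0 (Pos),
// 1002 / 1000 % 10 == 1 (Imm), and 1002 / 10000 % 10 == 0 (Pos).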
/// Trait is used by `interpret` for reading information interactively
pub trait Input {
fn get_isize(&mut self) -> isize;
}
/// Trait is used by `interpret` for writing information interactively
pub trait Output {
fn write_isize(&mut self, val: isize);
}
// Implementations for Input trait
impl Input for () {
fn get_isize(&mut self) -> isize {
panic!("Program requested input, but input source was ()");
}
}
impl Input for isize {
fn get_isize(&mut self) -> isize {
*self
}
}
// Implementations for Output trait
impl Output for () {
fn write_isize(&mut self, _val: isize) {
panic!("Program attempted to write value, but out was ()");
}
}
impl Output for &mut Vec<isize> {
fn write_isize(&mut self, val: isize) {
self.push(val)
}
}
/// Interpret array as an IntCode program.
///
/// `mem` is the initial machine memory state; it is modified during the run
///
/// Will panic if it encounters an unknown opcode
pub fn interpret(mem: &mut [isize], mut input: impl Input, mut output: impl Output) -> isize {
let mut ip: usize = 0;
loop {
match step(mem, ip, &mut input, &mut output) {
IPChange::Delta(delta) => ip = (ip as isize + delta) as usize,
IPChange::New(new) => ip = new,
IPChange::Halt => break,
}
}
mem[0]
}
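// Minimal usage sketch (values taken from the day 2 tests below):
//
//     let mut mem = vec![1, 0, 0, 0, 99];
//     let result = interpret(&mut mem, (), ());
//     assert_eq!(result, 2); // Add stored mem[0] + mem[0] into address 0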
fn step(
mem: &mut [isize],
ip: usize,
input: &mut impl Input,
output: &mut impl Output,
) -> IPChange {
use AddrMode::*;
use OpCode::*;
let (op, addr1, addr2, addr3) = match parse_instruction(mem[ip]) {
Ok(val) => val,
Err(err) => {
println!(
"State:\n\tIP: {}\n\tVals: {:?}, {:?}, {:?}, {:?}",
ip,
mem.get(ip),
mem.get(ip + 1),
mem.get(ip + 2),
mem.get(ip + 3)
);
panic!("Encountered unrecoverable error: {}", err);
}
};
// placing Halt check here so that args can be extracted without duplicating their code all
// over the place
if op == Halt {
return IPChange::Halt;
}
// HACK: this whole block is a hack, need to wrap memory up in a new type and provide accessors
// that understand addressing modes
let arg1 = match addr1 {
Imm => mem.get(ip + 1),
Pos => mem.get(*mem.get(ip + 1).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
let arg2 = match addr2 {
Imm => mem.get(ip + 2),
Pos => mem.get(*mem.get(ip + 2).unwrap_or(&0) as usize),
}
.unwrap_or(&-1337);
match op {
Add => {
mem[mem[ip + 3] as usize] = arg1 + arg2;
IPChange::Delta(4)
}
Multiply => {
mem[mem[ip + 3] as usize] = arg1 * arg2;
IPChange::Delta(4)
}
ReadIn => {
mem[mem[ip + 1] as usize] = input.get_isize();
IPChange::Delta(2)
}
WriteOut => {
output.write_isize(mem[mem[ip + 1] as usize]);
IPChange::Delta(2)
}
JmpIfTrue => {
if *arg1 != 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
JmpIfFalse => {
if *arg1 == 0 {
IPChange::New(usize::try_from(*arg2).unwrap())
} else {
IPChange::Delta(3)
}
}
LessThan => {
mem[mem[ip + 3] as usize] = if arg1 < arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Equals => {
mem[mem[ip + 3] as usize] = if arg1 == arg2 { 1 } else { 0 };
IPChange::Delta(4)
}
Halt => unreachable!(),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn interpret_day2_examples() {
let mut programs = vec![
vec![1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50],
vec![1, 0, 0, 0, 99],
vec![2, 3, 0, 3, 99],
vec![2, 4, 4, 5, 99, 0],
vec![1, 1, 1, 4, 99, 5, 6, 0, 99],
];
let outputs = vec![
vec![3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50],
vec![2, 0, 0, 0, 99],
vec![2, 3, 0, 6, 99],
vec![2, 4, 4, 5, 99, 9801],
vec![30, 1, 1, 4, 2, 5, 6, 0, 99],
];
for i in 0..programs.len() {
assert_eq!(interpret(&mut programs[i], (), ()), outputs[i][0]);
assert_eq!(programs[i], outputs[i]);
}
}
#[test]
fn test_parse_instruction() {
use AddrMode::*;
use OpCode::*;
type Output = (OpCode, AddrMode, AddrMode, AddrMode);
fn eq(left: Output, right: Output) -> bool {
left.0 == right.0 && left.1 == right.1 && left.2 == right.2 && left.3 == right.3
}
// from day 5 examples
assert!(eq(
parse_instruction(1002).unwrap(),
(Multiply, Pos, Imm, Pos)
));
// synthetic
assert!(eq(parse_instruction(2).unwrap(), (Multiply, Pos, Pos, Pos)));
assert!(eq(parse_instruction(11101).unwrap(), (Add, Imm, Imm, Imm)));
assert!(eq(parse_instruction(10101).unwrap(), (Add, Imm, Pos, Imm)));
assert!(eq(
parse_instruction(104).unwrap(),
(WriteOut, Imm, Pos, Pos)
));
assert!(eq(
parse_instruction(10003).unwrap(),
(ReadIn, Pos, Pos, Imm)
));
}
#[test]
fn day5_snippets() {
// This tests immediate and positional addressing and negative immediate support
// Should: find (100 + -1), store result @4
let mut simple_prog = vec![1101, 100, -1, 4, 0];
interpret(&mut simple_prog, (), ());
assert_eq!(simple_prog[4], 99);
// This should save whatever it gets from input to @0, then print it back out
let arb_input = 10346;
let mut output = Vec::new();
let mut simple_io = vec![3, 0, 4, 0, 99];
interpret(&mut simple_io, arb_input, &mut output);
println!("{:?}", output[0]);
println!("{:?}", simple_io);
assert_eq!(simple_io[0], arb_input);
assert_eq!(output[0], arb_input);
}
#[test]
fn day5_jump_tests() {
// These programs compare the input to 8, outputting 1 if eq or lt, 0 otherwise
// they use different methods for each
// test eq
let progs_eq_to_eight = vec![
vec![3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], // positional
vec![3, 3, 1108, -1, 8, 3, 4, 3, 99], // immediate
];
for (input, exp_out) in vec![(0, 0), (8, 1), (-8, 0), (10, 0)] {
for i in 0..progs_eq_to_eight.len() {
let mut prog = progs_eq_to_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
// test lt
let progs_lt_eight = vec![
vec![3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8], // lt positional
vec![3, 3, 1107, -1, 8, 3, 4, 3, 99], // lt immediate
];
for (input, exp_out) in vec![(0, 1), (-1, 1), (8, 0), (10, 0)] {
for i in 0..progs_lt_eight.len() {
let mut prog = progs_lt_eight[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
// test jump
let jump_progs = vec![
vec![3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9], // positional
vec![3, 3, 1105, -1, 9, 1101, 0, 0, 12, 4, 12, 99, 1], // immediate
];
for (input, exp_out) in vec![(0, 0), (-1, 1), (8, 1), (10, 1)] {
for i in 0..jump_progs.len() {
let mut prog = jump_progs[i].clone();
let mut output = Vec::new();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
#[test]
fn day5_large_test() {
let jmp_prog = vec![
3, 21, 1008, 21, 8, 20, 1005, 20, 22, 107, 8, 21, 20, 1006, 20, 31, 1106, 0, 36, 98, 0,
0, 1002, 21, 125, 20, 4, 20, 1105, 1, 46, 104, 999, 1105, 1, 46, 1101, 1000, 1, 20, 4,
20, 1105, 1, 46, 98, 99,
];
let in_outs = vec![
(-1, 999),
(0, 999),
(5, 999),
(8, 1000),
(9, 1001),
(14240, 1001),
];
let mut output = Vec::new();
for (input, exp_out) in in_outs.into_iter() {
println!("{:?}", input);
let mut prog = jmp_prog.clone();
output.clear();
interpret(&mut prog, input, &mut output);
assert_eq!(exp_out, output[0], "input: {}", input);
}
}
}
| try_from | identifier_name |
main.rs | #![recursion_limit="128"]
//#![allow(unused_imports)]
//#![allow(dead_code)]
extern crate gl;
extern crate glutin;
#[macro_use] extern crate nom;
extern crate image;
extern crate time;
extern crate cupid;
extern crate sysinfo;
extern crate systemstat;
extern crate bytesize;
// extern crate subprocess;
use {
gl::*,
glutin::{
// dpi::*,
event::{Event, WindowEvent, Event::DeviceEvent, },
event_loop::{ControlFlow, EventLoop, },
},
sysinfo::SystemExt,
systemstat::{System, Platform},
// crate::{
// // render::{ * },
// // shader::{ Shader, },
// },
};
// in project stuff
pub mod display; // I think I still need this for storing window dimensions
pub mod gamemgr;
pub mod input;
pub mod loader; // Can be simplified
pub mod render;
pub mod shader;
pub mod text;
pub mod texture; // needed for font atlas but needed things can be ported out
pub mod timer;
pub mod util;
pub use display::Display;
pub use input::Handler;
pub use loader::Loader;
pub use render::{RenderMgr, };
pub use shader::Shader;
pub use timer::Timer;
fn main() {
// Test code for parsing fnt files
// use text::metafile::test_noms;
// test_noms();
// Specify OpenGL version
let gl_request = glutin::GlRequest::Specific(glutin::Api::OpenGl, (4, 3));
let gl_profile = glutin::GlProfile::Core;
// Create a window
let el = EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("RaumEn SysInfo")
.with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0))
.with_maximized(false);
let windowed_context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.build_windowed(wb, &el)
.unwrap();
let windowed_context = unsafe { windowed_context.make_current().unwrap() };
// Set up OpenGL
unsafe {
load_with(|symbol| windowed_context.context().get_proc_address(symbol) as *const _);
ClearColor(0.0, 1.0, 0.0, 1.0);
}
let mut render_mgr = RenderMgr::new();
let mut mgr = render_mgr.mgr.clone();
let mut system = sysinfo::System::new();
let cpu = cpu_name();
let ram = get_ram_total(&mut system);
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let hdd = get_hdd();
let mut fps: f32 = 30.0;
let mut once_per_sec = false;
let mut clean_up_time = false;
{ // Here, we're getting the size of the window in pixels
// and passing it to the update_size() method. It in turn
// updates the Projection Matrix and passes that to
// ALL THE SHADERS, so if you add a SHADER, you need
// to REMEMBER to add that shader to the update_size()
// method near the bottom of this file.
// let dpi = windowed_context.window().get_hidpi_factor();
let size: glutin::dpi::PhysicalSize<u32> = windowed_context.window().inner_size();
mgr.update_size(size.into());
}
{
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.add_font(mgr.clone(), "pirate");
textmgr.add_font(mgr.clone(), "sans");
textmgr.new_text(mgr.clone(), "Title", "SysInfo", "pirate", 4.0, 0.0, 0.0, 1.0, true, true);
textmgr.new_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""), "sans", 2.0, 0.0, 0.4, 1.0, true, true);
textmgr.new_text(mgr.clone(), "FPS", "FPS: 0.0", "sans", 1.5, 0.0, 0.0, 0.3, false, true);
}
// Game loop!
println!("Starting main loop.");
el.run(move |event, _, control_flow| {
// *control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => {
println!("Cleaning Up...");
// Clean up
render_mgr.clean_up();
clean_up_time = true;
return
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; },
WindowEvent::Resized(size) => {
windowed_context.resize(size);
mgr.update_size(size.into());
},
_ => { mgr.handler_do(|handler| { handler.window_event(&event); }); }
},
DeviceEvent { event, .. } => { mgr.handler_do(|handler| { handler.device_event(&event); }); }
Event::NewEvents( _time ) => {
// Emitted when new events arrive from the OS to be processed.
//
// This event type is useful as a place to put code that should be done before you start processing events, such as
// updating frame timing information for benchmarking or checking the [StartCause][crate::event::StartCause] to see
// if a timer set by [ControlFlow::WaitUntil] has elapsed.
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing is about to begin.
// This event is useful as a place to put your code that should be run after all state-changing events have been
// handled and you want to do stuff (updating state, performing calculations, etc) that happens as the "main body"
// of your event loop.
// If your program draws graphics, it's usually better to do it in response to Event::RedrawRequested, which gets
// emitted immediately after this event.
{
let mut handler = mgr.handler.lock().unwrap();
handler.timer.tick();
handler.reset_delta();
if handler.timer.once_per_sec() {
fps = handler.timer.fps;
once_per_sec = true;
}
}
if once_per_sec {
once_per_sec = false;
println!("Once per second FPS: {}", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.update_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""));
textmgr.update_text(mgr.clone(), "FPS", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
}
windowed_context.window().request_redraw();
}
Event::RedrawRequested(_) => {
// Emitted after MainEventsCleared when a window should be redrawn.
// This gets triggered in two scenarios:
// - The OS has performed an operation that's invalidated the window's contents (such as resizing the window).
// - The application has explicitly requested a redraw via Window::request_redraw.
// During each iteration of the event loop, Winit will aggregate duplicate redraw requests into a single event,
// to help avoid duplicating rendering work.
if clean_up_time { return; }
// *** Drawing phase
render_mgr.render();
// _fbo_final.blit_to_screen(&world);
// Write the new frame to the screen!
windowed_context.swap_buffers().unwrap();
}
Event::RedrawEventsCleared => {
// Emitted after all RedrawRequested events have been processed and control flow is about to be taken away from
// the program. If there are no RedrawRequested events, it is emitted immediately after MainEventsCleared.
// This event is useful for doing any cleanup or bookkeeping work after all the rendering tasks have been completed.
}
e => println!("Other Event:\n{:?}", e)
}
});
}
pub const EOF: &str = "\04";
pub fn eof(string: &str) -> String {
[string, EOF].join("")
}
// pub fn call_cmd(cmd: &str) -> Result<String, String> {
// use subprocess::{Exec,Redirection};
// let out = Exec::shell(cmd)
// .stdout(Redirection::Pipe)
// .capture().map_err(|e|e.to_string())?
// .stdout_str();
// return Ok(out.trim().to_owned());
// }
// use nom::{multispace, rest_s};
// named!(_cpu_name<&str, String>,
// do_parse!(
// tag!("Name") >> multispace >> out: rest_s >>
// ( out.to_owned() )
// )
// );
fn mk_cpu_ram_str(cpu: &str, ram: &str, system: &mut sysinfo::System) -> String |
fn cpu_name() -> String {
// use cupid;
let info = cupid::master();
match info {
Some(x) => {
match x.brand_string() {
Some(x) => { ["CPU: ".to_owned(), x.to_owned()].join("") }
_ => { "Could not get CPU Name".to_owned() }
}
}
_ => { "Could not get CPU Name".to_owned() }
}
}
fn get_ram_total(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_total = ((system.get_total_memory() as f32 / 1024.0) / 1024.0).round();
format!("Total Memory: {} GB", ram_total )
}
fn get_ram_used(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_used = (((system.get_used_memory() as f32 / 1024.0) / 1024.0) * 1000.0).round() / 1000.0;
format!("Used Memory : {:.3} GB", ram_used )
}
fn get_hdd() -> String {
let sys = System::new();
let mut out = String::new();
match sys.mounts() {
Ok(mounts) => {
for mount in mounts.iter() {
if mount.total == bytesize::ByteSize::b(0) { continue; }
let mnt = mount.fs_mounted_on.clone();
if is_dir_or_subdir_linux(&mnt, "/boot") { continue; }
if is_dir_or_subdir_linux(&mnt, "/dev") { continue; }
if is_dir_or_subdir_linux(&mnt, "/run") { continue; }
if is_dir_or_subdir_linux(&mnt, "/snap") { continue; }
if is_dir_or_subdir_linux(&mnt, "/sys") { continue; }
out = format!("{}\n{} Size: {}; Free: {}",
out, mount.fs_mounted_on, mount.total, mount.avail);
}
}
Err(x) => println!("\nMounts: error: {}", x)
}
out
}
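// Expected behavior of the helper below (a sketch of the intent):
// ("/dev", "/dev") -> true, ("/dev/shm", "/dev") -> true, and
// ("/device", "/dev") -> false, since only "/dev" itself or paths under
// "/dev/" should count as the directory or a subdirectory.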
fn is_dir_or_subdir_linux(test: &str, path: &str) -> bool {
let tc = test.chars().count();
let pc = path.chars().count();
let pc2 = pc + 1;
let path2: &str = &format!("{}/", path);
if (tc == pc && test == path)
|| (tc > pc2 && &test[..pc2] == path2)
{ return true; }
false
} | {
let ram_used = get_ram_used(system);
[cpu.to_owned(), ram.to_owned(), ram_used].join("\n")
} | identifier_body |
main.rs | #![recursion_limit="128"]
//#![allow(unused_imports)]
//#![allow(dead_code)]
extern crate gl;
extern crate glutin;
#[macro_use] extern crate nom;
extern crate image;
extern crate time;
extern crate cupid;
extern crate sysinfo;
extern crate systemstat;
extern crate bytesize;
// extern crate subprocess;
use {
gl::*,
glutin::{
// dpi::*,
event::{Event, WindowEvent, Event::DeviceEvent, },
event_loop::{ControlFlow, EventLoop, },
},
sysinfo::SystemExt,
systemstat::{System, Platform},
// crate::{
// // render::{ * },
// // shader::{ Shader, },
// },
};
// in project stuff
pub mod display; // I think I still need this for storing window dimensions
pub mod gamemgr;
pub mod input;
pub mod loader; // Can be simplified
pub mod render;
pub mod shader;
pub mod text;
pub mod texture; // needed for font atlas but needed things can be ported out
pub mod timer;
pub mod util;
pub use display::Display;
pub use input::Handler;
pub use loader::Loader;
pub use render::{RenderMgr, };
pub use shader::Shader;
pub use timer::Timer;
fn main() {
// Test code for parsing fnt files
// use text::metafile::test_noms;
// test_noms();
// Specify OpenGL version
let gl_request = glutin::GlRequest::Specific(glutin::Api::OpenGl, (4, 3));
let gl_profile = glutin::GlProfile::Core;
// Create a window
let el = EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("RaumEn SysInfo")
.with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0))
.with_maximized(false);
let windowed_context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.build_windowed(wb, &el)
.unwrap();
let windowed_context = unsafe { windowed_context.make_current().unwrap() };
// Set up OpenGL
unsafe {
load_with(|symbol| windowed_context.context().get_proc_address(symbol) as *const _);
ClearColor(0.0, 1.0, 0.0, 1.0);
}
let mut render_mgr = RenderMgr::new();
let mut mgr = render_mgr.mgr.clone();
let mut system = sysinfo::System::new();
let cpu = cpu_name();
let ram = get_ram_total(&mut system);
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let hdd = get_hdd();
let mut fps: f32 = 30.0;
let mut once_per_sec = false;
let mut clean_up_time = false;
{ // Here, we're getting the size of the window in pixels
// and passing it to the update_size() method. It in turn
// updates the Projection Matrix and passes that to
// ALL THE SHADERS, so if you add a SHADER, you need
// to REMEMBER to add that shader to the update_size()
// method near the bottom of this file.
// let dpi = windowed_context.window().get_hidpi_factor();
let size: glutin::dpi::PhysicalSize<u32> = windowed_context.window().inner_size();
mgr.update_size(size.into());
}
{
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.add_font(mgr.clone(), "pirate");
textmgr.add_font(mgr.clone(), "sans");
textmgr.new_text(mgr.clone(), "Title", "SysInfo", "pirate", 4.0, 0.0, 0.0, 1.0, true, true);
textmgr.new_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""), "sans", 2.0, 0.0, 0.4, 1.0, true, true);
textmgr.new_text(mgr.clone(), "FPS", "FPS: 0.0", "sans", 1.5, 0.0, 0.0, 0.3, false, true);
}
// Game loop!
println!("Starting main loop.");
el.run(move |event, _, control_flow| {
// *control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => {
println!("Cleaning Up...");
// Clean up
render_mgr.clean_up();
clean_up_time = true;
return
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; },
WindowEvent::Resized(size) => {
windowed_context.resize(size);
mgr.update_size(size.into());
},
_ => { mgr.handler_do(|handler| { handler.window_event(&event); }); }
},
DeviceEvent { event, .. } => { mgr.handler_do(|handler| { handler.device_event(&event); }); }
Event::NewEvents( _time ) => {
// Emitted when new events arrive from the OS to be processed.
//
// This event type is useful as a place to put code that should be done before you start processing events, such as
// updating frame timing information for benchmarking or checking the [StartCause][crate::event::StartCause] to see
// if a timer set by [ControlFlow::WaitUntil] has elapsed.
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing is about to begin.
// This event is useful as a place to put your code that should be run after all state-changing events have been
// handled and you want to do stuff (updating state, performing calculations, etc) that happens as the "main body"
// of your event loop.
// If your program draws graphics, it's usually better to do it in response to Event::RedrawRequested, which gets
// emitted immediately after this event.
{
let mut handler = mgr.handler.lock().unwrap();
handler.timer.tick();
handler.reset_delta();
if handler.timer.once_per_sec() {
fps = handler.timer.fps;
once_per_sec = true;
}
}
if once_per_sec {
once_per_sec = false;
println!("Once per second FPS: {}", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.update_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""));
textmgr.update_text(mgr.clone(), "FPS", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
}
windowed_context.window().request_redraw();
}
Event::RedrawRequested(_) => {
// Emitted after MainEventsCleared when a window should be redrawn.
// This gets triggered in two scenarios:
// - The OS has performed an operation that's invalidated the window's contents (such as resizing the window).
// - The application has explicitly requested a redraw via Window::request_redraw.
// During each iteration of the event loop, Winit will aggregate duplicate redraw requests into a single event,
// to help avoid duplicating rendering work.
if clean_up_time { return; }
// *** Drawing phase
render_mgr.render();
// _fbo_final.blit_to_screen(&world);
// Write the new frame to the screen!
windowed_context.swap_buffers().unwrap();
}
Event::RedrawEventsCleared => {
// Emitted after all RedrawRequested events have been processed and control flow is about to be taken away from
// the program. If there are no RedrawRequested events, it is emitted immediately after MainEventsCleared.
// This event is useful for doing any cleanup or bookkeeping work after all the rendering tasks have been completed.
}
e => println!("Other Event:\n{:?}", e)
}
});
}
pub const EOF: &str = "\04";
pub fn eof(string: &str) -> String {
[string, EOF].join("")
}
// pub fn call_cmd(cmd: &str) -> Result<String, String> {
// use subprocess::{Exec,Redirection};
// let out = Exec::shell(cmd)
// .stdout(Redirection::Pipe)
// .capture().map_err(|e|e.to_string())?
// .stdout_str();
// return Ok(out.trim().to_owned());
// }
// use nom::{multispace, rest_s};
// named!(_cpu_name<&str, String>,
// do_parse!(
// tag!("Name") >> multispace >> out: rest_s >>
// ( out.to_owned() )
// )
// );
fn mk_cpu_ram_str(cpu: &str, ram: &str, system: &mut sysinfo::System) -> String {
let ram_used = get_ram_used(system);
[cpu.to_owned(), ram.to_owned(), ram_used].join("\n")
}
fn cpu_name() -> String {
// use cupid;
let info = cupid::master();
match info {
Some(x) => {
match x.brand_string() {
Some(x) => { ["CPU: ".to_owned(), x.to_owned()].join("") }
_ => { "Could not get CPU Name".to_owned() }
}
}
_ => { "Could not get CPU Name".to_owned() }
}
}
fn get_ram_total(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_total = ((system.get_total_memory() as f32 / 1024.0) / 1024.0).round();
format!("Total Memory: {} GB", ram_total )
}
fn get_ram_used(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_used = (((system.get_used_memory() as f32 / 1024.0) / 1024.0) * 1000.0).round() / 1000.0;
format!("Used Memory : {:.3} GB", ram_used )
}
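// Unit-conversion sketch for the two helpers above (assuming this sysinfo
// version reports memory in KiB): 8_388_608 KiB / 1024 / 1024 == 8 GiB, and
// the `* 1000.0 ... / 1000.0` round trip keeps three decimal places.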
fn get_hdd() -> String {
let sys = System::new();
let mut out = String::new();
match sys.mounts() {
Ok(mounts) => {
for mount in mounts.iter() {
if mount.total == bytesize::ByteSize::b(0) { continue; }
let mnt = mount.fs_mounted_on.clone();
if is_dir_or_subdir_linux(&mnt, "/boot") { continue; }
if is_dir_or_subdir_linux(&mnt, "/dev") { continue; }
if is_dir_or_subdir_linux(&mnt, "/run") |
if is_dir_or_subdir_linux(&mnt, "/snap") { continue; }
if is_dir_or_subdir_linux(&mnt, "/sys") { continue; }
out = format!("{}\n{} Size: {}; Free: {}",
out, mount.fs_mounted_on, mount.total, mount.avail);
}
}
Err(x) => println!("\nMounts: error: {}", x)
}
out
}
fn is_dir_or_subdir_linux(test: &str, path: &str) -> bool {
let tc = test.chars().count();
let pc = path.chars().count();
let pc2 = pc + 1;
let path2: &str = &format!("{}/", path);
if (tc == pc && test == path)
|| (tc > pc2 && &test[..pc2] == path2)
{ return true; }
false
} | { continue; } | conditional_block |
main.rs | #![recursion_limit="128"]
//#![allow(unused_imports)]
//#![allow(dead_code)]
extern crate gl;
extern crate glutin;
#[macro_use] extern crate nom;
extern crate image;
extern crate time;
extern crate cupid;
extern crate sysinfo;
extern crate systemstat;
extern crate bytesize;
// extern crate subprocess;
use {
gl::*,
glutin::{
// dpi::*,
event::{Event, WindowEvent, Event::DeviceEvent, },
event_loop::{ControlFlow, EventLoop, },
},
sysinfo::SystemExt,
systemstat::{System, Platform},
// crate::{
// // render::{ * },
// // shader::{ Shader, },
// },
};
// in project stuff
pub mod display; // I think I still need this for storing window dimensions
pub mod gamemgr;
pub mod input;
pub mod loader; // Can be simplified
pub mod render;
pub mod shader;
pub mod text;
pub mod texture; // needed for font atlas but needed things can be ported out
pub mod timer;
pub mod util;
pub use display::Display;
pub use input::Handler;
pub use loader::Loader;
pub use render::{RenderMgr, };
pub use shader::Shader;
pub use timer::Timer;
fn main() {
// Test code for parsing fnt files
// use text::metafile::test_noms;
// test_noms();
// Specify OpenGL version
let gl_request = glutin::GlRequest::Specific(glutin::Api::OpenGl, (4, 3));
let gl_profile = glutin::GlProfile::Core;
// Create a window
let el = EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("RaumEn SysInfo")
.with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0))
.with_maximized(false);
let windowed_context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.build_windowed(wb, &el)
.unwrap();
let windowed_context = unsafe { windowed_context.make_current().unwrap() };
// Set up OpenGL
unsafe {
load_with(|symbol| windowed_context.context().get_proc_address(symbol) as *const _);
ClearColor(0.0, 1.0, 0.0, 1.0);
}
let mut render_mgr = RenderMgr::new();
let mut mgr = render_mgr.mgr.clone();
let mut system = sysinfo::System::new();
let cpu = cpu_name();
let ram = get_ram_total(&mut system);
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let hdd = get_hdd();
let mut fps: f32 = 30.0;
let mut once_per_sec = false;
let mut clean_up_time = false;
{ // Here, we're getting the size of the window in pixels
// and passing it to the update_size() method. It in turn
// updates the Projection Matrix and passes that to
// ALL THE SHADERS, so if you add a SHADER, you need
// to REMEMBER to add that shader to the update_size()
// method near the bottom of this file.
// let dpi = windowed_context.window().get_hidpi_factor();
let size: glutin::dpi::PhysicalSize<u32> = windowed_context.window().inner_size();
mgr.update_size(size.into());
}
{
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.add_font(mgr.clone(), "pirate");
textmgr.add_font(mgr.clone(), "sans");
textmgr.new_text(mgr.clone(), "Title", "SysInfo", "pirate", 4.0, 0.0, 0.0, 1.0, true, true);
textmgr.new_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""), "sans", 2.0, 0.0, 0.4, 1.0, true, true);
textmgr.new_text(mgr.clone(), "FPS", "FPS: 0.0", "sans", 1.5, 0.0, 0.0, 0.3, false, true);
}
// Game loop!
println!("Starting main loop.");
el.run(move |event, _, control_flow| {
// *control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => {
println!("Cleaning Up...");
// Clean up
render_mgr.clean_up();
clean_up_time = true;
return
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; },
WindowEvent::Resized(size) => {
windowed_context.resize(size);
mgr.update_size(size.into());
},
_ => { mgr.handler_do(|handler| { handler.window_event(&event); }); }
},
DeviceEvent { event, .. } => { mgr.handler_do(|handler| { handler.device_event(&event); }); }
Event::NewEvents( _time ) => {
// Emitted when new events arrive from the OS to be processed.
//
// This event type is useful as a place to put code that should be done before you start processing events, such as
// updating frame timing information for benchmarking or checking the [StartCause][crate::event::StartCause] to see
// if a timer set by [ControlFlow::WaitUntil] has elapsed.
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing is about to begin.
// This event is useful as a place to put your code that should be run after all state-changing events have been
// handled and you want to do stuff (updating state, performing calculations, etc) that happens as the "main body"
// of your event loop.
// If your program draws graphics, it's usually better to do it in response to Event::RedrawRequested, which gets
// emitted immediately after this event.
{
let mut handler = mgr.handler.lock().unwrap();
handler.timer.tick();
handler.reset_delta();
if handler.timer.once_per_sec() {
fps = handler.timer.fps;
once_per_sec = true;
}
}
if once_per_sec {
once_per_sec = false;
println!("Once per second FPS: {}", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.update_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""));
textmgr.update_text(mgr.clone(), "FPS", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
}
windowed_context.window().request_redraw();
}
Event::RedrawRequested(_) => {
// Emitted after MainEventsCleared when a window should be redrawn.
// This gets triggered in two scenarios:
// - The OS has performed an operation that's invalidated the window's contents (such as resizing the window).
// - The application has explicitly requested a redraw via Window::request_redraw.
// During each iteration of the event loop, Winit will aggregate duplicate redraw requests into a single event,
// to help avoid duplicating rendering work.
if clean_up_time { return; }
// *** Drawing phase
render_mgr.render();
// _fbo_final.blit_to_screen(&world);
// Write the new frame to the screen!
windowed_context.swap_buffers().unwrap();
}
Event::RedrawEventsCleared => {
// Emitted after all RedrawRequested events have been processed and control flow is about to be taken away from
// the program. If there are no RedrawRequested events, it is emitted immediately after MainEventsCleared.
// This event is useful for doing any cleanup or bookkeeping work after all the rendering tasks have been completed.
}
e => println!("Other Event:\n{:?}", e)
}
});
}
pub const EOF: &str = "\04";
pub fn eof(string: &str) -> String {
[string, EOF].join("")
}
// pub fn call_cmd(cmd: &str) -> Result<String, String> {
// use subprocess::{Exec,Redirection};
// let out = Exec::shell(cmd)
// .stdout(Redirection::Pipe)
// .capture().map_err(|e|e.to_string())?
// .stdout_str();
// return Ok(out.trim().to_owned());
// }
// use nom::{multispace, rest_s};
// named!(_cpu_name<&str, String>,
// do_parse!(
// tag!("Name") >> multispace >> out: rest_s >>
// ( out.to_owned() )
// )
// );
fn mk_cpu_ram_str(cpu: &str, ram: &str, system: &mut sysinfo::System) -> String {
let ram_used = get_ram_used(system);
[cpu.to_owned(), ram.to_owned(), ram_used].join("\n")
}
fn cpu_name() -> String {
// use cupid;
let info = cupid::master();
match info {
Some(x) => {
match x.brand_string() {
Some(x) => { ["CPU: ".to_owned(), x.to_owned()].join("") }
_ => { "Could not get CPU Name".to_owned() }
}
}
_ => { "Could not get CPU Name".to_owned() }
}
}
fn get_ram_total(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_total = ((system.get_total_memory() as f32 / 1024.0) / 1024.0).round();
format!("Total Memory: {} GB", ram_total )
}
fn get_ram_used(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_used = (((system.get_used_memory() as f32 / 1024.0) / 1024.0) * 1000.0).round() / 1000.0;
format!("Used Memory : {:.3} GB", ram_used )
}
fn | () -> String {
let sys = System::new();
let mut out = String::new();
match sys.mounts() {
Ok(mounts) => {
for mount in mounts.iter() {
if mount.total == bytesize::ByteSize::b(0) { continue; }
let mnt = mount.fs_mounted_on.clone();
if is_dir_or_subdir_linux(&mnt, "/boot") { continue; }
if is_dir_or_subdir_linux(&mnt, "/dev") { continue; }
if is_dir_or_subdir_linux(&mnt, "/run") { continue; }
if is_dir_or_subdir_linux(&mnt, "/snap") { continue; }
if is_dir_or_subdir_linux(&mnt, "/sys") { continue; }
out = format!("{}\n{} Size: {}; Free: {}",
out, mount.fs_mounted_on, mount.total, mount.avail);
}
}
Err(x) => println!("\nMounts: error: {}", x)
}
out
}
fn is_dir_or_subdir_linux(test: &str, path: &str) -> bool {
let tc = test.chars().count();
let pc = path.chars().count();
let pc2 = pc + 1;
let path2: &str = &format!("{}/", path);
if (tc == pc && test == path)
|| (tc > pc2 && &test[..pc2] == path2)
{ return true; }
false
} | get_hdd | identifier_name |
main.rs | #![recursion_limit="128"]
//#![allow(unused_imports)]
//#![allow(dead_code)]
extern crate gl;
extern crate glutin;
#[macro_use] extern crate nom;
extern crate image;
extern crate time;
extern crate cupid;
extern crate sysinfo;
extern crate systemstat;
extern crate bytesize;
// extern crate subprocess;
use {
gl::*,
glutin::{
// dpi::*,
event::{Event, WindowEvent, Event::DeviceEvent, },
event_loop::{ControlFlow, EventLoop, },
},
sysinfo::SystemExt,
systemstat::{System, Platform},
// crate::{
// // render::{ * },
// // shader::{ Shader, },
// },
};
// in project stuff
pub mod display; // I think I still need this for storing window dimensions
pub mod gamemgr;
pub mod input;
pub mod loader; // Can be simplified
pub mod render;
pub mod shader;
pub mod text;
pub mod texture; // needed for font atlas but needed things can be ported out
pub mod timer;
pub mod util;
pub use display::Display;
pub use input::Handler;
pub use loader::Loader;
pub use render::{RenderMgr, };
pub use shader::Shader;
pub use timer::Timer;
fn main() {
// Test code for parsing fnt files
// use text::metafile::test_noms;
// test_noms();
// Specify OpenGL version
let gl_request = glutin::GlRequest::Specific(glutin::Api::OpenGl, (4, 3));
let gl_profile = glutin::GlProfile::Core;
// Create a window
let el = EventLoop::new();
let wb = glutin::window::WindowBuilder::new()
.with_title("RaumEn SysInfo")
.with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0))
.with_maximized(false);
let windowed_context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.build_windowed(wb, &el)
.unwrap();
let windowed_context = unsafe { windowed_context.make_current().unwrap() };
// Set up OpenGL
unsafe {
load_with(|symbol| windowed_context.context().get_proc_address(symbol) as *const _);
ClearColor(0.0, 1.0, 0.0, 1.0);
}
let mut render_mgr = RenderMgr::new();
let mut mgr = render_mgr.mgr.clone();
let mut system = sysinfo::System::new();
let cpu = cpu_name();
let ram = get_ram_total(&mut system);
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let hdd = get_hdd();
let mut fps: f32 = 30.0;
let mut once_per_sec = false;
let mut clean_up_time = false;
{ // Here, we're getting the size of the window in pixels
// and passing it to the update_size() method. It in turn
// updates the Projection Matrix and passes that to
// ALL THE SHADERS, so if you add a SHADER, you need
// to REMEMBER to add that shader to the update_size()
// method near the bottom of this file.
// let dpi = windowed_context.window().get_hidpi_factor();
let size: glutin::dpi::PhysicalSize<u32> = windowed_context.window().inner_size();
mgr.update_size(size.into());
}
{
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.add_font(mgr.clone(), "pirate");
textmgr.add_font(mgr.clone(), "sans");
textmgr.new_text(mgr.clone(), "Title", "SysInfo", "pirate", 4.0, 0.0, 0.0, 1.0, true, true);
textmgr.new_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""), "sans", 2.0, 0.0, 0.4, 1.0, true, true);
textmgr.new_text(mgr.clone(), "FPS", "FPS: 0.0", "sans", 1.5, 0.0, 0.0, 0.3, false, true);
}
// Game loop!
println!("Starting main loop.");
el.run(move |event, _, control_flow| {
// *control_flow = ControlFlow::Wait;
match event {
Event::LoopDestroyed => {
println!("Cleaning Up...");
// Clean up
render_mgr.clean_up();
clean_up_time = true;
return
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; },
WindowEvent::Resized(size) => {
windowed_context.resize(size);
mgr.update_size(size.into());
},
_ => { mgr.handler_do(|handler| { handler.window_event(&event); }); }
},
DeviceEvent { event, .. } => { mgr.handler_do(|handler| { handler.device_event(&event); }); }
Event::NewEvents( _time ) => {
// Emitted when new events arrive from the OS to be processed.
//
// This event type is useful as a place to put code that should be done before you start processing events, such as
// updating frame timing information for benchmarking or checking the [StartCause][crate::event::StartCause] to see
// if a timer set by [ControlFlow::WaitUntil] has elapsed.
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing is about to begin.
// This event is useful as a place to put your code that should be run after all state-changing events have been
// handled and you want to do stuff (updating state, performing calculations, etc) that happens as the "main body"
// of your event loop.
// If your program draws graphics, it's usually better to do it in response to Event::RedrawRequested, which gets
// emitted immediately after this event.
{
let mut handler = mgr.handler.lock().unwrap();
handler.timer.tick();
handler.reset_delta();
if handler.timer.once_per_sec() {
fps = handler.timer.fps;
once_per_sec = true;
}
}
if once_per_sec {
once_per_sec = false;
println!("Once per second FPS: {}", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
let cpu_ram = mk_cpu_ram_str(&cpu, &ram, &mut system);
let _textmgr = mgr.clone().textmgr.take().unwrap();
let mut textmgr = _textmgr.lock().unwrap();
textmgr.update_text(mgr.clone(), "CPU RAM HDD", &[cpu_ram, hdd.clone()].join(""));
textmgr.update_text(mgr.clone(), "FPS", &format!("FPS: {:.3}", (fps * 1000.0).round() / 1000.0 ) );
}
windowed_context.window().request_redraw();
}
Event::RedrawRequested(_) => {
// Emitted after MainEventsCleared when a window should be redrawn.
// This gets triggered in two scenarios:
// - The OS has performed an operation that's invalidated the window's contents (such as resizing the window).
// - The application has explicitly requested a redraw via Window::request_redraw.
// During each iteration of the event loop, Winit will aggregate duplicate redraw requests into a single event,
// to help avoid duplicating rendering work.
if clean_up_time { return; }
// *** Drawing phase
render_mgr.render();
// _fbo_final.blit_to_screen(&world);
// Write the new frame to the screen!
windowed_context.swap_buffers().unwrap();
}
Event::RedrawEventsCleared => {
// Emitted after all RedrawRequested events have been processed and control flow is about to be taken away from
// the program. If there are no RedrawRequested events, it is emitted immediately after MainEventsCleared.
// This event is useful for doing any cleanup or bookkeeping work after all the rendering tasks have been completed.
}
e => println!("Other Event:\n{:?}", e)
}
});
}
pub const EOF: &str = "\04";
pub fn eof(string: &str) -> String {
[string, EOF].join("")
}
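// Note: "\04" is the NUL escape `\0` followed by a literal '4', not a
// single byte; if ASCII EOT (0x04) was intended, "\x04" would express it.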
// pub fn call_cmd(cmd: &str) -> Result<String, String> {
// use subprocess::{Exec,Redirection};
// let out = Exec::shell(cmd)
// .stdout(Redirection::Pipe)
// .capture().map_err(|e|e.to_string())?
// .stdout_str();
// return Ok(out.trim().to_owned());
// }
// use nom::{multispace, rest_s};
// named!(_cpu_name<&str, String>,
// do_parse!(
// tag!("Name") >> multispace >> out: rest_s >>
// ( out.to_owned() )
// )
// );
fn mk_cpu_ram_str(cpu: &str, ram: &str, system: &mut sysinfo::System) -> String {
let ram_used = get_ram_used(system);
[cpu.to_owned(), ram.to_owned(), ram_used].join("\n")
}
fn cpu_name() -> String {
// use cupid;
let info = cupid::master();
match info {
Some(x) => {
match x.brand_string() {
Some(x) => { ["CPU: ".to_owned(), x.to_owned()].join("") }
_ => { "Could not get CPU Name".to_owned() }
}
}
_ => { "Could not get CPU Name".to_owned() }
}
}
fn get_ram_total(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_total = ((system.get_total_memory() as f32 / 1024.0) / 1024.0).round();
format!("Total Memory: {} GB", ram_total )
}
fn get_ram_used(system: &mut sysinfo::System) -> String {
system.refresh_all();
let ram_used = (((system.get_used_memory() as f32 / 1024.0) / 1024.0) * 1000.0).round() / 1000.0;
format!("Used Memory : {:.3} GB", ram_used )
}
fn get_hdd() -> String {
let sys = System::new();
let mut out = String::new();
match sys.mounts() {
Ok(mounts) => {
for mount in mounts.iter() {
if mount.total == bytesize::ByteSize::b(0) { continue; }
let mnt = mount.fs_mounted_on.clone();
if is_dir_or_subdir_linux(&mnt, "/boot") { continue; }
if is_dir_or_subdir_linux(&mnt, "/dev") { continue; }
if is_dir_or_subdir_linux(&mnt, "/run") { continue; }
if is_dir_or_subdir_linux(&mnt, "/snap") { continue; }
if is_dir_or_subdir_linux(&mnt, "/sys") { continue; } | out = format!("{}\n{} Size: {}; Free: {}",
out, mount.fs_mounted_on, mount.total, mount.avail);
}
}
Err(x) => println!("\nMounts: error: {}", x)
}
out
}
fn is_dir_or_subdir_linux(test: &str, path: &str) -> bool {
let tc = test.chars().count();
let pc = path.chars().count();
let pc2 = pc + 1;
let path2: &str = &format!("{}/", path);
if (tc == pc && test == path)
|| (tc > pc2 && &test[..pc2] == path2)
{ return true; }
false
} | random_line_split |
|
lib.rs | #[macro_use]
extern crate compre_combinee;
extern crate combine;
mod errors;
mod details;
mod traits;
mod stop_watch;
use std::collections::{HashMap};
use combine::{parser, eof, satisfy, choice, attempt};
use combine::parser::range::{take_while1};
use combine::parser::char::*;
use combine::{Parser, many, optional, skip_many, sep_by, between};
pub use crate::errors::ErrorCause;
pub use crate::details::Node;
pub use crate::traits::*;
use std::{f64, mem, str};
use std::convert::TryFrom;
use smol_str::SmolStr;
fn parse_hex<'a>() -> impl Parser<&'a str, Output = u32> {
satisfy(|c: char|
(c >= '0' && c <= '9') ||
(c >= 'a' && c <= 'f') ||
(c >= 'A' && c <= 'F')
).map(|c| if c >= '0' && c <= '9' {
c as u64 - '0' as u64
} else if c >= 'a' && c <= 'f' {
10 + c as u64 - 'a' as u64
} else {
10 + c as u64 - 'A' as u64
} as u32
)
}
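// A few sample mappings (an illustration of the closure above):
// '7' -> 7, 'b' -> 11, 'F' -> 15; any other character fails the satisfy().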
fn unicode_char<'a>() -> impl Parser<&'a str, Output = Option<char>> {
c_hx_do!{
__ <- string(r#"\u"#),
d3 <- parse_hex(),
d2 <- parse_hex(),
d1 <- parse_hex(),
d0 <- parse_hex();
{
let unicode = d0 +
0x10 * d1 +
0x100 * d2 +
0x1000 * d3;
char::try_from(unicode).ok()
}
}
}
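// Example: the input r"\u0041" parses digits d3..d0 = 0, 0, 4, 1, giving
// 0x1000*0 + 0x100*0 + 0x10*4 + 1 == 0x41, i.e. Some('A').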
#[derive(PartialEq)]
enum StringPiece<'a >
{
Ref(&'a str),
Char(Option<char>)
}
fn braced_parser<'a, PBL, P, PBR, O>(pbl: PBL, p: P, pbr: PBR) -> impl Parser<&'a str, Output = O>
where
PBL: Parser<&'a str>,
PBR: Parser<&'a str>,
P: Parser<&'a str, Output = O>
{
between(
c_compre![c; c <- pbl, __ <- skip_many(space())],
c_compre![c; __ <- skip_many(space()), c <- pbr],
p
)
}
fn string_part<'a>() -> impl Parser<&'a str, Output = Vec<StringPiece<'a >>> {
many(
choice(
(
attempt(take_while1(|c: char| c != '\\' && c != '"' && c != '\n' && c != '\r' && c != '\t')
.map(|chars: &str| StringPiece::Ref(chars))),
attempt(string("\\\"").map(|_|StringPiece::Ref("\""))),
attempt(string("\\\\").map(|_|StringPiece::Ref("\\"))),
attempt(string("\\n").map(|_|StringPiece::Ref("\n"))),
attempt(string("\\t").map(|_|StringPiece::Ref("\t"))),
attempt(string("\\/").map(|_|StringPiece::Ref("/"))),
attempt(string("\\r").map(|_|StringPiece::Ref("\r"))),
attempt(string("\\f").map(|_|StringPiece::Ref("\u{000c}"))),
attempt(string("\\b").map(|_|StringPiece::Ref("\u{0008}"))),
attempt(unicode_char().map(|s|StringPiece::Char(s))),
)
)
)
}
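// Example: the raw body a\nb (with an escaped newline) parses into the
// pieces [Ref("a"), Ref("\n"), Ref("b")], which the caller concatenates.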
fn string_parser_inner<'a>() -> impl Parser<&'a str, Output = SmolStr> {
c_hx_do! {
x <- between(char('"'), char('"'), string_part());
{
let cap = x.iter().fold(0, |acc, s|
acc +
match s {
StringPiece::Ref(strref) => strref.len(),
StringPiece::Char(c) => c.map(|c_inner| c_inner.len_utf8()).unwrap_or(0)
}
);
if cap <= 22 {
let mut buf: [u8; 22] = [0; 22];
let mut offset = 0;
for s in x.iter() {
match s {
StringPiece::Ref(strref) => {
for &b in strref.as_bytes() {
buf[offset] = b;
offset += 1;
}
},
StringPiece::Char(c) => {
if let Some(chr) = c {
chr.encode_utf8(&mut buf[offset..]);
offset += chr.len_utf8();
}
}
}
}
return unsafe {
SmolStr::new(str::from_utf8_unchecked(&buf[0..cap]))
};
}
let mut str = String::with_capacity(cap);
for s in x.iter() {
match s {
StringPiece::Ref(strref) => str.push_str(strref),
StringPiece::Char(c) => if let Some(chr) = c { str.push(*chr); }
}
}
SmolStr::new(str)
}
}
}
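// Design note: the 22-byte stack buffer above matches SmolStr's inline
// capacity, so strings up to 22 bytes are assembled without a heap
// allocation before being handed to SmolStr::new.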
fn string_parser<'a>() -> impl Parser<&'a str, Output = Node> {
string_parser_inner().map(|x| Node::String(x))
}
fn digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
take_while1(|c: char| c >= '0' && c <= '9')
}
#[inline(always)]
fn power(lhs: f64, rhs: f64) -> f64 {
lhs.powf(rhs)
}
fn trailing_digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
c_hx_do! {
__ <- char('.'),
rest <- digit_sequence();
rest
}
}
| __ <- satisfy(|c: char| c == 'e' || c == 'E'),
sign_char <- optional(satisfy(|c: char| c == '+' || c == '-')),
digits <- digit_sequence();
{
let sign = match sign_char {
Some('-') => -1.0,
_ => 1.0
};
let mut acc = 0;
for c in digits.as_bytes() {
acc = acc * 10 + (c - b'0') as u64;
}
power(10.0, sign * acc as f64)
}
}
}
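// Example: "e-2" parses as sign = -1.0 and digits "2", yielding
// power(10.0, -2.0) == 0.01; "E3" yields 1000.0.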
#[derive(PartialEq, Copy, Clone)]
enum NumberPrefix<'a >
{
LeadingZero,
Digits(char, &'a str)
}
fn leading_zero_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
char('0').map(|_| NumberPrefix::LeadingZero)
}
fn leading_digits_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
c_hx_do! {
leading_digit <- satisfy(|c: char| c >= '1' && c <= '9'),
digs <- optional(digit_sequence());
NumberPrefix::Digits(leading_digit, digs.unwrap_or(""))
}
}
fn leading_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
choice((
attempt(leading_digits_parser()),
attempt(leading_zero_parser()),
))
}
fn number_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do! {
minus_sign <- optional(char('-')),
leading <- leading_parser(),
trail <- optional(trailing_digit_sequence()),
exp <- optional(exponent_parser());
{
Node::Number({
let mut acc = match leading {
NumberPrefix::LeadingZero => 0.0,
NumberPrefix::Digits(leading_digit, l_digs) => {
let mut l = (leading_digit as u8 - b'0') as u64;
for c in l_digs.as_bytes() {
l = l * 10 + (c - b'0') as u64;
}
l as f64
}
};
if let Some(t_digs) = trail {
let mut divider = 1.0;
for c in t_digs.as_bytes() {
divider /= 10.0;
acc += (c - b'0') as f64 * divider;
}
}
if let Some(exponent) = exp {
acc *= exponent;
}
if let Some(_) = minus_sign {
-acc
} else {
acc
}
})
}
}
}
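// Worked example: "-12.5e1" parses as leading digits 12, trailing digit 5
// (acc = 12.5), exponent 10.0, and a minus sign, producing
// Node::Number(-125.0).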
fn bool_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
word <- string("true").or(string("false"));
match word {
"true" => Node::Boolean(true),
_ => Node::Boolean(false)
}
}
}
fn null_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
_word <- string("null");
Node::Null
}
}
macro_rules! ref_parser {
($parser_fn:ident) => {
parser(|input| {
let _: &mut &str = input;
$parser_fn().parse_stream(input).into_result()
})
}
}
fn primitive_parser<'a>() -> impl Parser<&'a str, Output = Node> {
let possible_parser = bool_parser()
.or(number_parser())
.or(string_parser())
.or(null_parser())
.or(ref_parser!(array_parser))
.or(ref_parser!(dictionary_parser));
c_hx_do! {
__ <- skip_many(space()),
pars <- possible_parser,
___ <- skip_many(space());
pars
}
}
fn array_parser<'a>() -> impl Parser<&'a str, Output = Node> {
braced_parser(
char('['),
sep_by(primitive_parser(), char(',')),
char(']')
).map(|nodes: Vec<Node>|
Node::Array(nodes)
)
}
fn pair_parser<'a>() -> impl Parser<&'a str, Output = Option<(SmolStr, Node)>> {
let str_parser = c_hx_do!{
__ <- skip_many(space()),
stp <- string_parser_inner(),
___ <- skip_many(space());
stp
};
c_hx_do!{
l <- str_parser,
__ <- char(':'),
r <- primitive_parser();
Some((l, r))
}
}
fn dictionary_parser<'a>() -> impl Parser<&'a str, Output = Node> {
braced_parser(
char('{'),
sep_by(pair_parser(), char(',')),
char('}')
).map(|pairs: Vec<Option<(SmolStr, Node)>>| {
let mut dict = HashMap::with_capacity(pairs.len());
for mut pair in pairs {
let (l, r) = mem::replace(&mut pair, None).unwrap();
dict.insert(l, r);
}
Node::Object(
dict
)
})
}
fn json_parser<'a>() -> impl Parser<&'a str, Output = Node> {
null_parser()
.or(bool_parser())
.or(number_parser())
.or(string_parser())
.or(array_parser())
.or(dictionary_parser())
}
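// Usage sketch for parse_json below (hypothetical input): parsing
// " [1, 2] " should yield Ok(Node::Array(vec![Node::Number(1.0),
// Node::Number(2.0)])), while trailing garbage fails at the eof() check.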
pub fn parse_json(content: &str) -> Result<Node, String> {
let mut parser = c_hx_do!{
__ <- skip_many(space()),
json <- json_parser(),
___ <- skip_many(space()),
____ <- eof();
json
};
let res = parser.parse(content);
match res {
Err(x) => Err(format!("{}", x.to_string())),
Ok((res,_)) => Ok(res)
}
} | fn exponent_parser<'a>() -> impl Parser<&'a str, Output = f64> {
c_hx_do!{ | random_line_split |
lib.rs | #[macro_use]
extern crate compre_combinee;
extern crate combine;
mod errors;
mod details;
mod traits;
mod stop_watch;
use std::collections::{HashMap};
use combine::{parser, eof, satisfy, choice, attempt};
use combine::parser::range::{take_while1};
use combine::parser::char::*;
use combine::{Parser, many, optional, skip_many, sep_by, between};
pub use crate::errors::ErrorCause;
pub use crate::details::Node;
pub use crate::traits::*;
use std::{f64, mem, str};
use std::convert::TryFrom;
use smol_str::SmolStr;
fn parse_hex<'a>() -> impl Parser<&'a str, Output = u32> {
satisfy(|c: char|
(c >= '0' && c <= '9') ||
(c >= 'a' && c <= 'f') ||
(c >= 'A' && c <= 'F')
).map(|c| if c >= '0' && c <= '9' {
c as u64 - '0' as u64
} else if c >= 'a' && c <= 'f' {
10 + c as u64 - 'a' as u64
} else {
10 + c as u64 - 'A' as u64
} as u32
)
}
fn unicode_char<'a>() -> impl Parser<&'a str, Output = Option<char>> {
c_hx_do!{
__ <- string(r#"\u"#),
d3 <- parse_hex(),
d2 <- parse_hex(),
d1 <- parse_hex(),
d0 <- parse_hex();
{
let unicode = d0 +
0x10 * d1 +
0x100 * d2 +
0x1000 * d3;
char::try_from(unicode).ok()
}
}
}
#[derive(PartialEq)]
enum StringPiece<'a >
{
Ref(&'a str),
Char(Option<char>)
}
fn braced_parser<'a, PBL, P, PBR, O>(pbl: PBL, p: P, pbr: PBR) -> impl Parser<&'a str, Output = O>
where
PBL: Parser<&'a str>,
PBR: Parser<&'a str>,
P: Parser<&'a str, Output = O>
{
between(
c_compre![c; c <- pbl, __ <- skip_many(space())],
c_compre![c; __ <- skip_many(space()), c <- pbr],
p
)
}
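// For example, braced_parser(char('['), p, char(']')) runs p between the
// brackets, with skip_many(space()) absorbing whitespace just inside each
// delimiter.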
fn string_part<'a>() -> impl Parser<&'a str, Output = Vec<StringPiece<'a >>> {
many(
choice(
(
attempt(take_while1(|c: char| c != '\\' && c != '"' && c != '\n' && c != '\r' && c != '\t')
.map(|chars: &str| StringPiece::Ref(chars))),
attempt(string("\\\"").map(|_|StringPiece::Ref("\""))),
attempt(string("\\\\").map(|_|StringPiece::Ref("\\"))),
attempt(string("\\n").map(|_|StringPiece::Ref("\n"))),
attempt(string("\\t").map(|_|StringPiece::Ref("\t"))),
attempt(string("\\/").map(|_|StringPiece::Ref("/"))),
attempt(string("\\r").map(|_|StringPiece::Ref("\r"))),
attempt(string("\\f").map(|_|StringPiece::Ref("\u{000c}"))),
attempt(string("\\b").map(|_|StringPiece::Ref("\u{0008}"))),
attempt(unicode_char().map(|s|StringPiece::Char(s))),
)
)
)
}
fn string_parser_inner<'a>() -> impl Parser<&'a str, Output = SmolStr> {
c_hx_do! {
x <- between(char('"'), char('"'), string_part());
{
let cap = x.iter().fold(0, |acc, s|
acc +
match s {
StringPiece::Ref(strref) => strref.len(),
StringPiece::Char(c) => c.map(|c_inner| c_inner.len_utf8()).unwrap_or(0)
}
);
if cap <= 22 {
let mut buf: [u8; 22] = [0; 22];
let mut offset = 0;
for s in x.iter() {
match s {
StringPiece::Ref(strref) => {
for &b in strref.as_bytes() {
buf[offset] = b;
offset += 1;
}
},
StringPiece::Char(c) => {
if let Some(chr) = c {
chr.encode_utf8(&mut buf[offset..]);
offset += chr.len_utf8();
}
}
}
}
return unsafe {
SmolStr::new(str::from_utf8_unchecked(&buf[0..cap]))
};
}
let mut str = String::with_capacity(cap);
for s in x.iter() {
match s {
StringPiece::Ref(strref) => str.push_str(strref),
StringPiece::Char(c) => if let Some(chr) = c { str.push(*chr); }
}
}
SmolStr::new(str)
}
}
}
fn string_parser<'a>() -> impl Parser<&'a str, Output = Node> {
string_parser_inner().map(|x| Node::String(x))
}
fn digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
take_while1(|c: char| c >= '0' && c <= '9')
}
#[inline(always)]
fn power(lhs: f64, rhs: f64) -> f64 {
lhs.powf(rhs)
}
fn trailing_digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
c_hx_do! {
__ <- char('.'),
rest <- digit_sequence();
rest
}
}
fn exponent_parser<'a>() -> impl Parser<&'a str, Output = f64> {
c_hx_do!{
__ <- satisfy(|c: char| c == 'e' || c == 'E'),
sign_char <- optional(satisfy(|c: char| c == '+' || c == '-')),
digits <- digit_sequence();
{
let sign = match sign_char {
Some('-') => -1.0,
_ => 1.0
};
let mut acc = 0;
for c in digits.as_bytes() {
acc = acc * 10 + (c - b'0') as u64;
}
power(10.0, sign * acc as f64)
}
}
}
#[derive(PartialEq, Copy, Clone)]
enum NumberPrefix<'a >
{
LeadingZero,
Digits(char, &'a str)
}
fn leading_zero_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
char('0').map(|_| NumberPrefix::LeadingZero)
}
fn leading_digits_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
c_hx_do! {
leading_digit <- satisfy(|c: char| c >= '1' && c <= '9'),
digs <- optional(digit_sequence());
NumberPrefix::Digits(leading_digit, digs.unwrap_or(""))
}
}
fn leading_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
choice((
attempt(leading_digits_parser()),
attempt(leading_zero_parser()),
))
}
fn number_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do! {
minus_sign <- optional(char('-')),
leading <- leading_parser(),
trail <- optional(trailing_digit_sequence()),
exp <- optional(exponent_parser());
{
Node::Number({
let mut acc = match leading {
NumberPrefix::LeadingZero => 0.0,
NumberPrefix::Digits(leading_digit, l_digs) => {
let mut l = (leading_digit as u8 - b'0') as u64;
for c in l_digs.as_bytes() {
l = l * 10 + (c - b'0') as u64;
}
l as f64
}
};
if let Some(t_digs) = trail {
let mut divider = 1.0;
for c in t_digs.as_bytes() {
divider /= 10.0;
acc += (c - b'0') as f64 * divider;
}
}
if let Some(exponent) = exp {
acc *= exponent;
}
if let Some(_) = minus_sign {
-acc
} else {
acc
}
})
}
}
}
fn bool_parser<'a>() -> impl Parser<&'a str, Output = Node> |
fn null_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
_word <- string("null");
Node::Null
}
}
macro_rules! ref_parser {
($parser_fn:ident) => {
parser(|input| {
let _: &mut &str = input;
$parser_fn().parse_stream(input).into_result()
})
}
}
fn primitive_parser<'a>() -> impl Parser<&'a str, Output = Node> {
let possible_parser = bool_parser()
.or(number_parser())
.or(string_parser())
.or(null_parser())
.or(ref_parser!(array_parser))
.or(ref_parser!(dictionary_parser));
c_hx_do! {
__ <- skip_many(space()),
pars <- possible_parser,
___ <- skip_many(space());
pars
}
}
fn array_parser<'a>() -> impl Parser<&'a str, Output = Node> {
braced_parser(
char('['),
sep_by(primitive_parser(), char(',')),
char(']')
).map(|nodes: Vec<Node>|
Node::Array(nodes)
)
}
fn pair_parser<'a>() -> impl Parser<&'a str, Output = Option<(SmolStr, Node)>> {
let str_parser = c_hx_do!{
__ <- skip_many(space()),
stp <- string_parser_inner(),
___ <- skip_many(space());
stp
};
c_hx_do!{
l <- str_parser,
__ <- char(':'),
r <- primitive_parser();
Some((l, r))
}
}
fn dictionary_parser<'a>() -> impl Parser<&'a str, Output = Node> {
braced_parser(
char('{'),
sep_by(pair_parser(), char(',')),
char('}')
).map(|mut pairs: Vec<Option<(SmolStr, Node)>>| {
let mut dict = HashMap::with_capacity(pairs.len());
for mut pair in pairs {
let (l, r) = mem::replace(&mut pair, None).unwrap();
dict.insert(l, r);
}
Node::Object(
dict
)
})
}
fn json_parser<'a>() -> impl Parser<&'a str, Output = Node> {
null_parser()
.or(bool_parser())
.or(number_parser())
.or(string_parser())
.or(array_parser())
.or(dictionary_parser())
}
pub fn parse_json(content: &str) -> Result<Node, String> {
let mut parser = c_hx_do!{
__ <- skip_many(space()),
json <- json_parser(),
___ <- skip_many(space()),
____ <- eof();
json
};
let res = parser.parse(content);
match res {
Err(x) => Err(format!("{}", x.to_string())),
Ok((res,_)) => Ok(res)
}
} | {
c_hx_do!{
word <- string("true").or(string("false"));
match word {
"true" => Node::Boolean(true),
_ => Node::Boolean(false)
}
}
} | identifier_body |
lib.rs | #[macro_use]
extern crate compre_combinee;
extern crate combine;
mod errors;
mod details;
mod traits;
mod stop_watch;
use std::collections::{HashMap};
use combine::{parser, eof, satisfy, choice, attempt};
use combine::parser::range::{take_while1};
use combine::parser::char::*;
use combine::{Parser, many, optional, skip_many, sep_by, between};
pub use crate::errors::ErrorCause;
pub use crate::details::Node;
pub use crate::traits::*;
use std::{f64, mem, str};
use std::convert::TryFrom;
use smol_str::SmolStr;
fn parse_hex<'a>() -> impl Parser<&'a str, Output = u32> {
satisfy(|c: char|
(c >= '0' && c <= '9') ||
(c >= 'a' && c <= 'f') ||
(c >= 'A' && c <= 'F')
).map(|c| if c >= '0' && c <= '9' {
c as u64 - '0' as u64
} else if c >= 'a' && c <= 'f' {
10 + c as u64 - 'a' as u64
} else {
10 + c as u64 - 'A' as u64
} as u32
)
}
fn unicode_char<'a>() -> impl Parser<&'a str, Output = Option<char>> {
c_hx_do!{
__ <- string(r#"\u"#),
d3 <- parse_hex(),
d2 <- parse_hex(),
d1 <- parse_hex(),
d0 <- parse_hex();
{
let unicode = d0 +
0x10 * d1 +
0x100 * d2 +
0x1000 * d3;
char::try_from(unicode).ok()
}
}
}
#[derive(PartialEq)]
enum StringPiece<'a >
{
Ref(&'a str),
Char(Option<char>)
}
fn braced_parser<'a, PBL, P, PBR, O>(pbl: PBL, p: P, pbr: PBR) -> impl Parser<&'a str, Output = O>
where
PBL: Parser<&'a str>,
PBR: Parser<&'a str>,
P: Parser<&'a str, Output = O>
{
between(
c_compre![c; c <- pbl, __ <- skip_many(space())],
c_compre![c; __ <- skip_many(space()), c <- pbr],
p
)
}
fn string_part<'a>() -> impl Parser<&'a str, Output = Vec<StringPiece<'a >>> {
many(
choice(
(
attempt(take_while1(|c: char| c!= '\\' && c!= '"' && c!= '\n' && c!= '\r' && c!= '\t')
.map(|chars: &str| StringPiece::Ref(chars))),
attempt(string("\\\"").map(|_|StringPiece::Ref("\""))),
attempt(string("\\\\").map(|_|StringPiece::Ref("\\"))),
attempt(string("\\n").map(|_|StringPiece::Ref("\n"))),
attempt(string("\\t").map(|_|StringPiece::Ref("\t"))),
attempt(string("\\/").map(|_|StringPiece::Ref("/"))),
attempt(string("\\r").map(|_|StringPiece::Ref("\r"))),
attempt(string("\\f").map(|_|StringPiece::Ref("\u{000c}"))),
attempt(string("\\b").map(|_|StringPiece::Ref("\u{0008}"))),
attempt(unicode_char().map(|s|StringPiece::Char(s))),
)
)
)
}
fn string_parser_inner<'a>() -> impl Parser<&'a str, Output = SmolStr> {
c_hx_do! {
x <- between(char('"'), char('"'), string_part());
{
let cap = x.iter().fold(0, |acc, s|
acc +
match s {
StringPiece::Ref(strref) => strref.len(),
StringPiece::Char(c) => c.map(|c_inner| c_inner.len_utf8()).unwrap_or(0)
}
);
if cap <= 22 {
let mut buf: [u8; 22] = [0; 22];
let mut offset = 0;
for s in x.iter() {
match s {
StringPiece::Ref(strref) => {
for &b in strref.as_bytes() {
buf[offset] = b;
offset += 1;
}
},
StringPiece::Char(c) => {
if let Some(chr) = c {
chr.encode_utf8(&mut buf[offset..]);
offset += chr.len_utf8();
}
}
}
}
return unsafe {
SmolStr::new(str::from_utf8_unchecked(&buf[0..cap]))
};
}
let mut str = String::with_capacity(cap);
for s in x.iter() {
match s {
StringPiece::Ref(strref) => str.push_str(strref),
StringPiece::Char(c) => if let Some(chr) = c { str.push(*chr); }
}
}
SmolStr::new(str)
}
}
}
fn string_parser<'a>() -> impl Parser<&'a str, Output = Node> {
string_parser_inner().map(|x| Node::String(x))
}
fn digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
take_while1(|c: char| c >= '0' && c <= '9')
}
#[inline(always)]
fn power(lhs: f64, rhs: f64) -> f64 {
lhs.powf(rhs)
}
fn trailing_digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
c_hx_do! {
__ <- char('.'),
rest <- digit_sequence();
rest
}
}
fn exponent_parser<'a>() -> impl Parser<&'a str, Output = f64> {
c_hx_do!{
__ <- satisfy(|c: char| c == 'e' || c == 'E'),
sign_char <- optional(satisfy(|c: char| c == '+' || c == '-')),
digits <- digit_sequence();
{
let sign = match sign_char {
Some('-') => -1.0,
_ => 1.0
};
let mut acc = 0;
for c in digits.as_bytes() {
acc = acc * 10 + (c - b'0') as u64;
}
power(10.0, sign * acc as f64)
}
}
}
#[derive(PartialEq, Copy, Clone)]
enum NumberPrefix<'a >
{
LeadingZero,
Digits(char, &'a str)
}
fn leading_zero_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
char('0').map(|_| NumberPrefix::LeadingZero)
}
fn leading_digits_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
c_hx_do! {
leading_digit <- satisfy(|c: char| c >= '1' && c <= '9'),
digs <- optional(digit_sequence());
NumberPrefix::Digits(leading_digit, digs.unwrap_or(""))
}
}
fn leading_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
choice((
attempt(leading_digits_parser()),
attempt(leading_zero_parser()),
))
}
fn number_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do! {
minus_sign <- optional(char('-')),
leading <- leading_parser(),
trail <- optional(trailing_digit_sequence()),
exp <- optional(exponent_parser());
{
Node::Number({
let mut acc = match leading {
NumberPrefix::LeadingZero => 0.0,
NumberPrefix::Digits(leading_digit, l_digs) => {
let mut l = (leading_digit as u8 - b'0') as u64;
for c in l_digs.as_bytes() {
l = l * 10 + (c - b'0') as u64;
}
l as f64
}
};
if let Some(t_digs) = trail {
let mut divider = 1.0;
for c in t_digs.as_bytes() {
divider /= 10.0;
acc += (c - b'0') as f64 * divider;
}
}
if let Some(exponent) = exp {
acc *= exponent;
}
if let Some(_) = minus_sign {
-acc
} else {
acc
}
})
}
}
}
fn | <'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
word <- string("true").or(string("false"));
match word {
"true" => Node::Boolean(true),
_ => Node::Boolean(false)
}
}
}
fn null_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
_word <- string("null");
Node::Null
}
}
macro_rules! ref_parser {
($parser_fn:ident) => {
parser(|input| {
let _: &mut &str = input;
$parser_fn().parse_stream(input).into_result()
})
}
}
fn primitive_parser<'a>() -> impl Parser<&'a str, Output = Node> {
let possible_parser = bool_parser()
.or(number_parser())
.or(string_parser())
.or(null_parser())
.or(ref_parser!(array_parser))
.or(ref_parser!(dictionary_parser));
c_hx_do! {
__ <- skip_many(space()),
pars <- possible_parser,
___ <- skip_many(space());
pars
}
}
fn array_parser<'a>() -> impl Parser<&'a str, Output = Node> {
braced_parser(
char('['),
sep_by(primitive_parser(), char(',')),
char(']')
).map(|nodes: Vec<Node>|
Node::Array(nodes)
)
}
fn pair_parser<'a>() -> impl Parser<&'a str, Output = Option<(SmolStr, Node)>> {
let str_parser = c_hx_do!{
__ <- skip_many(space()),
stp <- string_parser_inner(),
___ <- skip_many(space());
stp
};
c_hx_do!{
l <- str_parser,
__ <- char(':'),
r <- primitive_parser();
Some((l, r))
}
}
fn dictionary_parser<'a>() -> impl Parser<&'a str, Output = Node> {
braced_parser(
char('{'),
sep_by(pair_parser(), char(',')),
char('}')
).map(|mut pairs: Vec<Option<(SmolStr, Node)>>| {
let mut dict = HashMap::with_capacity(pairs.len());
for mut pair in pairs {
let (l, r) = mem::replace(&mut pair, None).unwrap();
dict.insert(l, r);
}
Node::Object(
dict
)
})
}
fn json_parser<'a>() -> impl Parser<&'a str, Output = Node> {
null_parser()
.or(bool_parser())
.or(number_parser())
.or(string_parser())
.or(array_parser())
.or(dictionary_parser())
}
pub fn parse_json(content: &str) -> Result<Node, String> {
let mut parser = c_hx_do!{
__ <- skip_many(space()),
json <- json_parser(),
___ <- skip_many(space()),
____ <- eof();
json
};
let res = parser.parse(content);
match res {
Err(x) => Err(format!("{}", x.to_string())),
Ok((res,_)) => Ok(res)
}
} | bool_parser | identifier_name |
main.rs | // ~Similar to the ST Heart Rate Sensor example
#![no_main]
#![no_std]
#![allow(non_snake_case)]
use panic_rtt_target as _;
// use panic_halt as _;
use rtt_target::{rprintln, rtt_init_print};
use stm32wb_hal as hal;
use core::time::Duration;
use cortex_m_rt::{entry, exception};
use nb::block;
use byteorder::{ByteOrder, LittleEndian};
use hal::{
flash::FlashExt,
prelude::*,
rcc::{
ApbDivider, Config, HDivider, HseDivider, PllConfig, PllSrc, RfWakeupClock, RtcClkSrc,
StopWakeupClock, SysClkSrc,
},
tl_mbox::{lhci::LhciC1DeviceInformationCcrp, shci::ShciBleInitCmdParam, TlMbox},
};
use bluetooth_hci::{
event::{command::ReturnParameters, Event},
host::{uart::Packet, AdvertisingFilterPolicy, EncryptionKey, Hci, OwnAddressType},
BdAddr,
};
use ble::{perform_command, receive_event, setup_coprocessor, Characteristic, RadioCopro};
use stm32wb55::{
event::{AttReadPermitRequest, AttributeHandle, GattAttributeModified, Stm32Wb5xEvent},
gap::{
AdvertisingDataType, AdvertisingType, AuthenticationRequirements, Commands as GapCommands,
DiscoverableParameters, LocalName, OutOfBandAuthentication, Pin, Role,
},
gatt::{CharacteristicProperty, Commands as GattCommads, UpdateCharacteristicValueParameters},
hal::{Commands as HalCommands, ConfigData, PowerLevel},
};
mod ble;
mod svc_dis;
mod svc_hrs;
mod bt_appearances;
use svc_dis::{uuid, DeviceInformation, DisCharacteristic, DisService};
use svc_hrs::{HrsService, HrsBodySensorLocation, HrsHrmFlags};
use crate::ble::Service;
use crate::svc_hrs::HrsMeasure;
/// Advertisement interval in milliseconds.
const ADV_INTERVAL_MS: u64 = 250;
const BT_NAME: &[u8] = b"KToy";
const BLE_GAP_DEVICE_NAME_LENGTH: u8 = BT_NAME.len() as u8;
// const MY_DEVICE_INFO: DeviceInformation = DeviceInformation {
// fw_revision: Some("fw1.23"),
// manufacturer_name: Some("demo Company"),
// model_number: None,
// serial_number: None,
// system_id: Some("sysid69"),
// ieee_cert: None,
// hw_revision: None,
// sw_revision: None,
// pnp_id: None
// };
//
#[entry]
fn entry() ->! {
//rtt_init_print!(BlockIfFull, 4096);
rtt_init_print!(NoBlockSkip, 4096);
run();
loop {
continue;
}
}
fn run() {
let dp = hal::device::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
rcc.set_stop_wakeup_clock(StopWakeupClock::HSI16);
// Fastest clock configuration.
// * External low-speed crystal is used (LSE)
// * 32 MHz HSE with PLL
// * 64 MHz CPU1, 32 MHz CPU2
// * 64 MHz for APB1, APB2
// * HSI as a clock source after wake-up from low-power mode
let clock_config = Config::new(SysClkSrc::Pll(PllSrc::Hse(HseDivider::NotDivided)))
.with_lse()
.cpu1_hdiv(HDivider::NotDivided)
.cpu2_hdiv(HDivider::Div2)
.apb1_div(ApbDivider::NotDivided)
.apb2_div(ApbDivider::NotDivided)
.pll_cfg(PllConfig {
m: 2,
n: 12,
r: 3,
q: Some(4),
p: Some(3),
})
.rtc_src(RtcClkSrc::Lse)
.rf_wkp_sel(RfWakeupClock::Lse);
let mut rcc = rcc.apply_clock_config(clock_config, &mut dp.FLASH.constrain().acr);
rprintln!("Boot");
// RTC is required for proper operation of BLE stack
let _rtc = hal::rtc::Rtc::rtc(dp.RTC, &mut rcc);
let mut ipcc = dp.IPCC.constrain();
let mbox = TlMbox::tl_init(&mut rcc, &mut ipcc);
let config = ShciBleInitCmdParam {
p_ble_buffer_address: 0,
ble_buffer_size: 0,
num_attr_record: 100, | extended_packet_length_enable: 1,
pr_write_list_size: 0x3A,
mb_lock_count: 0x79,
att_mtu: 312,
slave_sca: 500,
master_sca: 0,
ls_source: 1,
max_conn_event_length: 0xFFFFFFFF,
hs_startup_time: 0x148,
viterbi_enable: 1,
ll_only: 0,
hw_version: 0,
};
setup_coprocessor(config, ipcc, mbox);
// enable interrupts -> interrupts are enabled in Ipcc::init(), which is called TlMbox::tl_init
// Boot CPU2
hal::pwr::set_cpu2(true);
let ready_event = block!(receive_event());
rprintln!("Received packet: {:?}", ready_event);
rprintln!("Resetting processor...");
let reset_response = perform_command(|rc| rc.reset()).expect("Failed to reset processor");
rprintln!("Received packet: {:?}", reset_response);
init_gap_and_gatt().expect("Failed to initialize GAP and GATT");
rprintln!("Succesfully initialized GAP and GATT");
let di = DeviceInformation::new(
Some("klabs"),
Some("9871234"),
Some("my-serial"),
None,
None,
Some("fw1.234"),
Some("my-system-id"),
None,
None,
);
let dis_service = init_dis(&di).expect("failed to activate DIS");
let hrs_service = init_hrs().expect("failed to activate heart rate service");
// Set our discovery parameters, this is "application specific" regardless of what services
// we've turned on
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
});
loop {
let response = block!(receive_event());
rprintln!("Received event: {:x?}", response);
if let Ok(Packet::Event(event)) = response {
match event {
// karl - this isn't quite working...
Event::DisconnectionComplete(_state) => {
// Enter advertising mode again
// Put the device in a connectable mode
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
})
.expect("Failed to enable discoverable mode again");
// perform_command(|rc| {
// rc.update_advertising_data(&ADVERTISING_DATA[..])
// .map_err(|_| nb::Error::Other(()))
// })
//.expect("Failed to update advertising data");
}
// FIXME - I want some sort of "list of event handlers" that can be plugged in here?
// ST style has a list of handlers, and stops at the first one to say "handled"
_ => handle_event(&event, &dis_service, &hrs_service),
}
}
}
}
fn handle_event(event: &Event<Stm32Wb5xEvent>, dis: &DisService, hrs: &HrsService) {
if let Event::Vendor(stm_event) = event {
match stm_event {
Stm32Wb5xEvent::AttReadPermitRequest(AttReadPermitRequest {
conn_handle,
attribute_handle,
offset,
}) => {
rprintln!("Allowing read on ch: {:?} ah: {:?}, offset: {}", conn_handle, attribute_handle, offset);
perform_command(|rc| rc.allow_read(*conn_handle))
.expect("Failed to allow read");
}
other => rprintln!("ignoring event {:?}", other),
}
}
}
#[exception]
unsafe fn DefaultHandler(irqn: i16) ->! {
panic!("Unhandled IRQ: {}", irqn);
}
fn init_gap_and_gatt() -> Result<(), ()> {
let response = perform_command(|rc: &mut RadioCopro| {
rc.write_config_data(&ConfigData::public_address(get_bd_addr()).build())
})?;
rprintln!("Response to write_config_data: {:?}", response);
perform_command(|rc| {
rc.write_config_data(&ConfigData::random_address(get_random_addr()).build())
})?;
perform_command(|rc| rc.write_config_data(&ConfigData::identity_root(&get_irk()).build()))?;
perform_command(|rc| rc.write_config_data(&ConfigData::encryption_root(&get_erk()).build()))?;
perform_command(|rc| rc.set_tx_power_level(PowerLevel::ZerodBm))?;
perform_command(|rc| rc.init_gatt())?;
let return_params =
perform_command(|rc| rc.init_gap(Role::PERIPHERAL, false, BLE_GAP_DEVICE_NAME_LENGTH))?;
// let sh, dh, ah == return parameters... if it was of the type of GapInit ReturnParameters....?
let (service_handle, dev_name_handle, appearance_handle) = if let ReturnParameters::Vendor(
stm32wb55::event::command::ReturnParameters::GapInit(stm32wb55::event::command::GapInit {
service_handle,
dev_name_handle,
appearance_handle,
..
}),
) = return_params
{
(service_handle, dev_name_handle, appearance_handle)
} else {
rprintln!("Unexpected response to init_gap command");
return Err(());
};
perform_command(|rc| {
rc.update_characteristic_value(&UpdateCharacteristicValueParameters {
service_handle,
characteristic_handle: dev_name_handle,
offset: 0,
value: BT_NAME,
})
.map_err(|_| nb::Error::Other(()))
})?;
let appearance_characteristic = Characteristic {
service: service_handle,
characteristic: appearance_handle,
max_len: 4,
};
appearance_characteristic.set_value(&bt_appearances::HeartRateSensor::GENERIC.0.to_le_bytes());
return Ok(());
}
fn init_dis(di: &DeviceInformation) -> Result<DisService, ()> {
// homekit demo uses 24, st uses max 19, 2 per char, plus 1.
// using less than required here saves memory in the shared space I believe, so, ideally, this would
// check how many "real" values are configured in the "DisServiceInfo" blob....
let dis_service = DisService::new(svc_dis::uuid::DEVICE_INFORMATION_SERVICE, 19)?;
di.register(&dis_service);
// FIXME - neither of these should be in this function, it makes it hard to compose services...
// Disable scan response. not even sure we need this at all.
// perform_command(|rc: &mut RadioCopro| {
// rc.le_set_scan_response_data(&[])
// .map_err(|_| nb::Error::Other(()))
// })?;
return Ok(dis_service);
}
// https://stackoverflow.com/questions/28127165/how-to-convert-struct-to-u8
// except we have no std....
// unsafe fn any_as_u8_slice<T: Sized>(p: &T) -> &[u8] {
// ::std::slice::from_raw_parts(
// (p as *const T) as *const u8,
// ::std::mem::size_of::<T>(),
// )
// }
fn init_hrs() -> Result<HrsService, ()> {
// analog to hrs_init
let hrs_service = HrsService::new(true, true, true)?;
// analog to hrsapp_init...
if hrs_service.with_location {
let loc = HrsBodySensorLocation::Finger as u8;
hrs_service.body_sensor_location.as_ref().unwrap().set_value(&loc.to_le_bytes());
}
let mut hrs_measure = HrsMeasure {
value: 1,
energy_expended: 100,
aRR_interval_values: [200],
valid_intervals: 1,
flags: HrsHrmFlags::VALUE_FORMAT_UINT16 | HrsHrmFlags::SENSOR_CONTACTS_PRESENT | HrsHrmFlags::SENSOR_CONTACTS_SUPPORTED | HrsHrmFlags::ENERGY_EXPENDED_PRESENT | HrsHrmFlags::RR_INTERVAL_PRESENT,
};
// TODO We need to keep that hrs_measure around somewhere, and get our task to start processing periodic events for it....
let mut bytes:[u8;8] = [0; 8];
LittleEndian::write_u16(&mut bytes[0..2], hrs_measure.value);
//bytes[0..2] = *hrs_measure.value.to_le_bytes();
LittleEndian::write_u16(&mut bytes[2..4], hrs_measure.energy_expended);
LittleEndian::write_u16(&mut bytes[4..6], hrs_measure.aRR_interval_values[0]);
bytes[6] = hrs_measure.valid_intervals;
bytes[7] = hrs_measure.flags.bits();
hrs_service.heart_rate_measurement.set_value(&bytes);
return Ok(hrs_service);
}
fn get_bd_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = lhci_info.device_type_id;
bytes[4] = (lhci_info.st_company_id & 0xff) as u8;
bytes[5] = (lhci_info.st_company_id >> 8 & 0xff) as u8;
BdAddr(bytes)
}
fn get_random_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = 0;
bytes[4] = 0x6E;
bytes[5] = 0xED;
BdAddr(bytes)
}
const BLE_CFG_IRK: [u8; 16] = [
0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
];
const BLE_CFG_ERK: [u8; 16] = [
0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21, 0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21,
];
fn get_irk() -> EncryptionKey {
EncryptionKey(BLE_CFG_IRK)
}
fn get_erk() -> EncryptionKey {
EncryptionKey(BLE_CFG_ERK)
}
const DISCOVERY_PARAMS: DiscoverableParameters = DiscoverableParameters {
advertising_type: AdvertisingType::ConnectableUndirected,
advertising_interval: Some((
Duration::from_millis(ADV_INTERVAL_MS),
Duration::from_millis(ADV_INTERVAL_MS),
)),
address_type: OwnAddressType::Public,
filter_policy: AdvertisingFilterPolicy::AllowConnectionAndScan,
// Local name should be empty for the device to be recognized as an Eddystone beacon
local_name: Some(LocalName::Complete(BT_NAME)),
advertising_data: &[],
conn_interval: (None, None),
}; | num_attr_serv: 10,
attr_value_arr_size: 3500, //2788,
num_of_links: 8, | random_line_split |
main.rs | // ~Similar to the ST Heart Rate Sensor example
#![no_main]
#![no_std]
#![allow(non_snake_case)]
use panic_rtt_target as _;
// use panic_halt as _;
use rtt_target::{rprintln, rtt_init_print};
use stm32wb_hal as hal;
use core::time::Duration;
use cortex_m_rt::{entry, exception};
use nb::block;
use byteorder::{ByteOrder, LittleEndian};
use hal::{
flash::FlashExt,
prelude::*,
rcc::{
ApbDivider, Config, HDivider, HseDivider, PllConfig, PllSrc, RfWakeupClock, RtcClkSrc,
StopWakeupClock, SysClkSrc,
},
tl_mbox::{lhci::LhciC1DeviceInformationCcrp, shci::ShciBleInitCmdParam, TlMbox},
};
use bluetooth_hci::{
event::{command::ReturnParameters, Event},
host::{uart::Packet, AdvertisingFilterPolicy, EncryptionKey, Hci, OwnAddressType},
BdAddr,
};
use ble::{perform_command, receive_event, setup_coprocessor, Characteristic, RadioCopro};
use stm32wb55::{
event::{AttReadPermitRequest, AttributeHandle, GattAttributeModified, Stm32Wb5xEvent},
gap::{
AdvertisingDataType, AdvertisingType, AuthenticationRequirements, Commands as GapCommands,
DiscoverableParameters, LocalName, OutOfBandAuthentication, Pin, Role,
},
gatt::{CharacteristicProperty, Commands as GattCommads, UpdateCharacteristicValueParameters},
hal::{Commands as HalCommands, ConfigData, PowerLevel},
};
mod ble;
mod svc_dis;
mod svc_hrs;
mod bt_appearances;
use svc_dis::{uuid, DeviceInformation, DisCharacteristic, DisService};
use svc_hrs::{HrsService, HrsBodySensorLocation, HrsHrmFlags};
use crate::ble::Service;
use crate::svc_hrs::HrsMeasure;
/// Advertisement interval in milliseconds.
const ADV_INTERVAL_MS: u64 = 250;
const BT_NAME: &[u8] = b"KToy";
const BLE_GAP_DEVICE_NAME_LENGTH: u8 = BT_NAME.len() as u8;
// const MY_DEVICE_INFO: DeviceInformation = DeviceInformation {
// fw_revision: Some("fw1.23"),
// manufacturer_name: Some("demo Company"),
// model_number: None,
// serial_number: None,
// system_id: Some("sysid69"),
// ieee_cert: None,
// hw_revision: None,
// sw_revision: None,
// pnp_id: None
// };
//
#[entry]
fn entry() ->! {
//rtt_init_print!(BlockIfFull, 4096);
rtt_init_print!(NoBlockSkip, 4096);
run();
loop {
continue;
}
}
fn run() {
let dp = hal::device::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
rcc.set_stop_wakeup_clock(StopWakeupClock::HSI16);
// Fastest clock configuration.
// * External low-speed crystal is used (LSE)
// * 32 MHz HSE with PLL
// * 64 MHz CPU1, 32 MHz CPU2
// * 64 MHz for APB1, APB2
// * HSI as a clock source after wake-up from low-power mode
let clock_config = Config::new(SysClkSrc::Pll(PllSrc::Hse(HseDivider::NotDivided)))
.with_lse()
.cpu1_hdiv(HDivider::NotDivided)
.cpu2_hdiv(HDivider::Div2)
.apb1_div(ApbDivider::NotDivided)
.apb2_div(ApbDivider::NotDivided)
.pll_cfg(PllConfig {
m: 2,
n: 12,
r: 3,
q: Some(4),
p: Some(3),
})
.rtc_src(RtcClkSrc::Lse)
.rf_wkp_sel(RfWakeupClock::Lse);
let mut rcc = rcc.apply_clock_config(clock_config, &mut dp.FLASH.constrain().acr);
rprintln!("Boot");
// RTC is required for proper operation of BLE stack
let _rtc = hal::rtc::Rtc::rtc(dp.RTC, &mut rcc);
let mut ipcc = dp.IPCC.constrain();
let mbox = TlMbox::tl_init(&mut rcc, &mut ipcc);
let config = ShciBleInitCmdParam {
p_ble_buffer_address: 0,
ble_buffer_size: 0,
num_attr_record: 100,
num_attr_serv: 10,
attr_value_arr_size: 3500, //2788,
num_of_links: 8,
extended_packet_length_enable: 1,
pr_write_list_size: 0x3A,
mb_lock_count: 0x79,
att_mtu: 312,
slave_sca: 500,
master_sca: 0,
ls_source: 1,
max_conn_event_length: 0xFFFFFFFF,
hs_startup_time: 0x148,
viterbi_enable: 1,
ll_only: 0,
hw_version: 0,
};
setup_coprocessor(config, ipcc, mbox);
// enable interrupts -> interrupts are enabled in Ipcc::init(), which is called TlMbox::tl_init
// Boot CPU2
hal::pwr::set_cpu2(true);
let ready_event = block!(receive_event());
rprintln!("Received packet: {:?}", ready_event);
rprintln!("Resetting processor...");
let reset_response = perform_command(|rc| rc.reset()).expect("Failed to reset processor");
rprintln!("Received packet: {:?}", reset_response);
init_gap_and_gatt().expect("Failed to initialize GAP and GATT");
rprintln!("Succesfully initialized GAP and GATT");
let di = DeviceInformation::new(
Some("klabs"),
Some("9871234"),
Some("my-serial"),
None,
None,
Some("fw1.234"),
Some("my-system-id"),
None,
None,
);
let dis_service = init_dis(&di).expect("failed to activate DIS");
let hrs_service = init_hrs().expect("failed to activate heart rate service");
// Set our discovery parameters, this is "application specific" regardless of what services
// we've turned on
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
});
loop {
let response = block!(receive_event());
rprintln!("Received event: {:x?}", response);
if let Ok(Packet::Event(event)) = response {
match event {
// karl - this isn't quite working...
Event::DisconnectionComplete(_state) => {
// Enter advertising mode again
// Put the device in a connectable mode
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
})
.expect("Failed to enable discoverable mode again");
// perform_command(|rc| {
// rc.update_advertising_data(&ADVERTISING_DATA[..])
// .map_err(|_| nb::Error::Other(()))
// })
//.expect("Failed to update advertising data");
}
// FIXME - I want some sort of "list of event handlers" that can be plugged in here?
// ST style has a list of handlers, and stops at the first one to say "handled"
_ => handle_event(&event, &dis_service, &hrs_service),
}
}
}
}
fn handle_event(event: &Event<Stm32Wb5xEvent>, dis: &DisService, hrs: &HrsService) {
if let Event::Vendor(stm_event) = event {
match stm_event {
Stm32Wb5xEvent::AttReadPermitRequest(AttReadPermitRequest {
conn_handle,
attribute_handle,
offset,
}) => {
rprintln!("Allowing read on ch: {:?} ah: {:?}, offset: {}", conn_handle, attribute_handle, offset);
perform_command(|rc| rc.allow_read(*conn_handle))
.expect("Failed to allow read");
}
other => rprintln!("ignoring event {:?}", other),
}
}
}
#[exception]
unsafe fn DefaultHandler(irqn: i16) ->! {
panic!("Unhandled IRQ: {}", irqn);
}
fn init_gap_and_gatt() -> Result<(), ()> {
let response = perform_command(|rc: &mut RadioCopro| {
rc.write_config_data(&ConfigData::public_address(get_bd_addr()).build())
})?;
rprintln!("Response to write_config_data: {:?}", response);
perform_command(|rc| {
rc.write_config_data(&ConfigData::random_address(get_random_addr()).build())
})?;
perform_command(|rc| rc.write_config_data(&ConfigData::identity_root(&get_irk()).build()))?;
perform_command(|rc| rc.write_config_data(&ConfigData::encryption_root(&get_erk()).build()))?;
perform_command(|rc| rc.set_tx_power_level(PowerLevel::ZerodBm))?;
perform_command(|rc| rc.init_gatt())?;
let return_params =
perform_command(|rc| rc.init_gap(Role::PERIPHERAL, false, BLE_GAP_DEVICE_NAME_LENGTH))?;
// let sh, dh, ah == return parameters... if it was of the type of GapInit ReturnParameters....?
let (service_handle, dev_name_handle, appearance_handle) = if let ReturnParameters::Vendor(
stm32wb55::event::command::ReturnParameters::GapInit(stm32wb55::event::command::GapInit {
service_handle,
dev_name_handle,
appearance_handle,
..
}),
) = return_params
| else {
rprintln!("Unexpected response to init_gap command");
return Err(());
};
perform_command(|rc| {
rc.update_characteristic_value(&UpdateCharacteristicValueParameters {
service_handle,
characteristic_handle: dev_name_handle,
offset: 0,
value: BT_NAME,
})
.map_err(|_| nb::Error::Other(()))
})?;
let appearance_characteristic = Characteristic {
service: service_handle,
characteristic: appearance_handle,
max_len: 4,
};
appearance_characteristic.set_value(&bt_appearances::HeartRateSensor::GENERIC.0.to_le_bytes());
return Ok(());
}
fn init_dis(di: &DeviceInformation) -> Result<DisService, ()> {
// homekit demo uses 24, st uses max 19, 2 per char, plus 1.
// using less than required here saves memory in the shared space I believe, so, ideally, this would
// check how many "real" values are configured in the "DisServiceInfo" blob....
let dis_service = DisService::new(svc_dis::uuid::DEVICE_INFORMATION_SERVICE, 19)?;
di.register(&dis_service);
// FIXME - neither of these should be in this function, it makes it hard to compose services...
// Disable scan response. not even sure we need this at all.
// perform_command(|rc: &mut RadioCopro| {
// rc.le_set_scan_response_data(&[])
// .map_err(|_| nb::Error::Other(()))
// })?;
return Ok(dis_service);
}
// https://stackoverflow.com/questions/28127165/how-to-convert-struct-to-u8
// except we have no std....
// unsafe fn any_as_u8_slice<T: Sized>(p: &T) -> &[u8] {
// ::std::slice::from_raw_parts(
// (p as *const T) as *const u8,
// ::std::mem::size_of::<T>(),
// )
// }
fn init_hrs() -> Result<HrsService, ()> {
// analog to hrs_init
let hrs_service = HrsService::new(true, true, true)?;
// analog to hrsapp_init...
if hrs_service.with_location {
let loc = HrsBodySensorLocation::Finger as u8;
hrs_service.body_sensor_location.as_ref().unwrap().set_value(&loc.to_le_bytes());
}
let mut hrs_measure = HrsMeasure {
value: 1,
energy_expended: 100,
aRR_interval_values: [200],
valid_intervals: 1,
flags: HrsHrmFlags::VALUE_FORMAT_UINT16 | HrsHrmFlags::SENSOR_CONTACTS_PRESENT | HrsHrmFlags::SENSOR_CONTACTS_SUPPORTED | HrsHrmFlags::ENERGY_EXPENDED_PRESENT | HrsHrmFlags::RR_INTERVAL_PRESENT,
};
// TODO We need to keep that hrs_measure around somewhere, and get our task to start processing periodic events for it....
let mut bytes:[u8;8] = [0; 8];
LittleEndian::write_u16(&mut bytes[0..2], hrs_measure.value);
//bytes[0..2] = *hrs_measure.value.to_le_bytes();
LittleEndian::write_u16(&mut bytes[2..4], hrs_measure.energy_expended);
LittleEndian::write_u16(&mut bytes[4..6], hrs_measure.aRR_interval_values[0]);
bytes[6] = hrs_measure.valid_intervals;
bytes[7] = hrs_measure.flags.bits();
hrs_service.heart_rate_measurement.set_value(&bytes);
return Ok(hrs_service);
}
fn get_bd_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = lhci_info.device_type_id;
bytes[4] = (lhci_info.st_company_id & 0xff) as u8;
bytes[5] = (lhci_info.st_company_id >> 8 & 0xff) as u8;
BdAddr(bytes)
}
fn get_random_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = 0;
bytes[4] = 0x6E;
bytes[5] = 0xED;
BdAddr(bytes)
}
const BLE_CFG_IRK: [u8; 16] = [
0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
];
const BLE_CFG_ERK: [u8; 16] = [
0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21, 0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21,
];
fn get_irk() -> EncryptionKey {
EncryptionKey(BLE_CFG_IRK)
}
fn get_erk() -> EncryptionKey {
EncryptionKey(BLE_CFG_ERK)
}
const DISCOVERY_PARAMS: DiscoverableParameters = DiscoverableParameters {
advertising_type: AdvertisingType::ConnectableUndirected,
advertising_interval: Some((
Duration::from_millis(ADV_INTERVAL_MS),
Duration::from_millis(ADV_INTERVAL_MS),
)),
address_type: OwnAddressType::Public,
filter_policy: AdvertisingFilterPolicy::AllowConnectionAndScan,
// Local name should be empty for the device to be recognized as an Eddystone beacon
local_name: Some(LocalName::Complete(BT_NAME)),
advertising_data: &[],
conn_interval: (None, None),
};
| {
(service_handle, dev_name_handle, appearance_handle)
} | conditional_block |
main.rs | // ~Similar to the ST Heart Rate Sensor example
#![no_main]
#![no_std]
#![allow(non_snake_case)]
use panic_rtt_target as _;
// use panic_halt as _;
use rtt_target::{rprintln, rtt_init_print};
use stm32wb_hal as hal;
use core::time::Duration;
use cortex_m_rt::{entry, exception};
use nb::block;
use byteorder::{ByteOrder, LittleEndian};
use hal::{
flash::FlashExt,
prelude::*,
rcc::{
ApbDivider, Config, HDivider, HseDivider, PllConfig, PllSrc, RfWakeupClock, RtcClkSrc,
StopWakeupClock, SysClkSrc,
},
tl_mbox::{lhci::LhciC1DeviceInformationCcrp, shci::ShciBleInitCmdParam, TlMbox},
};
use bluetooth_hci::{
event::{command::ReturnParameters, Event},
host::{uart::Packet, AdvertisingFilterPolicy, EncryptionKey, Hci, OwnAddressType},
BdAddr,
};
use ble::{perform_command, receive_event, setup_coprocessor, Characteristic, RadioCopro};
use stm32wb55::{
event::{AttReadPermitRequest, AttributeHandle, GattAttributeModified, Stm32Wb5xEvent},
gap::{
AdvertisingDataType, AdvertisingType, AuthenticationRequirements, Commands as GapCommands,
DiscoverableParameters, LocalName, OutOfBandAuthentication, Pin, Role,
},
gatt::{CharacteristicProperty, Commands as GattCommads, UpdateCharacteristicValueParameters},
hal::{Commands as HalCommands, ConfigData, PowerLevel},
};
mod ble;
mod svc_dis;
mod svc_hrs;
mod bt_appearances;
use svc_dis::{uuid, DeviceInformation, DisCharacteristic, DisService};
use svc_hrs::{HrsService, HrsBodySensorLocation, HrsHrmFlags};
use crate::ble::Service;
use crate::svc_hrs::HrsMeasure;
/// Advertisement interval in milliseconds.
const ADV_INTERVAL_MS: u64 = 250;
const BT_NAME: &[u8] = b"KToy";
const BLE_GAP_DEVICE_NAME_LENGTH: u8 = BT_NAME.len() as u8;
// const MY_DEVICE_INFO: DeviceInformation = DeviceInformation {
// fw_revision: Some("fw1.23"),
// manufacturer_name: Some("demo Company"),
// model_number: None,
// serial_number: None,
// system_id: Some("sysid69"),
// ieee_cert: None,
// hw_revision: None,
// sw_revision: None,
// pnp_id: None
// };
//
#[entry]
fn entry() ->! {
//rtt_init_print!(BlockIfFull, 4096);
rtt_init_print!(NoBlockSkip, 4096);
run();
loop {
continue;
}
}
fn run() {
let dp = hal::device::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
rcc.set_stop_wakeup_clock(StopWakeupClock::HSI16);
// Fastest clock configuration.
// * External low-speed crystal is used (LSE)
// * 32 MHz HSE with PLL
// * 64 MHz CPU1, 32 MHz CPU2
// * 64 MHz for APB1, APB2
// * HSI as a clock source after wake-up from low-power mode
let clock_config = Config::new(SysClkSrc::Pll(PllSrc::Hse(HseDivider::NotDivided)))
.with_lse()
.cpu1_hdiv(HDivider::NotDivided)
.cpu2_hdiv(HDivider::Div2)
.apb1_div(ApbDivider::NotDivided)
.apb2_div(ApbDivider::NotDivided)
.pll_cfg(PllConfig {
m: 2,
n: 12,
r: 3,
q: Some(4),
p: Some(3),
})
.rtc_src(RtcClkSrc::Lse)
.rf_wkp_sel(RfWakeupClock::Lse);
let mut rcc = rcc.apply_clock_config(clock_config, &mut dp.FLASH.constrain().acr);
rprintln!("Boot");
// RTC is required for proper operation of BLE stack
let _rtc = hal::rtc::Rtc::rtc(dp.RTC, &mut rcc);
let mut ipcc = dp.IPCC.constrain();
let mbox = TlMbox::tl_init(&mut rcc, &mut ipcc);
let config = ShciBleInitCmdParam {
p_ble_buffer_address: 0,
ble_buffer_size: 0,
num_attr_record: 100,
num_attr_serv: 10,
attr_value_arr_size: 3500, //2788,
num_of_links: 8,
extended_packet_length_enable: 1,
pr_write_list_size: 0x3A,
mb_lock_count: 0x79,
att_mtu: 312,
slave_sca: 500,
master_sca: 0,
ls_source: 1,
max_conn_event_length: 0xFFFFFFFF,
hs_startup_time: 0x148,
viterbi_enable: 1,
ll_only: 0,
hw_version: 0,
};
setup_coprocessor(config, ipcc, mbox);
// enable interrupts -> interrupts are enabled in Ipcc::init(), which is called TlMbox::tl_init
// Boot CPU2
hal::pwr::set_cpu2(true);
let ready_event = block!(receive_event());
rprintln!("Received packet: {:?}", ready_event);
rprintln!("Resetting processor...");
let reset_response = perform_command(|rc| rc.reset()).expect("Failed to reset processor");
rprintln!("Received packet: {:?}", reset_response);
init_gap_and_gatt().expect("Failed to initialize GAP and GATT");
rprintln!("Succesfully initialized GAP and GATT");
let di = DeviceInformation::new(
Some("klabs"),
Some("9871234"),
Some("my-serial"),
None,
None,
Some("fw1.234"),
Some("my-system-id"),
None,
None,
);
let dis_service = init_dis(&di).expect("failed to activate DIS");
let hrs_service = init_hrs().expect("failed to activate heart rate service");
// Set our discovery parameters, this is "application specific" regardless of what services
// we've turned on
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
});
loop {
let response = block!(receive_event());
rprintln!("Received event: {:x?}", response);
if let Ok(Packet::Event(event)) = response {
match event {
// karl - this isn't quite working...
Event::DisconnectionComplete(_state) => {
// Enter advertising mode again
// Put the device in a connectable mode
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
})
.expect("Failed to enable discoverable mode again");
// perform_command(|rc| {
// rc.update_advertising_data(&ADVERTISING_DATA[..])
// .map_err(|_| nb::Error::Other(()))
// })
//.expect("Failed to update advertising data");
}
// FIXME - I want some sort of "list of event handlers" that can be plugged in here?
// ST style has a list of handlers, and stops at the first one to say "handled"
_ => handle_event(&event, &dis_service, &hrs_service),
}
}
}
}
fn handle_event(event: &Event<Stm32Wb5xEvent>, dis: &DisService, hrs: &HrsService) {
if let Event::Vendor(stm_event) = event {
match stm_event {
Stm32Wb5xEvent::AttReadPermitRequest(AttReadPermitRequest {
conn_handle,
attribute_handle,
offset,
}) => {
rprintln!("Allowing read on ch: {:?} ah: {:?}, offset: {}", conn_handle, attribute_handle, offset);
perform_command(|rc| rc.allow_read(*conn_handle))
.expect("Failed to allow read");
}
other => rprintln!("ignoring event {:?}", other),
}
}
}
#[exception]
unsafe fn DefaultHandler(irqn: i16) ->! {
panic!("Unhandled IRQ: {}", irqn);
}
fn init_gap_and_gatt() -> Result<(), ()> {
let response = perform_command(|rc: &mut RadioCopro| {
rc.write_config_data(&ConfigData::public_address(get_bd_addr()).build())
})?;
rprintln!("Response to write_config_data: {:?}", response);
perform_command(|rc| {
rc.write_config_data(&ConfigData::random_address(get_random_addr()).build())
})?;
perform_command(|rc| rc.write_config_data(&ConfigData::identity_root(&get_irk()).build()))?;
perform_command(|rc| rc.write_config_data(&ConfigData::encryption_root(&get_erk()).build()))?;
perform_command(|rc| rc.set_tx_power_level(PowerLevel::ZerodBm))?;
perform_command(|rc| rc.init_gatt())?;
let return_params =
perform_command(|rc| rc.init_gap(Role::PERIPHERAL, false, BLE_GAP_DEVICE_NAME_LENGTH))?;
// let sh, dh, ah == return parameters... if it was of the type of GapInit ReturnParameters....?
let (service_handle, dev_name_handle, appearance_handle) = if let ReturnParameters::Vendor(
stm32wb55::event::command::ReturnParameters::GapInit(stm32wb55::event::command::GapInit {
service_handle,
dev_name_handle,
appearance_handle,
..
}),
) = return_params
{
(service_handle, dev_name_handle, appearance_handle)
} else {
rprintln!("Unexpected response to init_gap command");
return Err(());
};
perform_command(|rc| {
rc.update_characteristic_value(&UpdateCharacteristicValueParameters {
service_handle,
characteristic_handle: dev_name_handle,
offset: 0,
value: BT_NAME,
})
.map_err(|_| nb::Error::Other(()))
})?;
let appearance_characteristic = Characteristic {
service: service_handle,
characteristic: appearance_handle,
max_len: 4,
};
appearance_characteristic.set_value(&bt_appearances::HeartRateSensor::GENERIC.0.to_le_bytes());
return Ok(());
}
fn init_dis(di: &DeviceInformation) -> Result<DisService, ()> {
// homekit demo uses 24, st uses max 19, 2 per char, plus 1.
// using less than required here saves memory in the shared space I believe, so, ideally, this would
// check how many "real" values are configured in the "DisServiceInfo" blob....
let dis_service = DisService::new(svc_dis::uuid::DEVICE_INFORMATION_SERVICE, 19)?;
di.register(&dis_service);
// FIXME - neither of these should be in this function, it makes it hard to compose services...
// Disable scan response. not even sure we need this at all.
// perform_command(|rc: &mut RadioCopro| {
// rc.le_set_scan_response_data(&[])
// .map_err(|_| nb::Error::Other(()))
// })?;
return Ok(dis_service);
}
// https://stackoverflow.com/questions/28127165/how-to-convert-struct-to-u8
// except we have no std....
// unsafe fn any_as_u8_slice<T: Sized>(p: &T) -> &[u8] {
// ::std::slice::from_raw_parts(
// (p as *const T) as *const u8,
// ::std::mem::size_of::<T>(),
// )
// }
fn init_hrs() -> Result<HrsService, ()> {
// analog to hrs_init
let hrs_service = HrsService::new(true, true, true)?;
// analog to hrsapp_init...
if hrs_service.with_location {
let loc = HrsBodySensorLocation::Finger as u8;
hrs_service.body_sensor_location.as_ref().unwrap().set_value(&loc.to_le_bytes());
}
let mut hrs_measure = HrsMeasure {
value: 1,
energy_expended: 100,
aRR_interval_values: [200],
valid_intervals: 1,
flags: HrsHrmFlags::VALUE_FORMAT_UINT16 | HrsHrmFlags::SENSOR_CONTACTS_PRESENT | HrsHrmFlags::SENSOR_CONTACTS_SUPPORTED | HrsHrmFlags::ENERGY_EXPENDED_PRESENT | HrsHrmFlags::RR_INTERVAL_PRESENT,
};
// TODO We need to keep that hrs_measure around somewhere, and get our task to start processing periodic events for it....
let mut bytes:[u8;8] = [0; 8];
LittleEndian::write_u16(&mut bytes[0..2], hrs_measure.value);
//bytes[0..2] = *hrs_measure.value.to_le_bytes();
LittleEndian::write_u16(&mut bytes[2..4], hrs_measure.energy_expended);
LittleEndian::write_u16(&mut bytes[4..6], hrs_measure.aRR_interval_values[0]);
bytes[6] = hrs_measure.valid_intervals;
bytes[7] = hrs_measure.flags.bits();
hrs_service.heart_rate_measurement.set_value(&bytes);
return Ok(hrs_service);
}
fn | () -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = lhci_info.device_type_id;
bytes[4] = (lhci_info.st_company_id & 0xff) as u8;
bytes[5] = (lhci_info.st_company_id >> 8 & 0xff) as u8;
BdAddr(bytes)
}
fn get_random_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = 0;
bytes[4] = 0x6E;
bytes[5] = 0xED;
BdAddr(bytes)
}
const BLE_CFG_IRK: [u8; 16] = [
0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
];
const BLE_CFG_ERK: [u8; 16] = [
0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21, 0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21,
];
fn get_irk() -> EncryptionKey {
EncryptionKey(BLE_CFG_IRK)
}
fn get_erk() -> EncryptionKey {
EncryptionKey(BLE_CFG_ERK)
}
const DISCOVERY_PARAMS: DiscoverableParameters = DiscoverableParameters {
advertising_type: AdvertisingType::ConnectableUndirected,
advertising_interval: Some((
Duration::from_millis(ADV_INTERVAL_MS),
Duration::from_millis(ADV_INTERVAL_MS),
)),
address_type: OwnAddressType::Public,
filter_policy: AdvertisingFilterPolicy::AllowConnectionAndScan,
// Local name should be empty for the device to be recognized as an Eddystone beacon
local_name: Some(LocalName::Complete(BT_NAME)),
advertising_data: &[],
conn_interval: (None, None),
};
| get_bd_addr | identifier_name |
main.rs | // ~Similar to the ST Heart Rate Sensor example
#![no_main]
#![no_std]
#![allow(non_snake_case)]
use panic_rtt_target as _;
// use panic_halt as _;
use rtt_target::{rprintln, rtt_init_print};
use stm32wb_hal as hal;
use core::time::Duration;
use cortex_m_rt::{entry, exception};
use nb::block;
use byteorder::{ByteOrder, LittleEndian};
use hal::{
flash::FlashExt,
prelude::*,
rcc::{
ApbDivider, Config, HDivider, HseDivider, PllConfig, PllSrc, RfWakeupClock, RtcClkSrc,
StopWakeupClock, SysClkSrc,
},
tl_mbox::{lhci::LhciC1DeviceInformationCcrp, shci::ShciBleInitCmdParam, TlMbox},
};
use bluetooth_hci::{
event::{command::ReturnParameters, Event},
host::{uart::Packet, AdvertisingFilterPolicy, EncryptionKey, Hci, OwnAddressType},
BdAddr,
};
use ble::{perform_command, receive_event, setup_coprocessor, Characteristic, RadioCopro};
use stm32wb55::{
event::{AttReadPermitRequest, AttributeHandle, GattAttributeModified, Stm32Wb5xEvent},
gap::{
AdvertisingDataType, AdvertisingType, AuthenticationRequirements, Commands as GapCommands,
DiscoverableParameters, LocalName, OutOfBandAuthentication, Pin, Role,
},
gatt::{CharacteristicProperty, Commands as GattCommads, UpdateCharacteristicValueParameters},
hal::{Commands as HalCommands, ConfigData, PowerLevel},
};
mod ble;
mod svc_dis;
mod svc_hrs;
mod bt_appearances;
use svc_dis::{uuid, DeviceInformation, DisCharacteristic, DisService};
use svc_hrs::{HrsService, HrsBodySensorLocation, HrsHrmFlags};
use crate::ble::Service;
use crate::svc_hrs::HrsMeasure;
/// Advertisement interval in milliseconds.
const ADV_INTERVAL_MS: u64 = 250;
const BT_NAME: &[u8] = b"KToy";
const BLE_GAP_DEVICE_NAME_LENGTH: u8 = BT_NAME.len() as u8;
// const MY_DEVICE_INFO: DeviceInformation = DeviceInformation {
// fw_revision: Some("fw1.23"),
// manufacturer_name: Some("demo Company"),
// model_number: None,
// serial_number: None,
// system_id: Some("sysid69"),
// ieee_cert: None,
// hw_revision: None,
// sw_revision: None,
// pnp_id: None
// };
//
#[entry]
fn entry() ->! {
//rtt_init_print!(BlockIfFull, 4096);
rtt_init_print!(NoBlockSkip, 4096);
run();
loop {
continue;
}
}
fn run() {
let dp = hal::device::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
rcc.set_stop_wakeup_clock(StopWakeupClock::HSI16);
// Fastest clock configuration.
// * External low-speed crystal is used (LSE)
// * 32 MHz HSE with PLL
// * 64 MHz CPU1, 32 MHz CPU2
// * 64 MHz for APB1, APB2
// * HSI as a clock source after wake-up from low-power mode
let clock_config = Config::new(SysClkSrc::Pll(PllSrc::Hse(HseDivider::NotDivided)))
.with_lse()
.cpu1_hdiv(HDivider::NotDivided)
.cpu2_hdiv(HDivider::Div2)
.apb1_div(ApbDivider::NotDivided)
.apb2_div(ApbDivider::NotDivided)
.pll_cfg(PllConfig {
m: 2,
n: 12,
r: 3,
q: Some(4),
p: Some(3),
})
.rtc_src(RtcClkSrc::Lse)
.rf_wkp_sel(RfWakeupClock::Lse);
let mut rcc = rcc.apply_clock_config(clock_config, &mut dp.FLASH.constrain().acr);
rprintln!("Boot");
// RTC is required for proper operation of BLE stack
let _rtc = hal::rtc::Rtc::rtc(dp.RTC, &mut rcc);
let mut ipcc = dp.IPCC.constrain();
let mbox = TlMbox::tl_init(&mut rcc, &mut ipcc);
let config = ShciBleInitCmdParam {
p_ble_buffer_address: 0,
ble_buffer_size: 0,
num_attr_record: 100,
num_attr_serv: 10,
attr_value_arr_size: 3500, //2788,
num_of_links: 8,
extended_packet_length_enable: 1,
pr_write_list_size: 0x3A,
mb_lock_count: 0x79,
att_mtu: 312,
slave_sca: 500,
master_sca: 0,
ls_source: 1,
max_conn_event_length: 0xFFFFFFFF,
hs_startup_time: 0x148,
viterbi_enable: 1,
ll_only: 0,
hw_version: 0,
};
setup_coprocessor(config, ipcc, mbox);
// enable interrupts -> interrupts are enabled in Ipcc::init(), which is called TlMbox::tl_init
// Boot CPU2
hal::pwr::set_cpu2(true);
let ready_event = block!(receive_event());
rprintln!("Received packet: {:?}", ready_event);
rprintln!("Resetting processor...");
let reset_response = perform_command(|rc| rc.reset()).expect("Failed to reset processor");
rprintln!("Received packet: {:?}", reset_response);
init_gap_and_gatt().expect("Failed to initialize GAP and GATT");
rprintln!("Succesfully initialized GAP and GATT");
let di = DeviceInformation::new(
Some("klabs"),
Some("9871234"),
Some("my-serial"),
None,
None,
Some("fw1.234"),
Some("my-system-id"),
None,
None,
);
let dis_service = init_dis(&di).expect("failed to activate DIS");
let hrs_service = init_hrs().expect("failed to activate heart rate service");
// Set our discovery parameters, this is "application specific" regardless of what services
// we've turned on
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
});
loop {
let response = block!(receive_event());
rprintln!("Received event: {:x?}", response);
if let Ok(Packet::Event(event)) = response {
match event {
// karl - this isn't quite working...
Event::DisconnectionComplete(_state) => {
// Enter advertising mode again
// Put the device in a connectable mode
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
})
.expect("Failed to enable discoverable mode again");
// perform_command(|rc| {
// rc.update_advertising_data(&ADVERTISING_DATA[..])
// .map_err(|_| nb::Error::Other(()))
// })
//.expect("Failed to update advertising data");
}
// FIXME - I want some sort of "list of event handlers" that can be plugged in here?
// ST style has a list of handlers, and stops at the first one to say "handled"
_ => handle_event(&event, &dis_service, &hrs_service),
}
}
}
}
fn handle_event(event: &Event<Stm32Wb5xEvent>, dis: &DisService, hrs: &HrsService) {
if let Event::Vendor(stm_event) = event {
match stm_event {
Stm32Wb5xEvent::AttReadPermitRequest(AttReadPermitRequest {
conn_handle,
attribute_handle,
offset,
}) => {
rprintln!("Allowing read on ch: {:?} ah: {:?}, offset: {}", conn_handle, attribute_handle, offset);
perform_command(|rc| rc.allow_read(*conn_handle))
.expect("Failed to allow read");
}
other => rprintln!("ignoring event {:?}", other),
}
}
}
#[exception]
unsafe fn DefaultHandler(irqn: i16) ->! {
panic!("Unhandled IRQ: {}", irqn);
}
fn init_gap_and_gatt() -> Result<(), ()> {
let response = perform_command(|rc: &mut RadioCopro| {
rc.write_config_data(&ConfigData::public_address(get_bd_addr()).build())
})?;
rprintln!("Response to write_config_data: {:?}", response);
perform_command(|rc| {
rc.write_config_data(&ConfigData::random_address(get_random_addr()).build())
})?;
perform_command(|rc| rc.write_config_data(&ConfigData::identity_root(&get_irk()).build()))?;
perform_command(|rc| rc.write_config_data(&ConfigData::encryption_root(&get_erk()).build()))?;
perform_command(|rc| rc.set_tx_power_level(PowerLevel::ZerodBm))?;
perform_command(|rc| rc.init_gatt())?;
let return_params =
perform_command(|rc| rc.init_gap(Role::PERIPHERAL, false, BLE_GAP_DEVICE_NAME_LENGTH))?;
// let sh, dh, ah == return parameters... if it was of the type of GapInit ReturnParameters....?
let (service_handle, dev_name_handle, appearance_handle) = if let ReturnParameters::Vendor(
stm32wb55::event::command::ReturnParameters::GapInit(stm32wb55::event::command::GapInit {
service_handle,
dev_name_handle,
appearance_handle,
..
}),
) = return_params
{
(service_handle, dev_name_handle, appearance_handle)
} else {
rprintln!("Unexpected response to init_gap command");
return Err(());
};
perform_command(|rc| {
rc.update_characteristic_value(&UpdateCharacteristicValueParameters {
service_handle,
characteristic_handle: dev_name_handle,
offset: 0,
value: BT_NAME,
})
.map_err(|_| nb::Error::Other(()))
})?;
let appearance_characteristic = Characteristic {
service: service_handle,
characteristic: appearance_handle,
max_len: 4,
};
appearance_characteristic.set_value(&bt_appearances::HeartRateSensor::GENERIC.0.to_le_bytes());
return Ok(());
}
fn init_dis(di: &DeviceInformation) -> Result<DisService, ()> {
// homekit demo uses 24, st uses max 19, 2 per char, plus 1.
// using less than required here saves memory in the shared space I believe, so, ideally, this would
// check how many "real" values are configured in the "DisServiceInfo" blob....
let dis_service = DisService::new(svc_dis::uuid::DEVICE_INFORMATION_SERVICE, 19)?;
di.register(&dis_service);
// FIXME - neither of these should be in this function, it makes it hard to compose services...
// Disable scan response. not even sure we need this at all.
// perform_command(|rc: &mut RadioCopro| {
// rc.le_set_scan_response_data(&[])
// .map_err(|_| nb::Error::Other(()))
// })?;
return Ok(dis_service);
}
// https://stackoverflow.com/questions/28127165/how-to-convert-struct-to-u8
// except we have no std....
// unsafe fn any_as_u8_slice<T: Sized>(p: &T) -> &[u8] {
// ::std::slice::from_raw_parts(
// (p as *const T) as *const u8,
// ::std::mem::size_of::<T>(),
// )
// }
fn init_hrs() -> Result<HrsService, ()> {
// analog to hrs_init
let hrs_service = HrsService::new(true, true, true)?;
// analog to hrsapp_init...
if hrs_service.with_location {
let loc = HrsBodySensorLocation::Finger as u8;
hrs_service.body_sensor_location.as_ref().unwrap().set_value(&loc.to_le_bytes());
}
let mut hrs_measure = HrsMeasure {
value: 1,
energy_expended: 100,
aRR_interval_values: [200],
valid_intervals: 1,
flags: HrsHrmFlags::VALUE_FORMAT_UINT16 | HrsHrmFlags::SENSOR_CONTACTS_PRESENT | HrsHrmFlags::SENSOR_CONTACTS_SUPPORTED | HrsHrmFlags::ENERGY_EXPENDED_PRESENT | HrsHrmFlags::RR_INTERVAL_PRESENT,
};
// TODO We need to keep that hrs_measure around somewhere, and get our task to start processing periodic events for it....
let mut bytes:[u8;8] = [0; 8];
LittleEndian::write_u16(&mut bytes[0..2], hrs_measure.value);
//bytes[0..2] = *hrs_measure.value.to_le_bytes();
LittleEndian::write_u16(&mut bytes[2..4], hrs_measure.energy_expended);
LittleEndian::write_u16(&mut bytes[4..6], hrs_measure.aRR_interval_values[0]);
bytes[6] = hrs_measure.valid_intervals;
bytes[7] = hrs_measure.flags.bits();
hrs_service.heart_rate_measurement.set_value(&bytes);
return Ok(hrs_service);
}
fn get_bd_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = lhci_info.device_type_id;
bytes[4] = (lhci_info.st_company_id & 0xff) as u8;
bytes[5] = (lhci_info.st_company_id >> 8 & 0xff) as u8;
BdAddr(bytes)
}
fn get_random_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = 0;
bytes[4] = 0x6E;
bytes[5] = 0xED;
BdAddr(bytes)
}
const BLE_CFG_IRK: [u8; 16] = [
0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
];
const BLE_CFG_ERK: [u8; 16] = [
0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21, 0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21,
];
fn get_irk() -> EncryptionKey {
EncryptionKey(BLE_CFG_IRK)
}
fn get_erk() -> EncryptionKey |
const DISCOVERY_PARAMS: DiscoverableParameters = DiscoverableParameters {
advertising_type: AdvertisingType::ConnectableUndirected,
advertising_interval: Some((
Duration::from_millis(ADV_INTERVAL_MS),
Duration::from_millis(ADV_INTERVAL_MS),
)),
address_type: OwnAddressType::Public,
filter_policy: AdvertisingFilterPolicy::AllowConnectionAndScan,
// Local name should be empty for the device to be recognized as an Eddystone beacon
local_name: Some(LocalName::Complete(BT_NAME)),
advertising_data: &[],
conn_interval: (None, None),
};
| {
EncryptionKey(BLE_CFG_ERK)
} | identifier_body |
main.rs | // Rust notes
fn main() {
    let x = String::from("Hey there");
    let y = String::from("Hello dawg woof");
    println!("{}", first_word(&x));
    println!("{}", second_word(&y));
}

// Returns the first space-delimited word (or the whole string if it has no space).
fn first_word(x: &String) -> &str {
    let bytes = x.as_bytes();
    for (i, &item) in bytes.iter().enumerate() {
        if item == b' ' {
            return &x[0..i];
        }
    }
    &x[..]
}
// Returns the second space-delimited word; falls back to everything after the
// first space (two-word case) or the whole string (one-word case).
fn second_word(x: &String) -> &str {
    let mut bytes = x.as_bytes();
    for (i, &item) in bytes.iter().enumerate() {
        if item == b' ' {
            let y = &x[i + 1..];
            bytes = y.as_bytes();
            for (j, &item) in bytes.iter().enumerate() {
                if item == b' ' {
                    return &y[0..j];
                }
            }
            // Return this IF there were only two words.
            return &y[..];
        }
    }
    &x[..]
}
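// A more idiomatic sketch of the same idea (my own alternative, not part of the
// notes above): split_whitespace() copes with repeated/leading spaces and yields
// word slices directly. `nth_word` is a name I made up for illustration.
// fn nth_word(x: &str, n: usize) -> Option<&str> {
//     x.split_whitespace().nth(n)
// }
// // usage: nth_word("Hello dawg woof", 1) == Some("dawg")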
//__________________________________________________________________________________________________________ //
//--------------------------------------------------//
//*** GENERAL NOTES ***
//*** Info: Random Notes I found either important, ***
//*** hard to remember, funny, cool, or who knows ***
//*** why but I added it to this list ***
//--------------------------------------------------//
// Notes:
// 1. Variables immutable by default
// 2. Constants are ALWAYS immutable, and the type must be annotated ex: const MAX_POINTS: u32 = 758000;
// 3. Scalar types: ints, floats, bools, and chars
// Length Signed Unsigned
// 8-bit i8 u8
// 16-bit i16 u16
// 32-bit i32 u32
// 64-bit i64 u64
// 128-bit i128 u128
// arch isize usize
// 4. Integer literals
// Number literals Example
// Decimal 98_222
// Hex 0xff
// Octal 0o77
// Binary 0b1111_0000
// Byte (u8 only) b'A'
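//    ex (my own sample values, showing each literal form above in use):
//    let dec = 98_222;      // underscore is just a visual separator
//    let hex = 0xff;        // 255
//    let oct = 0o77;        // 63
//    let bin = 0b1111_0000; // 240
//    let byte = b'A';       // 65 as a u8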
// 5. Floats can be either f32 or f64 (32 and 64 bits respectively)...default is f64
//ex:
// fn main() {
// let x = 2.0; // f64
// let y: f32 = 3.0; // f32
//}
// 6. bools example
// fn main() {
// let t = true;
// let f: bool = false; // with explicit type annotation
// }
// 7. Tuples AND Arrays are fixed length.
// 8. Arrays allocate on the stack instead of the heap; they are used instead of vectors when you KNOW the number of elements won't need to change, like an array
// containing the days of the week.
// 9. Functions can be defined anywhere in the program.
// 10. In Rust you cannot do statements like: x = y = 6
// 11. *IMPORTANT* Expressions (return something) do not end with a semicolon. Statements (return NOTHING) DO end with a semicolon.
// 12. Unlike other languages, conditions for if statements MUST be a bool.
// 13. *IMPORTANT* (Straight out of the rust book)
// To ensure memory safety, there’s one more detail to what happens in this situation in Rust. Instead of trying to copy the allocated memory, Rust considers s1 to no longer be valid and, therefore, Rust doesn’t need to free anything when s1 goes out of scope. Check out what happens when you try to use s1 after s2 is created; it won’t work:
// This code does not compile!
// let s1 = String::from("hello");
// let s2 = s1;
// println!("{}, world!", s1);
// You’ll get an error like this because Rust prevents you from using the invalidated reference:
// error[E0382]: use of moved value: `s1`
// --> src/main.rs:5:28
// |
// 3 | let s2 = s1;
// | -- value moved here
// 4 |
// 5 | println!("{}, world!", s1);
// | ^^ value used here after move
// |
// = note: move occurs because `s1` has type `std::string::String`, which does
// not implement the `Copy` trait
// 14. *FUTURE notes on copying, moving, and losing access to data
// 15. References are immutable by default just like variables; note that you can have only ONE mutable reference to a piece of data in a particular SCOPE (see the commented sketch after this list).
// 16.
// 17.
// 18.
// 19.
// 20.
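// *Sketch for note 15 (added illustration; assumes the standard borrow rules):*
// fn main() {
// let mut s = String::from("hello");
// let r1 = &mut s;
// // let r2 = &mut s; // ERROR: cannot borrow `s` as mutable more than once at a time
// r1.push_str(" world");
// println!("{}", r1);
// }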
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** INFINITE loop, unless x is ever >= 10 ***
//---------------------------------------------//
// fn main() {
// let mut x = 0;
// loop {
// x += 1;
// println!("Value of x is {}", x);
// if x >= 10 {
// break;
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** WHILE loop, while n <= 50, also only prints multiples of 5 ***
//---------------------------------------------//
// fn main() {
// let mut n = 1;
// while n <= 50 {
// if n % 5 == 0 {
// println!("n is {}", n);
// }
// n += 1;
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** FOR loop, a few options for iterating ***
//---------------------------------------------//
// fn main() {
// // makes a range from 0-129, nums is of type range
// let nums = 0..130;
// //must have an iterator
// for i in nums {
// println!("Num is {}", i );
// }
// // can use a vector
// let fruits = vec!["Watermelon", "Apple", "Orange"];
// //must have an iterator, calls iterator method of fruits vector
// for a in fruits.iter() {
// println!("Fruit type is: {}", a );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ENUM def && MATCH usage ***
//---------------------------------------------//
// enum cardinal_direction {
// North,
// South,
// East,
// West
// }
// fn main() {
// let player_cardinal:cardinal_direction = cardinal_direction:: North;
// //like a switch statement
// match player_cardinal {
// cardinal_direction::North => println!("Heading North!"),
// cardinal_direction::South => println!("Heading South!"),
// cardinal_direction::East => println!("Heading East!"),
// cardinal_direction::West => println!("Heading West!"),
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** CONST definition ***
//---------------------------------------------//
// const MAX_NUM: u8 = 20;
//
// fn main() {
// for n in 1..MAX_NUM {
// print!("{}", n );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** TUPLES ***
//---------------------------------------------//
// fn main() {
// // creating a tuple
// // NOTE: CAN have nested tuples
// let johnshon = (5, "ch", 20.5, 23);
// // to be more specific...
// let tup: (i32, f64, u8) = (500, 6.4, 1);
// println!("{}", johnshon.2 );
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ARRAYS ***
// Info: Indexed the same as C based langs
//---------------------------------------------//
// fn main() {
// let a = [1, 2, 3, 4, 5];
// // the 5 indicates the array size
// let a: [i32; 5] = [1, 2, 3, 4, 5];
// }
//
//__________________________________________________________________________________________________________ //
// * ALSO *
// Info: This shows a pattern to turn the tup into those 3 separate variables, also demonstrates how to index a tuple
// fn main() {
// let tup = (500, 6.4, 1);
//
// let (x, y, z) = tup;
//
// println!("The value of y is: {}", y);
//
// let x: (i32, f64, u8) = (500, 6.4, 1);
//
// let five_hundred = x.0;
//
// let six_point_four = x.1;
//
// let one = x.2;
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Calling Functions ***
// Info: You have to declare the type of each parameter
//---------------------------------------------//
// fn main() {
// let x = 5;
// print_number(x);
// if is_even(x){
// println!("Is even!");
// }
// else {
// println!("Is odd!", );
// }
// }
// fn print_number(num:u32){
// println!("number is {}", num);
// }
// // fn accepts u32 num and "returns" a bool
// fn is_even(num:u32) -> bool{
// return (num % 2 == 0);
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Shadowing ***
//---------------------------------------------//
// fn main() {
// let mut x = 20;
// {
// let x = 15;
// // do stuff with this 15, then x will go back to original value, called "shadowing"
// }
// // will print out 20
// println!("{}", x);
// }
//------------------------------------------------------//
//*** Shadowing Cont... *** //
// Info: This allows for an immutable var to //
// occasionally be redefined, but //
// in general still stay an immutable var //
//------------------------------------------------------//
// fn main() {
// let x = 5;
// let x = x + 1;
// let x = x * 2;
// println!("The value of x is: {}", x);
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Guessing Game Program ***
//*** Info: Demonstrates usage of rand crate and ***
//*** some basic error handling ***
//---------------------------------------------//
// use std::cmp::Ordering;
// use std:: io;
// use rand:: Rng;
// fn main() {
// // to learn how to use the crates, like rand, do cargo doc --open in the command line to open up documentation in your browser
// let secret_num = rand::thread_rng().gen_range(1, 101);
// loop {
// println!("Guess the number!");
// println!("Input your guess:");
// // A new instance of a String, which is growable and UTF-8 encoded text
// // '::' means new is an associated function of the String type, not the instance of the string. Similar to a static method in other languages.
// let mut guess = String::new();
// // calls read_line which takes in user input into guess, checks whether it returns err or ok, and depending on that it may or may not print out the failure message
// // basically the expect part does some error checking right there, and will print the passed in err message and crash the program if err is returned from read_line
// io::stdin().read_line(&mut guess)
// .expect("Failed to read"); //NOTE: read_line fills a String; parse() below defaults integers to i32 unless annotated (here: u32)
// // trim eliminates new lines and whitespaces that might exist in the string we are attempting to cast to an u32
// let guess: u32 = match guess.trim().parse() {
// Ok(num) => num,
// Err(_)=> continue, //the '_' is a catchall val that will continue if parse's result type return val is Err
// };
// println!("User guessed: {}", guess );
// match guess.cmp(&secret_num){
// Ordering::Less => println!("Too small!"),
// Ordering::Greater => println!("Too large!"),
// Ordering::Equal =>
// {
// println!("Correct!");
// break;
// }
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** References ***
//---------------------------------------------//
// fn main() {
// let mut jacob = 21;
// // creating a REFERENCE to jacob, will behave exactly as 'jacob' does
// //let jacobR = &jacob;
// // creating a mutable REFERENCE to jacob, that can be changed and WILL change 'jacob's value
// let jacobRM = &mut jacob;
// *jacobRM = 22;
// println!("{}", jacob );
// }
//__________________________________________________________________________________________________________ //
//* ANOTHER REFERENCES EXAMPLE *
// fn main() {
// let s1 = String::from("hello");
// let len = calculate_length(&s1);
// println!("The length of '{}' is {}.", s1, len);
// }
// fn calculate_length(s: &String) -> usize {
// s.len()
// }
//
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Range and Reverse in a For Loop ***
//---------------------------------------------//
// fn main() {
// for number in (1..4).rev() {
// println!("{}!", number);
// }
// println!("LIFTOFF!!!");
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Cloning ***
// info: So that s1 and s2 don't point to the same
// piece of memory; this is more expensive than the
// normal copying, which is really a "move"
//---------------------------------------------//
// fn main() {
// let s1 = String::from("hello");
// let s2 = s1.clone();
// println!("s1 = {}, s2 = {}", s1, s2);
// }
//
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Find Nth Fibonacci Number ***
//---------------------------------------------//
// use std::io;
// //use std::cmp::Ordering;
// fn main() {
// let mut num = String::new();
// println!("What nth Fibonacci number would you like to find?");
// // calls read_line which takes in user input into guess, checks whether it returns err or ok, and depending on that it will print out the failure message
// // basically the expect part does some error checking right there, and will print the passed in err message and crash the program if err is returned from read_line
// io::stdin().read_line(&mut num)
// .expect("Failed to read"); //NOTE: read_line fills a String; parse() below defaults integers to i32 unless annotated (here: u32)
// let num: u32 = num.trim().parse().expect("Was not an integer!");
// let nth = fib_num(num);
// println!("The nth Fibonnacci Number was: {}", nth );
// }
// fn fib_num(x: u32) -> u32{
// if x <= 1 {
// return x;
// }
// return fib_num(x-1) + fib_num(x-2);
// }
//
//__________________________________________________________________________________________________________ //
//
//---------------------------------------------------------------------------------------//
//*** SLICES: example, return first word in string, and second word in string function ***
//--------------------------------------------------------------------------------------//
//
// fn main () {
// let mut x = String::from("Hey there");
// let mut y = String::from("Hello dawg woof");
// println!("{}",first_word(&x));
// println!("{}",second_word(&y));
// }
// fn first_word(x : &String) -> &str {
// let bytes = x.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// return &x[0..i];
// }
// }
// &x[..]
// }
// fn second_word(x : &String) -> &str {
// let mut bytes = x.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// let y = &x[i+1..];
// bytes = y.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// return &y[0..i];
// }
// }
// // Return this IF there were only two words.
// return &y[..];
// }
// }
// &x[..]
// } | first_word | identifier_name |
main.rs | // Rust notes
fn main () {
let mut x = String::from("Hey there");
let mut y = String::from("Hello dawg woof");
println!("{}",first_word(&x));
println!("{}",second_word(&y));
}
fn first_word(x : &String) -> &str |
fn second_word(x : &String) -> &str {
let mut bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
let y = &x[i+1..];
bytes = y.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
return &y[0..i];
}
}
// Return this IF there were only two words.
return &y[..];
}
}
&x[..]
}
//__________________________________________________________________________________________________________ //
//--------------------------------------------------//
//*** GENERAL NOTES ***
//*** Info: Random Notes I found either important, ***
//*** hard to remember, funny, cool, or who knows ***
//*** why but I added it to this list ***
//--------------------------------------------------//
// Notes:
// 1. Variables immutable by default
// 2. Constants ALWAYS immutable, type must be annotated ex: const MAX_POINTS: u32 = 758000;
// 3. Scalar variables: Ints, floats, bools, and chars
// Length Signed Unsigned
// 8-bit i8 u8
// 16-bit i16 u16
// 32-bit i32 u32
// 64-bit i64 u64
// 128-bit i128 u128
// arch isize usize
// 4. Integer literals
// Number literals Example
// Decimal 98_222
// Hex 0xff
// Octal 0o77
// Binary 0b1111_0000
// Byte (u8 only) b'A'
// 5. Floats can be either f32 or f64 (32 and 64 bits respectively)...default is f64
//ex:
// fn main() {
// let x = 2.0; // f64
// let y: f32 = 3.0; // f32
//}
// 6. bools example
// fn main() {
// let t = true;
// let f: bool = false; // with explicit type annotation
// }
// 7. Tuples AND Arrays are fixed length.
// 8. Arrays allocate on the stack instead of the heap; they are used instead of vectors when you KNOW the number of elements won't need to change, like an array
// containing the days of the week.
// 9. Functions can be defined anywhere in the program.
// 10. In Rust you cannot do statements like: x = y = 6
// 11. *IMPORTANT* Expressions (return something) do not end with a semicolon. Statements (return NOTHING) DO end with a semicolon.
// 12. Unlike other languages, conditions for if statements MUST be a bool.
// 13. *IMPORTANT* (Straight out of the rust book)
// To ensure memory safety, there’s one more detail to what happens in this situation in Rust. Instead of trying to copy the allocated memory, Rust considers s1 to no longer be valid and, therefore, Rust doesn’t need to free anything when s1 goes out of scope. Check out what happens when you try to use s1 after s2 is created; it won’t work:
// This code does not compile!
// let s1 = String::from("hello");
// let s2 = s1;
// println!("{}, world!", s1);
// You’ll get an error like this because Rust prevents you from using the invalidated reference:
// error[E0382]: use of moved value: `s1`
// --> src/main.rs:5:28
// |
// 3 | let s2 = s1;
// | -- value moved here
// 4 |
// 5 | println!("{}, world!", s1);
// | ^^ value used here after move
// |
// = note: move occurs because `s1` has type `std::string::String`, which does
// not implement the `Copy` trait
// 14. *FUTURE* notes on copying, moving, and losing access to data (see the commented sketch after this list)
// 15. References are immutable by default just like variables; note that you can have only ONE mutable reference to a piece of data in a particular SCOPE.
// 16.
// 17.
// 18.
// 19.
// 20.
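// *Sketch for note 14 (added illustration): stack-only types like i32 implement `Copy`, so assignment copies instead of moving:*
// fn main() {
// let x = 5;
// let y = x; // x is copied, NOT moved
// println!("x = {}, y = {}", x, y); // both still usable
// let s1 = String::from("hi");
// let s2 = s1; // s1 is MOVED; using s1 after this would not compile
// println!("{}", s2);
// }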
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** INFINITE loop, unless x is ever >= 10 ***
//---------------------------------------------//
// fn main() {
// let mut x = 0;
// loop {
// x += 1;
// println!("Value of x is {}", x);
// if x >= 10 {
// break;
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** WHILE loop, while n <= 50, also only prints multiples of 5 ***
//---------------------------------------------//
// fn main() {
// let mut n = 1;
// while n <= 50 {
// if n % 5 == 0 {
// println!("n is {}", n);
// }
// n += 1;
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** FOR loop, a few options for iterating ***
//---------------------------------------------//
// fn main() {
// // makes a range from 0-129, nums is of type range
// let nums = 0..130;
// //must have an iterator
// for i in nums {
// println!("Num is {}", i );
// }
// // can use a vector
// let fruits = vec!["Watermelon", "Apple", "Orange"];
// //must have an iterator, calls iterator method of fruits vector
// for a in fruits.iter() {
// println!("Fruit type is: {}", a );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ENUM def && MATCH usage ***
//---------------------------------------------//
// enum cardinal_direction {
// North,
// South,
// East,
// West
// }
// fn main() {
// let player_cardinal:cardinal_direction = cardinal_direction:: North;
// //like a switch statement
// match player_cardinal {
// cardinal_direction::North => println!("Heading North!"),
// cardinal_direction::South => println!("Heading South!"),
// cardinal_direction::East => println!("Heading East!"),
// cardinal_direction::West => println!("Heading West!"),
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** CONST definition ***
//---------------------------------------------//
// const MAX_NUM: u8 = 20;
//
// fn main() {
// for n in 1..MAX_NUM {
// print!("{}", n );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** TUPLES ***
//---------------------------------------------//
// fn main() {
// // creating a tuple
// // NOTE: CAN have nested tuples
// let johnshon = (5, "ch", 20.5, 23);
// // to be more specific...
// let tup: (i32, f64, u8) = (500, 6.4, 1);
// println!("{}", johnshon.2 );
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ARRAYS ***
// Info: Indexed the same as C based langs
//---------------------------------------------//
// fn main() {
// let a = [1, 2, 3, 4, 5];
// // the 5 indicates the array size
// let a: [i32; 5] = [1, 2, 3, 4, 5];
// }
//
//__________________________________________________________________________________________________________ //
// * ALSO *
// Info: This shows a pattern to turn the tup into those 3 separate variables, also demonstrates how to index a tuple
// fn main() {
// let tup = (500, 6.4, 1);
//
// let (x, y, z) = tup;
//
// println!("The value of y is: {}", y);
//
// let x: (i32, f64, u8) = (500, 6.4, 1);
//
// let five_hundred = x.0;
//
// let six_point_four = x.1;
//
// let one = x.2;
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Calling Functions ***
// Info: You have to declare the type of each parameter
//---------------------------------------------//
// fn main() {
// let x = 5;
// print_number(x);
// if is_even(x){
// println!("Is even!");
// }
// else {
// println!("Is odd!", );
// }
// }
// fn print_number(num:u32){
// println!("number is {}", num);
// }
// // fn accepts u32 num and "returns" a bool
// fn is_even(num:u32) -> bool{
// return (num % 2 == 0);
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Shadowing ***
//---------------------------------------------//
// fn main() {
// let mut x = 20;
// {
// let x = 15;
// // do stuff with this 15, then x will go back to original value, called "shadowing"
// }
// // will print out 20
// println!("{}", x);
// }
//------------------------------------------------------//
//*** Shadowing Cont... *** //
// Info: This allows for an immutable var to //
// occasionally be redefined, but //
// in general still stay an immutable var //
//------------------------------------------------------//
// fn main() {
// let x = 5;
// let x = x + 1;
// let x = x * 2;
// println!("The value of x is: {}", x);
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Guessing Game Program ***
//*** Info: Demonstrates usage of rand crate and ***
//*** some basic error handling ***
//---------------------------------------------//
// use std::cmp::Ordering;
// use std:: io;
// use rand:: Rng;
// fn main() {
// // to learn how to use the crates, like rand, do cargo doc --open in the command line to open up documentation in your browser
// let secret_num = rand::thread_rng().gen_range(1, 101);
// loop {
// println!("Guess the number!");
// println!("Input your guess:");
// // A new instance of a String, which is growable and UTF-8 encoded text
// // '::' means new is an associated function of the String type, not the instance of the string. Similar to a static method in other languages.
// let mut guess = String::new();
// // calls read_line which takes in user input into guess, checks whether it returns err or ok, and depending on that it may or may not print out the failure message
// // basically the expect part does some error checking right there, and will print the passed in err message and crash the program if err is returned from read_line
// io::stdin().read_line(&mut guess)
// .expect("Failed to read"); //NOTE: read_line fills a String; parse() below defaults integers to i32 unless annotated (here: u32)
// // trim eliminates new lines and whitespaces that might exist in the string we are attempting to cast to an u32
// let guess: u32 = match guess.trim().parse() {
// Ok(num) => num,
// Err(_)=> continue, //the '_' is a catchall val that will continue if parse's result type return val is Err
// };
// println!("User guessed: {}", guess );
// match guess.cmp(&secret_num){
// Ordering::Less => println!("Too small!"),
// Ordering::Greater => println!("Too large!"),
// Ordering::Equal =>
// {
// println!("Correct!");
// break;
// }
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** References ***
//---------------------------------------------//
// fn main() {
// let mut jacob = 21;
// // creating a REFERENCE to jacob, will behave exactly as 'jacob' does
// //let jacobR = &jacob;
// // creating a mutable REFERENCE to jacob, that can be changed and WILL change 'jacob's value
// let jacobRM = &mut jacob;
// *jacobRM = 22;
// println!("{}", jacob );
// }
//__________________________________________________________________________________________________________ //
//* ANOTHER REFERENCES EXAMPLE *
// fn main() {
// let s1 = String::from("hello");
// let len = calculate_length(&s1);
// println!("The length of '{}' is {}.", s1, len);
// }
// fn calculate_length(s: &String) -> usize {
// s.len()
// }
//
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Range and Reverse in a For Loop ***
//---------------------------------------------//
// fn main() {
// for number in (1..4).rev() {
// println!("{}!", number);
// }
// println!("LIFTOFF!!!");
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Cloning ***
// info: So that s1 and s2 don't point to the same
// piece of memory; this is more expensive than the
// normal copying, which is really a "move"
//---------------------------------------------//
// fn main() {
// let s1 = String::from("hello");
// let s2 = s1.clone();
// println!("s1 = {}, s2 = {}", s1, s2);
// }
//
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Find Nth Fibonacci Number ***
//---------------------------------------------//
// use std::io;
// //use std::cmp::Ordering;
// fn main() {
// let mut num = String::new();
// println!("What nth Fibonacci number would you like to find?");
// // calls read_line which takes in user input into guess, checks whether it returns err or ok, and depending on that it will print out the failure message
// // basically the expect part does some error checking right there, and will print the passed in err message and crash the program if err is returned from read_line
// io::stdin().read_line(&mut num)
// .expect("Failed to read"); //NOTE: read_line fills a String; parse() below defaults integers to i32 unless annotated (here: u32)
// let num: u32 = num.trim().parse().expect("Was not an integer!");
// let nth = fib_num(num);
// println!("The nth Fibonnacci Number was: {}", nth );
// }
// fn fib_num(x: u32) -> u32{
// if x <= 1 {
// return x;
// }
// return fib_num(x-1) + fib_num(x-2);
// }
//
//__________________________________________________________________________________________________________ //
//
//---------------------------------------------------------------------------------------//
//*** SLICES: example, return first word in string, and second word in string function ***
//--------------------------------------------------------------------------------------//
//
// fn main () {
// let mut x = String::from("Hey there");
// let mut y = String::from("Hello dawg woof");
// println!("{}",first_word(&x));
// println!("{}",second_word(&y));
// }
// fn first_word(x : &String) -> &str {
// let bytes = x.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// return &x[0..i];
// }
// }
// &x[..]
// }
// fn second_word(x : &String) -> &str {
// let mut bytes = x.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// let y = &x[i+1..];
// bytes = y.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// return &y[0..i];
// }
// }
// // Return this IF there were only two words.
// return &y[..];
// }
// }
// &x[..]
// } | {
let bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &x[0..i];
}
}
&x[..]
} | identifier_body |
main.rs | // Rust notes
fn main () {
let mut x = String::from("Hey there");
let mut y = String::from("Hello dawg woof");
println!("{}",first_word(&x));
println!("{}",second_word(&y));
}
fn first_word(x : &String) -> &str {
let bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
return &x[0..i];
}
} | &x[..]
}
fn second_word(x : &String) -> &str {
let mut bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
let y = &x[i+1..];
bytes = y.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
return &y[0..i];
}
}
// Return this IF there were only two words.
return &y[..];
}
}
&x[..]
}
//__________________________________________________________________________________________________________ //
//--------------------------------------------------//
//*** GENERAL NOTES ***
//*** Info: Random Notes I found either important, ***
//*** hard to remember, funny, cool, or who knows ***
//*** why but I added it to this list ***
//--------------------------------------------------//
// Notes:
// 1. Variables immutable by default
// 2. Constants ALWAYS immutable, type must be annotated ex: const MAX_POINTS: u32 = 758000;
// 3. Scalar variables: Ints, floats, bools, and chars
// Length Signed Unsigned
// 8-bit i8 u8
// 16-bit i16 u16
// 32-bit i32 u32
// 64-bit i64 u64
// 128-bit i128 u128
// arch isize usize
// 4. Integer literals
// Number literals Example
// Decimal 98_222
// Hex 0xff
// Octal 0o77
// Binary 0b1111_0000
// Byte (u8 only) b'A'
// 5. Floats can be either f32 or f64 (32 and 64 bits respectively)...default is f64
//ex:
// fn main() {
// let x = 2.0; // f64
// let y: f32 = 3.0; // f32
//}
// 6. bools example
// fn main() {
// let t = true;
// let f: bool = false; // with explicit type annotation
// }
// 7. Tuples AND Arrays are fixed length.
// 8. Arrays allocate on the stack instead of the heap; they are used instead of vectors when you KNOW the number of elements won't need to change, like an array
// containing the days of the week.
// 9. Functions can be defined anywhere in the program.
// 10. In Rust you cannot do statements like: x = y = 6
// 11. *IMPORTANT* Expressions (return something) do not end with a semicolon. Statements (return NOTHING) DO end with a semicolon (see the commented sketch after this list).
// 12. Unlike other languages, conditions for if statements MUST be a bool.
// 13. *IMPORTANT* (Straight out of the rust book)
// To ensure memory safety, there’s one more detail to what happens in this situation in Rust. Instead of trying to copy the allocated memory, Rust considers s1 to no longer be valid and, therefore, Rust doesn’t need to free anything when s1 goes out of scope. Check out what happens when you try to use s1 after s2 is created; it won’t work:
// This code does not compile!
// let s1 = String::from("hello");
// let s2 = s1;
// println!("{}, world!", s1);
// You’ll get an error like this because Rust prevents you from using the invalidated reference:
// error[E0382]: use of moved value: `s1`
// --> src/main.rs:5:28
// |
// 3 | let s2 = s1;
// | -- value moved here
// 4 |
// 5 | println!("{}, world!", s1);
// | ^^ value used here after move
// |
// = note: move occurs because `s1` has type `std::string::String`, which does
// not implement the `Copy` trait
// 14. *FUTURE notes on copying, moving, and losing access to data
// 15. References are immutable by default just like variables; note that you can have only ONE mutable reference to a piece of data in a particular SCOPE.
// 16.
// 17.
// 18.
// 19.
// 20.
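// *Sketch for note 11 (added illustration): a block is an expression, so its last line without a semicolon becomes the block's value:*
// fn main() {
// let y = {
// let x = 3;
// x + 1 // no semicolon: the block evaluates to 4
// }; // with a semicolon after `x + 1`, the block would evaluate to ()
// println!("The value of y is: {}", y);
// }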
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** INFINITE loop, unless x is ever >= 10 ***
//---------------------------------------------//
// fn main() {
// let mut x = 0;
// loop {
// x += 1;
// println!("Value of x is {}", x);
// if x >= 10 {
// break;
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** WHILE loop, while n <= 50, also only prints multiples of 5 ***
//---------------------------------------------//
// fn main() {
// let mut n = 1;
// while n <= 50 {
// if n % 5 == 0 {
// println!("n is {}", n);
// }
// n += 1;
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** FOR loop, a few options for iterating ***
//---------------------------------------------//
// fn main() {
// // makes a range from 0-129, nums is of type range
// let nums = 0..130;
// //must have an iterator
// for i in nums {
// println!("Num is {}", i );
// }
// // can use a vector
// let fruits = vec!["Watermelon", "Apple", "Orange"];
// //must have an iterator, calls iterator method of fruits vector
// for a in fruits.iter() {
// println!("Fruit type is: {}", a );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ENUM def && MATCH usage ***
//---------------------------------------------//
// enum cardinal_direction {
// North,
// South,
// East,
// West
// }
// fn main() {
// let player_cardinal:cardinal_direction = cardinal_direction:: North;
// //like a switch statement
// match player_cardinal {
// cardinal_direction::North => println!("Heading North!"),
// cardinal_direction::South => println!("Heading South!"),
// cardinal_direction::East => println!("Heading East!"),
// cardinal_direction::West => println!("Heading West!"),
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** CONST definition ***
//---------------------------------------------//
// const MAX_NUM: u8 = 20;
//
// fn main() {
// for n in 1..MAX_NUM {
// print!("{}", n );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** TUPLES ***
//---------------------------------------------//
// fn main() {
// // creating a tuple
// // NOTE: CAN have nested tuples
// let johnshon = (5, "ch", 20.5, 23);
// // to be more specific...
// let tup: (i32, f64, u8) = (500, 6.4, 1);
// println!("{}", johnshon.2 );
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ARRAYS ***
// Info: Indexed the same as C based langs
//---------------------------------------------//
// fn main() {
// let a = [1, 2, 3, 4, 5];
// // the 5 indicates the array size
// let a: [i32; 5] = [1, 2, 3, 4, 5];
// }
//
//__________________________________________________________________________________________________________ //
// * ALSO *
// Info: This shows a pattern to turn the tup into those 3 separate variables, also demonstrates how to index a tuple
// fn main() {
// let tup = (500, 6.4, 1);
//
// let (x, y, z) = tup;
//
// println!("The value of y is: {}", y);
//
// let x: (i32, f64, u8) = (500, 6.4, 1);
//
// let five_hundred = x.0;
//
// let six_point_four = x.1;
//
// let one = x.2;
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Calling Functions ***
// Info: You have to declare the type of each parameter
//---------------------------------------------//
// fn main() {
// let x = 5;
// print_number(x);
// if is_even(x){
// println!("Is even!");
// }
// else {
// println!("Is odd!", );
// }
// }
// fn print_number(num:u32){
// println!("number is {}", num);
// }
// // fn accepts u32 num and "returns" a bool
// fn is_even(num:u32) -> bool{
// return (num % 2 == 0);
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Shadowing ***
//---------------------------------------------//
// fn main() {
// let mut x = 20;
// {
// let x = 15;
// // do stuff with this 15, then x will go back to original value, called "shadowing"
// }
// // will print out 20
// println!("{}", x);
// }
//------------------------------------------------------//
//*** Shadowing Cont... *** //
// Info: This allows for an immutable var to //
// occasionally be redefined, but //
// in general still stay an immutable var //
//------------------------------------------------------//
// fn main() {
// let x = 5;
// let x = x + 1;
// let x = x * 2;
// println!("The value of x is: {}", x);
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Guessing Game Program ***
//*** Info: Demonstrates usage of rand crate and ***
//*** some basic error handling ***
//---------------------------------------------//
// use std::cmp::Ordering;
// use std:: io;
// use rand:: Rng;
// fn main() {
// // to learn how to use the crates, like rand, do cargo doc --open in the command line to open up documentation in your browser
// let secret_num = rand::thread_rng().gen_range(1, 101);
// loop {
// println!("Guess the number!");
// println!("Input your guess:");
// // A new instance of a String, which is growable and UTF-8 encoded text
// // '::' means new is an associated function of the String type, not the instance of the string. Similar to a static method in other languages.
// let mut guess = String::new();
// // calls read_line which takes in user input into guess, checks whether it returns err or ok, and depending on that it may or may not print out the failure message
// // basically the expect part does some error checking right there, and will print the passed in err message and crash the program if err is returned from read_line
// io::stdin().read_line(&mut guess)
// .expect("Failed to read"); //NOTE: read_line fills a String; parse() below defaults integers to i32 unless annotated (here: u32)
// // trim eliminates new lines and whitespaces that might exist in the string we are attempting to cast to an u32
// let guess: u32 = match guess.trim().parse() {
// Ok(num) => num,
// Err(_)=> continue, //the '_' is a catchall val that will continue if parse's result type return val is Err
// };
// println!("User guessed: {}", guess );
// match guess.cmp(&secret_num){
// Ordering::Less => println!("Too small!"),
// Ordering::Greater => println!("Too large!"),
// Ordering::Equal =>
// {
// println!("Correct!");
// break;
// }
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** References ***
//---------------------------------------------//
// fn main() {
// let mut jacob = 21;
// // creating a REFERENCE to jacob, will behave exactly as 'jacob' does
// //let jacobR = &jacob;
// // creating a mutable REFERENCE to jacob, that can be changed and WILL change 'jacob's value
// let jacobRM = &mut jacob;
// *jacobRM = 22;
// println!("{}", jacob );
// }
//__________________________________________________________________________________________________________ //
//* ANOTHER REFERENCES EXAMPLE *
// fn main() {
// let s1 = String::from("hello");
// let len = calculate_length(&s1);
// println!("The length of '{}' is {}.", s1, len);
// }
// fn calculate_length(s: &String) -> usize {
// s.len()
// }
//
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Range and Reverse in a For Loop ***
//---------------------------------------------//
// fn main() {
// for number in (1..4).rev() {
// println!("{}!", number);
// }
// println!("LIFTOFF!!!");
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Cloning ***
// info: So that s1 and s2 don't point to the same
// piece of memory; this is more expensive than the
// normal copying, which is really a "move"
//---------------------------------------------//
// fn main() {
// let s1 = String::from("hello");
// let s2 = s1.clone();
// println!("s1 = {}, s2 = {}", s1, s2);
// }
//
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Find Nth Fibonacci Number ***
//---------------------------------------------//
// use std::io;
// //use std::cmp::Ordering;
// fn main() {
// let mut num = String::new();
// println!("What nth Fibonacci number would you like to find?");
// // calls read_line which takes in user input into guess, checks whether it returns err or ok, and depending on that it will print out the failure message
// // basically the expect part does some error checking right there, and will print the passed in err message and crash the program if err is returned from read_line
// io::stdin().read_line(&mut num)
// .expect("Failed to read"); //NOTE: read_line fills a String; parse() below defaults integers to i32 unless annotated (here: u32)
// let num: u32 = num.trim().parse().expect("Was not an integer!");
// let nth = fib_num(num);
// println!("The nth Fibonnacci Number was: {}", nth );
// }
// fn fib_num(x: u32) -> u32{
// if x <= 1 {
// return x;
// }
// return fib_num(x-1) + fib_num(x-2);
// }
//
//__________________________________________________________________________________________________________ //
//
//---------------------------------------------------------------------------------------//
//*** SLICES: example, return first word in string, and second word in string function ***
//--------------------------------------------------------------------------------------//
//
// fn main () {
// let mut x = String::from("Hey there");
// let mut y = String::from("Hello dawg woof");
// println!("{}",first_word(&x));
// println!("{}",second_word(&y));
// }
// fn first_word(x : &String) -> &str {
// let bytes = x.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// return &x[0..i];
// }
// }
// &x[..]
// }
// fn second_word(x : &String) -> &str {
// let mut bytes = x.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// let y = &x[i+1..];
// bytes = y.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// return &y[0..i];
// }
// }
// // Return this IF there were only two words.
// return &y[..];
// }
// }
// &x[..]
// } | random_line_split |
|
main.rs | // Rust notes
fn main () {
let mut x = String::from("Hey there");
let mut y = String::from("Hello dawg woof");
println!("{}",first_word(&x));
println!("{}",second_word(&y));
}
fn first_word(x : &String) -> &str {
let bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
return &x[0..i];
}
}
&x[..]
}
fn second_word(x : &String) -> &str {
let mut bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' |
}
&x[..]
}
//__________________________________________________________________________________________________________ //
//--------------------------------------------------//
//*** GENERAL NOTES ***
//*** Info: Random Notes I found either important, ***
//*** hard to remember, funny, cool, or who knows ***
//*** why but I added it to this list ***
//--------------------------------------------------//
// Notes:
// 1. Variables immutable by default
// 2. Constants ALWAYS immutable, type must be annotated ex: const MAX_POINTS: u32 = 758000;
// 3. Scalar variables: Ints, floats, bools, and chars
// Length Signed Unsigned
// 8-bit i8 u8
// 16-bit i16 u16
// 32-bit i32 u32
// 64-bit i64 u64
// 128-bit i128 u128
// arch isize usize
// 4. Integer literals
// Number literals Example
// Decimal 98_222
// Hex 0xff
// Octal 0o77
// Binary 0b1111_0000
// Byte (u8 only) b'A'
// 5. Floats can be either f32 or f64 (32 and 64 bits respectively)...default is f64
//ex:
// fn main() {
// let x = 2.0; // f64
// let y: f32 = 3.0; // f32
//}
// 6. bools example
// fn main() {
// let t = true;
// let f: bool = false; // with explicit type annotation
// }
// 7. Tuples AND Arrays are fixed length.
// 8. Arrays allocate on the stack instead of the heap; they are used instead of vectors when you KNOW the number of elements won't need to change, like an array
// containing the days of the week.
// 9. Functions can be defined anywhere in the program.
// 10. In Rust you cannot do statements like: x = y = 6
// 11. *IMPORTANT* Expressions (return something) do not end with a semicolon. Statements (return NOTHING) DO end with a semicolon.
// 12. Unlike other languages, conditions for if statements MUST be a bool (see the commented sketch after this list).
// 13. *IMPORTANT* (Straight out of the rust book)
// To ensure memory safety, there’s one more detail to what happens in this situation in Rust. Instead of trying to copy the allocated memory, Rust considers s1 to no longer be valid and, therefore, Rust doesn’t need to free anything when s1 goes out of scope. Check out what happens when you try to use s1 after s2 is created; it won’t work:
// This code does not compile!
// let s1 = String::from("hello");
// let s2 = s1;
// println!("{}, world!", s1);
// You’ll get an error like this because Rust prevents you from using the invalidated reference:
// error[E0382]: use of moved value: `s1`
// --> src/main.rs:5:28
// |
// 3 | let s2 = s1;
// | -- value moved here
// 4 |
// 5 | println!("{}, world!", s1);
// | ^^ value used here after move
// |
// = note: move occurs because `s1` has type `std::string::String`, which does
// not implement the `Copy` trait
// 14. *FUTURE notes on copying, moving, and losing access to data
// 15. References are immutable by default just like variables; note that you can have only ONE mutable reference to a piece of data in a particular SCOPE.
// 16.
// 17.
// 18.
// 19.
// 20.
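// *Sketch for note 12 (added illustration): non-bool conditions do not coerce to bool:*
// fn main() {
// let number = 3;
// // if number { ... } // ERROR: expected `bool`, found integer
// if number != 0 { // compare explicitly instead
// println!("number was something other than zero");
// }
// }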
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** INFINITE loop, unless x is ever >= 10 ***
//---------------------------------------------//
// fn main() {
// let mut x = 0;
// loop {
// x += 1;
// println!("Value of x is {}", x);
// if x >= 10 {
// break;
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** WHILE loop, while n <= 50, also only prints multiples of 5 ***
//---------------------------------------------//
// fn main() {
// let mut n = 1;
// while n <= 50 {
// if n % 5 == 0 {
// println!("n is {}", n);
// }
// n += 1;
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** FOR loop, a few options for iterating ***
//---------------------------------------------//
// fn main() {
// // makes a range from 0-129, nums is of type range
// let nums = 0..130;
// //must have an iterator
// for i in nums {
// println!("Num is {}", i );
// }
// // can use a vector
// let fruits = vec!["Watermelon", "Apple", "Orange"];
// //must have an iterator, calls iterator method of fruits vector
// for a in fruits.iter() {
// println!("Fruit type is: {}", a );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ENUM def && MATCH usage ***
//---------------------------------------------//
// enum cardinal_direction {
// North,
// South,
// East,
// West
// }
// fn main() {
// let player_cardinal:cardinal_direction = cardinal_direction:: North;
// //like a switch statement
// match player_cardinal {
// cardinal_direction::North => println!("Heading North!"),
// cardinal_direction::South => println!("Heading South!"),
// cardinal_direction::East => println!("Heading East!"),
// cardinal_direction::West => println!("Heading West!"),
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** CONST definition ***
//---------------------------------------------//
// const MAX_NUM: u8 = 20;
//
// fn main() {
// for n in 1..MAX_NUM {
// print!("{}", n );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** TUPLES ***
//---------------------------------------------//
// fn main() {
// // creating a tuple
// // NOTE: CAN have nested tuples
// let johnshon = (5, "ch", 20.5, 23);
// // to be more specific...
// let tup: (i32, f64, u8) = (500, 6.4, 1);
// println!("{}", johnshon.2 );
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ARRAYS ***
// Info: Indexed the same as C based langs
//---------------------------------------------//
// fn main() {
// let a = [1, 2, 3, 4, 5];
// // the 5 indicates the array size
// let a: [i32; 5] = [1, 2, 3, 4, 5];
// }
//
//__________________________________________________________________________________________________________ //
// * ALSO *
// Info: This shows a pattern to turn the tup into those 3 separate variables, also demonstrates how to index a tuple
// fn main() {
// let tup = (500, 6.4, 1);
//
// let (x, y, z) = tup;
//
// println!("The value of y is: {}", y);
//
// let x: (i32, f64, u8) = (500, 6.4, 1);
//
// let five_hundred = x.0;
//
// let six_point_four = x.1;
//
// let one = x.2;
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Calling Functions ***
// Info: You have to declare the type of each parameter
//---------------------------------------------//
// fn main() {
// let x = 5;
// print_number(x);
// if is_even(x){
// println!("Is even!");
// }
// else {
// println!("Is odd!", );
// }
// }
// fn print_number(num:u32){
// println!("number is {}", num);
// }
// // fn accepts u32 num and "returns" a bool
// fn is_even(num:u32) -> bool{
// return (num % 2 == 0);
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Shadowing ***
//---------------------------------------------//
// fn main() {
// let mut x = 20;
// {
// let x = 15;
// // do stuff with this 15, then x will go back to original value, called "shadowing"
// }
// // will print out 20
// println!("{}", x);
// }
//------------------------------------------------------//
//*** Shadowing Cont... *** //
// Info: This allows for an immutable var to //
// occasionally be redefined, but //
// in general still stay an immutable var //
//------------------------------------------------------//
// fn main() {
// let x = 5;
// let x = x + 1;
// let x = x * 2;
// println!("The value of x is: {}", x);
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Guessing Game Program ***
//*** Info: Demonstrates usage of rand crate and ***
//*** some basic error handling ***
//---------------------------------------------//
// use std::cmp::Ordering;
// use std:: io;
// use rand:: Rng;
// fn main() {
// // to learn how to use the crates, like rand, do cargo doc --open in the command line to open up documentation in your browser
// let secret_num = rand::thread_rng().gen_range(1, 101);
// loop {
// println!("Guess the number!");
// println!("Input your guess:");
// // A new instance of a String, which is growable and UTF-8 encoded text
// // '::' means new is an associated function of the String type, not the instance of the string. Similar to a static method in other languages.
// let mut guess = String::new();
// // calls read_line which takes in user input into guess, checks whether it returns err or ok, and depending on that it may or may not print out the failure message
// // basically the expect part does some error checking right there, and will print the passed in err message and crash the program if err is returned from read_line
// io::stdin().read_line(&mut guess)
// .expect("Failed to read"); //NOTE: read_line fills a String; parse() below defaults integers to i32 unless annotated (here: u32)
// // trim eliminates new lines and whitespaces that might exist in the string we are attempting to cast to an u32
// let guess: u32 = match guess.trim().parse() {
// Ok(num) => num,
// Err(_)=> continue, //the '_' is a catchall val that will continue if parse's result type return val is Err
// };
// println!("User guessed: {}", guess );
// match guess.cmp(&secret_num){
// Ordering::Less => println!("Too small!"),
// Ordering::Greater => println!("Too large!"),
// Ordering::Equal =>
// {
// println!("Correct!");
// break;
// }
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** References ***
//---------------------------------------------//
// fn main() {
// let mut jacob = 21;
// // creating a REFERENCE to jacob, will behave exactly as 'jacob' does
// //let jacobR = &jacob;
// // creating a mutable REFERENCE to jacob, that can be changed and WILL change 'jacob's value
// let jacobRM = &mut jacob;
// *jacobRM = 22;
// println!("{}", jacob );
// }
//__________________________________________________________________________________________________________ //
//* ANOTHER REFERENCES EXAMPLE *
// fn main() {
// let s1 = String::from("hello");
// let len = calculate_length(&s1);
// println!("The length of '{}' is {}.", s1, len);
// }
// fn calculate_length(s: &String) -> usize {
// s.len()
// }
//
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Range and Reverse in a For Loop ***
//---------------------------------------------//
// fn main() {
// for number in (1..4).rev() {
// println!("{}!", number);
// }
// println!("LIFTOFF!!!");
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Cloning ***
// info: So that s1 and s2 don't point to the same
// piece of memory; this is more expensive than the
// normal copying, which is really a "move"
//---------------------------------------------//
// fn main() {
// let s1 = String::from("hello");
// let s2 = s1.clone();
// println!("s1 = {}, s2 = {}", s1, s2);
// }
//
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Find Nth Fibonacci Number ***
//---------------------------------------------//
// use std::io;
// //use std::cmp::Ordering;
// fn main() {
// let mut num = String::new();
// println!("What nth Fibonacci number would you like to find?");
// // calls read_line which takes in user input into guess, checks whether it returns err or ok, and depending on that it will print out the failure message
// // basically the expect part does some error checking right there, and will print the passed in err message and crash the program if err is returned from read_line
// io::stdin().read_line(&mut num)
// .expect("Failed to read"); //NOTE: read_line fills a String; parse() below defaults integers to i32 unless annotated (here: u32)
// let num: u32 = num.trim().parse().expect("Was not an integer!");
// let nth = fib_num(num);
// println!("The nth Fibonnacci Number was: {}", nth );
// }
// fn fib_num(x: u32) -> u32{
// if x <= 1 {
// return x;
// }
// return fib_num(x-1) + fib_num(x-2);
// }
//
//__________________________________________________________________________________________________________ //
//
//---------------------------------------------------------------------------------------//
//*** SLICES: example, return first word in string, and second word in string function ***
//--------------------------------------------------------------------------------------//
//
// fn main () {
// let mut x = String::from("Hey there");
// let mut y = String::from("Hello dawg woof");
// println!("{}",first_word(&x));
// println!("{}",second_word(&y));
// }
// fn first_word(x : &String) -> &str {
// let bytes = x.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// return &x[0..i];
// }
// }
// &x[..]
// }
// fn second_word(x : &String) -> &str {
// let mut bytes = x.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// let y = &x[i+1..];
// bytes = y.as_bytes();
// for (i, &item) in bytes.iter().enumerate() {
// if item == b' '{
// return &y[0..i];
// }
// }
// // Return this IF there were only two words.
// return &y[..];
// }
// }
// &x[..]
// } | {
let y = &x[i+1..];
bytes = y.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &y[0..i];
}
}
// Return this IF there were only two words.
return &y[..];
} | conditional_block |
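// NOTE (added sketch, not part of the dataset row above): the same word
// splitting expressed with the standard library, assuming stable Rust.
// fn first_word(x: &str) -> &str {
//     x.split_whitespace().next().unwrap_or("")
// }
// fn second_word(x: &str) -> &str {
//     x.split_whitespace().nth(1).unwrap_or("")
// }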
client.rs | use std::io;
use std::io::prelude::*;
use std::net::TcpStream;
use std::cmp;
use rand::{Rng, OsRng};
use alert;
use tls_result::{TlsResult, TlsError, TlsErrorKind};
use tls_result::TlsErrorKind::{UnexpectedMessage, InternalError, DecryptError, IllegalParameter};
use util::{SurugaError, crypto_compare};
use cipher::{self, Aead};
use cipher::prf::Prf;
use crypto::sha2::sha256;
use tls_item::{TlsItem, DummyItem};
use handshake::{self, Handshake};
use tls::{TlsReader, TlsWriter, TLS_VERSION};
// handshake is done during construction.
pub struct TlsClient<R: Read, W: Write> {
pub reader: TlsReader<R>,
pub writer: TlsWriter<W>,
pub rng: OsRng,
buf: Vec<u8>,
}
impl<R: Read, W: Write> TlsClient<R, W> {
pub fn new(reader: R, writer: W, rng: OsRng) -> TlsResult<TlsClient<R, W>> {
let mut client = TlsClient {
reader: TlsReader::new(reader),
writer: TlsWriter::new(writer),
rng: rng,
buf: Vec::new(),
};
// handshake failed. send alert if necessary
match client.handshake() {
Ok(()) => {}
Err(err) => return Err(client.send_tls_alert(err)),
}
Ok(client)
}
#[inline]
pub fn reader(&mut self) -> &mut R {
self.reader.get_mut()
}
#[inline]
pub fn writer(&mut self) -> &mut W {
self.writer.get_mut()
}
// this does not send alert when error occurs
fn handshake(&mut self) -> TlsResult<()> {
// expect specific HandshakeMessage. otherwise return Err
macro_rules! expect {
($var:ident) => ({
match try!(self.reader.read_handshake()) {
handshake::Handshake::$var(data) => data,
_ => return tls_err!(UnexpectedMessage, "unexpected handshake message found"),
}
})
}
let cli_random = {
let mut random_bytes = [0u8; 32];
self.rng.fill_bytes(&mut random_bytes);
random_bytes.to_vec()
};
let random = try!(handshake::Random::new(cli_random.clone()));
// the only cipher we currently support
let cipher_suite = cipher::CipherSuite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256;
let curve_list = vec!(handshake::NamedCurve::secp256r1);
let curve_list = try!(handshake::Extension::new_elliptic_curve_list(curve_list));
let format_list = vec!(handshake::ECPointFormat::uncompressed);
let format_list = try!(handshake::Extension::new_ec_point_formats(format_list));
let extensions = vec!(curve_list, format_list);
let client_hello = try!(Handshake::new_client_hello(random, cipher_suite, extensions));
try!(self.writer.write_handshake(&client_hello));
let server_hello_data = expect!(server_hello);
{
let server_major = server_hello_data.server_version.major;
let server_minor = server_hello_data.server_version.minor;
if (server_major, server_minor) != TLS_VERSION {
return tls_err!(IllegalParameter,
"wrong server version: {} {}",
server_major,
server_minor);
}
if server_hello_data.cipher_suite != cipher_suite {
return tls_err!(IllegalParameter,
"cipher suite mismatch: found {:?}",
server_hello_data.cipher_suite);
}
if server_hello_data.compression_method != handshake::CompressionMethod::null {
return tls_err!(IllegalParameter, "compression method mismatch");
}
// FIXME: check if server sent unknown extension
// it is currently done by just not understanding any extensions
// other than we used.
}
// we always expect certificate.
let certificate_list = expect!(certificate);
// TODO: cert validation not implemented yet
// we always use server key exchange
let server_key_ex_data = expect!(server_key_exchange);
let kex = cipher_suite.new_kex();
let (key_data, pre_master_secret) = try!(kex.compute_keys(&server_key_ex_data,
&mut self.rng));
expect!(server_hello_done);
let client_key_exchange = try!(Handshake::new_client_key_exchange(key_data));
try!(self.writer.write_handshake(&client_key_exchange));
try!(self.writer.write_change_cipher_spec());
// SECRET
let master_secret = {
let mut label_seed = b"master secret".to_vec();
label_seed.extend(&cli_random);
label_seed.extend(&server_hello_data.random[..]);
let mut prf = Prf::new(pre_master_secret, label_seed);
prf.get_bytes(48)
};
let aead = cipher_suite.new_aead();
// SECRET
let read_key = {
let mut label_seed = b"key expansion".to_vec();
label_seed.extend(&server_hello_data.random[..]);
label_seed.extend(&cli_random);
let mut prf = Prf::new(master_secret.clone(), label_seed);
// mac_key is not used in AEAD configuration.
let enc_key_length = aead.key_size();
let write_key = prf.get_bytes(enc_key_length);
let encryptor = aead.new_encryptor(write_key);
self.writer.set_encryptor(encryptor);
// this will be set after receiving ChangeCipherSpec.
let read_key = prf.get_bytes(enc_key_length);
// chacha20-poly1305 does not use iv.
read_key
};
// FIXME we should get "raw" packet data and hash them incrementally
let msgs = {
let mut msgs = Vec::new();
try!(client_hello.tls_write(&mut msgs));
try!(Handshake::server_hello(server_hello_data).tls_write(&mut msgs));
try!(Handshake::certificate(certificate_list).tls_write(&mut msgs));
try!(Handshake::server_key_exchange(server_key_ex_data).tls_write(&mut msgs));
try!(Handshake::server_hello_done(DummyItem).tls_write(&mut msgs));
try!(client_key_exchange.tls_write(&mut msgs));
msgs
};
// this only verifies Handshake messages! what about others?
// ApplicationData messages are not permitted until now.
// ChangeCipherSpec messages are only permitted after ClientKeyExchange.
// Alert messages can be problematic - they are not verified and
// can be broken into several records. This leads to an alert attack.
// since we don't accept strange alerts, all "normal" alert messages are
// treated as error, so now we can assert that we haven't received alerts.
let verify_hash = sha256(&msgs);
let client_verify_data = {
let finished_label = b"client finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret.clone(), label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let finished = try!(Handshake::new_finished(client_verify_data));
try!(self.writer.write_handshake(&finished));
// Although client->server is encrypted, server->client isn't yet.
// server may send either ChangeCipherSpec or Alert.
try!(self.reader.read_change_cipher_spec());
// from now server starts encryption.
self.reader.set_decryptor(aead.new_decryptor(read_key));
let server_finished = expect!(finished);
{
let verify_hash = {
// ideally we may save "raw" packet data..
let mut serv_msgs = Vec::new();
// FIXME: this should not throw "io error".. should throw "internal error"
try!(Write::write_all(&mut serv_msgs, &msgs));
try!(finished.tls_write(&mut serv_msgs));
let verify_hash = sha256(&serv_msgs);
verify_hash
};
let server_verify_data = {
let finished_label = b"server finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret, label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let verify_ok = crypto_compare(&server_finished,
&server_verify_data);
if !verify_ok {
return tls_err!(DecryptError, "server sent wrong verify data");
}
}
Ok(())
}
pub fn close(&mut self) -> TlsResult<()> {
let alert_data = alert::Alert {
level: alert::AlertLevel::fatal,
description: alert::AlertDescription::close_notify,
};
try!(self.writer.write_alert(&alert_data));
Ok(())
}
// send fatal alert and return error
// (it may be different to `err`, because writing alert can fail)
pub fn send_tls_alert(&mut self, err: TlsError) -> TlsError {
match err.kind {
TlsErrorKind::IoFailure => return err,
_ => {
let alert = alert::Alert::from_tls_err(&err);
let result = self.writer.write_alert(&alert);
match result {
Ok(()) => return err,
Err(err) => return err,
}
}
}
}
}
impl TlsClient<TcpStream, TcpStream> {
pub fn from_tcp(stream: TcpStream) -> TlsResult<TlsClient<TcpStream, TcpStream>> {
let rng = match OsRng::new() {
Ok(rng) => rng,
Err(..) => return tls_err!(InternalError, "failed to create OsRng"),
};
let reader = try!(stream.try_clone());
let writer = stream;
TlsClient::new(reader, writer, rng)
}
}
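// Editorial usage sketch (added; not in the original source). Assuming a
// reachable server speaking this TLS version and the single supported
// cipher suite, a caller could drive the client like this:
//
//     let stream = TcpStream::connect("example.com:443").unwrap();
//     let mut tls = TlsClient::from_tcp(stream).unwrap();
//     tls.write_all(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n").unwrap();
//     let mut response = Vec::new();
//     tls.read_to_end(&mut response).unwrap();
//     tls.close().unwrap();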
impl<R: Read, W: Write> Write for TlsClient<R, W> {
// this either writes all or fails.
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
try!(self.write_all(buf));
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
fn | (&mut self, buf: &[u8]) -> io::Result<()> {
let result = self.writer.write_application_data(buf);
match result {
Ok(()) => Ok(()),
Err(err) => {
let err = self.send_tls_alert(err);
// FIXME more verbose io error
Err(io::Error::new(io::ErrorKind::Other, SurugaError {
desc: "TLS write error",
cause: Some(Box::new(err)),
}))
}
}
}
}
// A replacement for the deprecated std::slice::bytes::copy_memory
fn copy_memory(from: &[u8], mut to: &mut [u8]) -> usize {
to.write(from).unwrap()
}
impl<R: Read, W: Write> Read for TlsClient<R, W> {
// if the ssl connection has failed, return `EndOfFile`.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut pos: usize = 0;
let len = buf.len();
while pos < len {
let remaining = len - pos;
if self.buf.len() == 0 {
let data = match self.reader.read_application_data() {
Ok(data) => data,
Err(_err) => {
break; // FIXME: stop if EOF. otherwise raise error?
}
};
self.buf.extend(&data);
}
let selflen = self.buf.len();
let necessary = cmp::min(remaining, selflen);
copy_memory(&self.buf[.. necessary], &mut buf[pos.. pos + necessary]);
pos += necessary;
self.buf = self.buf[necessary..].to_vec();
}
Ok(pos)
}
}
| write_all | identifier_name |
client.rs | use std::io;
use std::io::prelude::*;
use std::net::TcpStream;
use std::cmp;
use rand::{Rng, OsRng};
use alert;
use tls_result::{TlsResult, TlsError, TlsErrorKind};
use tls_result::TlsErrorKind::{UnexpectedMessage, InternalError, DecryptError, IllegalParameter};
use util::{SurugaError, crypto_compare};
use cipher::{self, Aead};
use cipher::prf::Prf;
use crypto::sha2::sha256;
use tls_item::{TlsItem, DummyItem};
use handshake::{self, Handshake};
use tls::{TlsReader, TlsWriter, TLS_VERSION};
// handshake is done during construction.
pub struct TlsClient<R: Read, W: Write> {
pub reader: TlsReader<R>,
pub writer: TlsWriter<W>,
pub rng: OsRng,
buf: Vec<u8>,
}
impl<R: Read, W: Write> TlsClient<R, W> {
pub fn new(reader: R, writer: W, rng: OsRng) -> TlsResult<TlsClient<R, W>> {
let mut client = TlsClient {
reader: TlsReader::new(reader),
writer: TlsWriter::new(writer),
rng: rng,
buf: Vec::new(),
};
// handshake failed. send alert if necessary
match client.handshake() {
Ok(()) => {}
Err(err) => return Err(client.send_tls_alert(err)),
}
Ok(client)
}
#[inline]
pub fn reader(&mut self) -> &mut R {
self.reader.get_mut()
}
#[inline]
pub fn writer(&mut self) -> &mut W {
self.writer.get_mut()
}
// this does not send alert when error occurs
fn handshake(&mut self) -> TlsResult<()> {
// expect specific HandshakeMessage. otherwise return Err
macro_rules! expect {
($var:ident) => ({
match try!(self.reader.read_handshake()) {
handshake::Handshake::$var(data) => data,
_ => return tls_err!(UnexpectedMessage, "unexpected handshake message found"),
}
})
}
let cli_random = {
let mut random_bytes = [0u8; 32];
self.rng.fill_bytes(&mut random_bytes);
random_bytes.to_vec()
};
let random = try!(handshake::Random::new(cli_random.clone()));
// the only cipher we currently support
let cipher_suite = cipher::CipherSuite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256;
let curve_list = vec!(handshake::NamedCurve::secp256r1);
let curve_list = try!(handshake::Extension::new_elliptic_curve_list(curve_list));
let format_list = vec!(handshake::ECPointFormat::uncompressed);
let format_list = try!(handshake::Extension::new_ec_point_formats(format_list));
let extensions = vec!(curve_list, format_list);
let client_hello = try!(Handshake::new_client_hello(random, cipher_suite, extensions));
try!(self.writer.write_handshake(&client_hello));
let server_hello_data = expect!(server_hello);
{
let server_major = server_hello_data.server_version.major;
let server_minor = server_hello_data.server_version.minor;
if (server_major, server_minor) != TLS_VERSION {
return tls_err!(IllegalParameter,
"wrong server version: {} {}",
server_major,
server_minor);
}
if server_hello_data.cipher_suite != cipher_suite {
return tls_err!(IllegalParameter,
"cipher suite mismatch: found {:?}",
server_hello_data.cipher_suite);
}
if server_hello_data.compression_method != handshake::CompressionMethod::null {
return tls_err!(IllegalParameter, "compression method mismatch");
}
// FIXME: check if server sent unknown extension
// it is currently done by just not understanding any extensions
// other than we used.
}
// we always expect certificate.
let certificate_list = expect!(certificate);
// TODO: cert validation not implemented yet
// we always use server key exchange
let server_key_ex_data = expect!(server_key_exchange);
let kex = cipher_suite.new_kex();
let (key_data, pre_master_secret) = try!(kex.compute_keys(&server_key_ex_data,
&mut self.rng));
expect!(server_hello_done);
let client_key_exchange = try!(Handshake::new_client_key_exchange(key_data)); | try!(self.writer.write_handshake(&client_key_exchange));
try!(self.writer.write_change_cipher_spec());
// SECRET
let master_secret = {
let mut label_seed = b"master secret".to_vec();
label_seed.extend(&cli_random);
label_seed.extend(&server_hello_data.random[..]);
let mut prf = Prf::new(pre_master_secret, label_seed);
prf.get_bytes(48)
};
let aead = cipher_suite.new_aead();
// SECRET
let read_key = {
let mut label_seed = b"key expansion".to_vec();
label_seed.extend(&server_hello_data.random[..]);
label_seed.extend(&cli_random);
let mut prf = Prf::new(master_secret.clone(), label_seed);
// mac_key is not used in AEAD configuration.
let enc_key_length = aead.key_size();
let write_key = prf.get_bytes(enc_key_length);
let encryptor = aead.new_encryptor(write_key);
self.writer.set_encryptor(encryptor);
// this will be set after receiving ChangeCipherSpec.
let read_key = prf.get_bytes(enc_key_length);
// chacha20-poly1305 does not use iv.
read_key
};
// FIXME we should get "raw" packet data and hash them incrementally
let msgs = {
let mut msgs = Vec::new();
try!(client_hello.tls_write(&mut msgs));
try!(Handshake::server_hello(server_hello_data).tls_write(&mut msgs));
try!(Handshake::certificate(certificate_list).tls_write(&mut msgs));
try!(Handshake::server_key_exchange(server_key_ex_data).tls_write(&mut msgs));
try!(Handshake::server_hello_done(DummyItem).tls_write(&mut msgs));
try!(client_key_exchange.tls_write(&mut msgs));
msgs
};
// this only verifies Handshake messages! what about others?
// ApplicationData messages are not permitted until now.
// ChangeCipherSpec messages are only permitted after ClientKeyExchange.
// Alert messages can be problematic - they are not verified and
// can be broken into several records. This leads to an alert attack.
// since we don't accept strange alerts, all "normal" alert messages are
// treated as error, so now we can assert that we haven't received alerts.
let verify_hash = sha256(&msgs);
let client_verify_data = {
let finished_label = b"client finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret.clone(), label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let finished = try!(Handshake::new_finished(client_verify_data));
try!(self.writer.write_handshake(&finished));
// Although client->server is encrypted, server->client isn't yet.
// server may send either ChangeCipherSpec or Alert.
try!(self.reader.read_change_cipher_spec());
// from now server starts encryption.
self.reader.set_decryptor(aead.new_decryptor(read_key));
let server_finished = expect!(finished);
{
let verify_hash = {
// ideally we may save "raw" packet data..
let mut serv_msgs = Vec::new();
// FIXME: this should not throw "io error".. should throw "internal error"
try!(Write::write_all(&mut serv_msgs, &msgs));
try!(finished.tls_write(&mut serv_msgs));
let verify_hash = sha256(&serv_msgs);
verify_hash
};
let server_verify_data = {
let finished_label = b"server finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret, label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let verify_ok = crypto_compare(&server_finished,
&server_verify_data);
if !verify_ok {
return tls_err!(DecryptError, "server sent wrong verify data");
}
}
Ok(())
}
pub fn close(&mut self) -> TlsResult<()> {
let alert_data = alert::Alert {
level: alert::AlertLevel::fatal,
description: alert::AlertDescription::close_notify,
};
try!(self.writer.write_alert(&alert_data));
Ok(())
}
// send fatal alert and return error
// (it may be different to `err`, because writing alert can fail)
pub fn send_tls_alert(&mut self, err: TlsError) -> TlsError {
match err.kind {
TlsErrorKind::IoFailure => return err,
_ => {
let alert = alert::Alert::from_tls_err(&err);
let result = self.writer.write_alert(&alert);
match result {
Ok(()) => return err,
Err(err) => return err,
}
}
}
}
}
impl TlsClient<TcpStream, TcpStream> {
pub fn from_tcp(stream: TcpStream) -> TlsResult<TlsClient<TcpStream, TcpStream>> {
let rng = match OsRng::new() {
Ok(rng) => rng,
Err(..) => return tls_err!(InternalError, "failed to create OsRng"),
};
let reader = try!(stream.try_clone());
let writer = stream;
TlsClient::new(reader, writer, rng)
}
}
impl<R: Read, W: Write> Write for TlsClient<R, W> {
// this either writes all or fails.
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
try!(self.write_all(buf));
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
let result = self.writer.write_application_data(buf);
match result {
Ok(()) => Ok(()),
Err(err) => {
let err = self.send_tls_alert(err);
// FIXME more verbose io error
Err(io::Error::new(io::ErrorKind::Other, SurugaError {
desc: "TLS write error",
cause: Some(Box::new(err)),
}))
}
}
}
}
// A replacement for the deprecated std::slice::bytes::copy_memory
fn copy_memory(from: &[u8], mut to: &mut [u8]) -> usize {
to.write(from).unwrap()
}
impl<R: Read, W: Write> Read for TlsClient<R, W> {
// if the ssl connection has failed, return `EndOfFile`.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut pos: usize = 0;
let len = buf.len();
while pos < len {
let remaining = len - pos;
if self.buf.len() == 0 {
let data = match self.reader.read_application_data() {
Ok(data) => data,
Err(_err) => {
break; // FIXME: stop if EOF. otherwise raise error?
}
};
self.buf.extend(&data);
}
let selflen = self.buf.len();
let necessary = cmp::min(remaining, selflen);
copy_memory(&self.buf[.. necessary], &mut buf[pos.. pos + necessary]);
pos += necessary;
self.buf = self.buf[necessary..].to_vec();
}
Ok(pos)
}
} | random_line_split |
|
client.rs | use std::io;
use std::io::prelude::*;
use std::net::TcpStream;
use std::cmp;
use rand::{Rng, OsRng};
use alert;
use tls_result::{TlsResult, TlsError, TlsErrorKind};
use tls_result::TlsErrorKind::{UnexpectedMessage, InternalError, DecryptError, IllegalParameter};
use util::{SurugaError, crypto_compare};
use cipher::{self, Aead};
use cipher::prf::Prf;
use crypto::sha2::sha256;
use tls_item::{TlsItem, DummyItem};
use handshake::{self, Handshake};
use tls::{TlsReader, TlsWriter, TLS_VERSION};
// handshake is done during construction.
pub struct TlsClient<R: Read, W: Write> {
pub reader: TlsReader<R>,
pub writer: TlsWriter<W>,
pub rng: OsRng,
buf: Vec<u8>,
}
impl<R: Read, W: Write> TlsClient<R, W> {
pub fn new(reader: R, writer: W, rng: OsRng) -> TlsResult<TlsClient<R, W>> {
let mut client = TlsClient {
reader: TlsReader::new(reader),
writer: TlsWriter::new(writer),
rng: rng,
buf: Vec::new(),
};
// handshake failed. send alert if necessary
match client.handshake() {
Ok(()) => {}
Err(err) => return Err(client.send_tls_alert(err)),
}
Ok(client)
}
#[inline]
pub fn reader(&mut self) -> &mut R {
self.reader.get_mut()
}
#[inline]
pub fn writer(&mut self) -> &mut W {
self.writer.get_mut()
}
// this does not send alert when error occurs
fn handshake(&mut self) -> TlsResult<()> {
// expect specific HandshakeMessage. otherwise return Err
macro_rules! expect {
($var:ident) => ({
match try!(self.reader.read_handshake()) {
handshake::Handshake::$var(data) => data,
_ => return tls_err!(UnexpectedMessage, "unexpected handshake message found"),
}
})
}
let cli_random = {
let mut random_bytes = [0u8; 32];
self.rng.fill_bytes(&mut random_bytes);
random_bytes.to_vec()
};
let random = try!(handshake::Random::new(cli_random.clone()));
// the only cipher we currently support
let cipher_suite = cipher::CipherSuite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256;
let curve_list = vec!(handshake::NamedCurve::secp256r1);
let curve_list = try!(handshake::Extension::new_elliptic_curve_list(curve_list));
let format_list = vec!(handshake::ECPointFormat::uncompressed);
let format_list = try!(handshake::Extension::new_ec_point_formats(format_list));
let extensions = vec!(curve_list, format_list);
let client_hello = try!(Handshake::new_client_hello(random, cipher_suite, extensions));
try!(self.writer.write_handshake(&client_hello));
let server_hello_data = expect!(server_hello);
{
let server_major = server_hello_data.server_version.major;
let server_minor = server_hello_data.server_version.minor;
if (server_major, server_minor) != TLS_VERSION {
return tls_err!(IllegalParameter,
"wrong server version: {} {}",
server_major,
server_minor);
}
if server_hello_data.cipher_suite != cipher_suite {
return tls_err!(IllegalParameter,
"cipher suite mismatch: found {:?}",
server_hello_data.cipher_suite);
}
if server_hello_data.compression_method != handshake::CompressionMethod::null {
return tls_err!(IllegalParameter, "compression method mismatch");
}
// FIXME: check if server sent unknown extension
// it is currently done by just not understanding any extensions
// other than we used.
}
// we always expect certificate.
let certificate_list = expect!(certificate);
// TODO: cert validation not implemented yet
// we always use server key exchange
let server_key_ex_data = expect!(server_key_exchange);
let kex = cipher_suite.new_kex();
let (key_data, pre_master_secret) = try!(kex.compute_keys(&server_key_ex_data,
&mut self.rng));
expect!(server_hello_done);
let client_key_exchange = try!(Handshake::new_client_key_exchange(key_data));
try!(self.writer.write_handshake(&client_key_exchange));
try!(self.writer.write_change_cipher_spec());
// SECRET
let master_secret = {
let mut label_seed = b"master secret".to_vec();
label_seed.extend(&cli_random);
label_seed.extend(&server_hello_data.random[..]);
let mut prf = Prf::new(pre_master_secret, label_seed);
prf.get_bytes(48)
};
let aead = cipher_suite.new_aead();
// SECRET
let read_key = {
let mut label_seed = b"key expansion".to_vec();
label_seed.extend(&server_hello_data.random[..]);
label_seed.extend(&cli_random);
let mut prf = Prf::new(master_secret.clone(), label_seed);
// mac_key is not used in AEAD configuration.
let enc_key_length = aead.key_size();
let write_key = prf.get_bytes(enc_key_length);
let encryptor = aead.new_encryptor(write_key);
self.writer.set_encryptor(encryptor);
// this will be set after receiving ChangeCipherSpec.
let read_key = prf.get_bytes(enc_key_length);
// chacha20-poly1305 does not use iv.
read_key
};
// FIXME we should get "raw" packet data and hash them incrementally
let msgs = {
let mut msgs = Vec::new();
try!(client_hello.tls_write(&mut msgs));
try!(Handshake::server_hello(server_hello_data).tls_write(&mut msgs));
try!(Handshake::certificate(certificate_list).tls_write(&mut msgs));
try!(Handshake::server_key_exchange(server_key_ex_data).tls_write(&mut msgs));
try!(Handshake::server_hello_done(DummyItem).tls_write(&mut msgs));
try!(client_key_exchange.tls_write(&mut msgs));
msgs
};
// this only verifies Handshake messages! what about others?
// ApplicationData messages are not permitted until now.
// ChangeCipherSpec messages are only permitted after ClientKeyExchange.
// Alert messages can be problematic - they are not verified and
// can be broken into several records. This leads to an alert attack.
// since we don't accept strange alerts, all "normal" alert messages are
// treated as error, so now we can assert that we haven't received alerts.
let verify_hash = sha256(&msgs);
let client_verify_data = {
let finished_label = b"client finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret.clone(), label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let finished = try!(Handshake::new_finished(client_verify_data));
try!(self.writer.write_handshake(&finished));
// Although client->server is encrypted, server->client isn't yet.
// server may send either ChangeCipherSpec or Alert.
try!(self.reader.read_change_cipher_spec());
// from now server starts encryption.
self.reader.set_decryptor(aead.new_decryptor(read_key));
let server_finished = expect!(finished);
{
let verify_hash = {
// ideally we may save "raw" packet data..
let mut serv_msgs = Vec::new();
// FIXME: this should not throw "io error".. should throw "internal error"
try!(Write::write_all(&mut serv_msgs, &msgs));
try!(finished.tls_write(&mut serv_msgs));
let verify_hash = sha256(&serv_msgs);
verify_hash
};
let server_verify_data = {
let finished_label = b"server finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret, label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let verify_ok = crypto_compare(&server_finished,
&server_verify_data);
if !verify_ok {
return tls_err!(DecryptError, "server sent wrong verify data");
}
}
Ok(())
}
pub fn close(&mut self) -> TlsResult<()> {
let alert_data = alert::Alert {
level: alert::AlertLevel::fatal,
description: alert::AlertDescription::close_notify,
};
try!(self.writer.write_alert(&alert_data));
Ok(())
}
// send fatal alert and return error
// (it may be different to `err`, because writing alert can fail)
pub fn send_tls_alert(&mut self, err: TlsError) -> TlsError {
match err.kind {
TlsErrorKind::IoFailure => return err,
_ => {
let alert = alert::Alert::from_tls_err(&err);
let result = self.writer.write_alert(&alert);
match result {
Ok(()) => return err,
Err(err) => return err,
}
}
}
}
}
impl TlsClient<TcpStream, TcpStream> {
pub fn from_tcp(stream: TcpStream) -> TlsResult<TlsClient<TcpStream, TcpStream>> {
let rng = match OsRng::new() {
Ok(rng) => rng,
Err(..) => return tls_err!(InternalError, "failed to create OsRng"),
};
let reader = try!(stream.try_clone());
let writer = stream;
TlsClient::new(reader, writer, rng)
}
}
impl<R: Read, W: Write> Write for TlsClient<R, W> {
// this either writes all or fails.
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
try!(self.write_all(buf));
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
let result = self.writer.write_application_data(buf);
match result {
Ok(()) => Ok(()),
Err(err) => {
let err = self.send_tls_alert(err);
// FIXME more verbose io error
Err(io::Error::new(io::ErrorKind::Other, SurugaError {
desc: "TLS write error",
cause: Some(Box::new(err)),
}))
}
}
}
}
// A replacement for the deprecated std::slice::bytes::copy_memory
fn copy_memory(from: &[u8], mut to: &mut [u8]) -> usize |
impl<R: Read, W: Write> Read for TlsClient<R, W> {
// if the ssl connection has failed, return `EndOfFile`.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut pos: usize = 0;
let len = buf.len();
while pos < len {
let remaining = len - pos;
if self.buf.len() == 0 {
let data = match self.reader.read_application_data() {
Ok(data) => data,
Err(_err) => {
break; // FIXME: stop if EOF. otherwise raise error?
}
};
self.buf.extend(&data);
}
let selflen = self.buf.len();
let necessary = cmp::min(remaining, selflen);
copy_memory(&self.buf[.. necessary], &mut buf[pos.. pos + necessary]);
pos += necessary;
self.buf = self.buf[necessary..].to_vec();
}
Ok(pos)
}
}
| {
to.write(from).unwrap()
} | identifier_body |
codec.rs | use std::pin::Pin;
use std::task::{Context, Poll};
use async_trait::async_trait;
use bytes::BytesMut;
// We use `futures::stream::Stream` because `std::async_iter::AsyncIterator` (previously known as
// `std::stream::Stream`) is still a bare-bones implementation - it does not even have an
// `async fn next` method!
use futures::{future::BoxFuture, sink, stream};
use g1_base::fmt::{DebugExt, InsertPlaceholder};
use super::{StreamRecv, StreamSend};
#[async_trait]
pub trait Decode<Stream>
where
Stream: StreamRecv,
{
type Item;
type Error: From<Stream::Error>;
/// Decodes a byte stream and returns a stream item or an error when EOF is reached, but the
/// byte stream buffer is not empty.
async fn decode(&mut self, stream: &mut Stream) -> Result<Option<Self::Item>, Self::Error>;
}
pub trait Encode<Item> {
/// Encodes a stream item and writes the output to a byte stream buffer.
fn encode(&mut self, item: &Item, buffer: &mut BytesMut);
}
// TODO: We intend to implement `Decode` for async functions, similar to what we do for `Encode`.
// However, for unknown reasons, it is causing the compiler to crash. Currently, we are only able
// to provide an implementation for non-async functions.
#[async_trait]
impl<Stream, DecodeFn, Item, Error> Decode<Stream> for DecodeFn
where
Stream: StreamRecv + Send,
DecodeFn: Fn(&mut BytesMut) -> Result<Option<Item>, Error> + Send,
Item: Send,
Error: From<Stream::Error> + Send,
{
type Item = Item;
type Error = Error;
async fn decode(&mut self, stream: &mut Stream) -> Result<Option<Self::Item>, Self::Error> {
loop {
if let Some(item) = self(&mut stream.buffer())? {
return Ok(Some(item));
}
if stream.recv_or_eof().await?.is_none() {
if stream.buffer().is_empty() {
return Ok(None);
} else {
// Return the `UnexpectedEof` error raised by the `recv` function.
return Err(stream.recv().await.expect_err("expect EOF").into());
}
}
}
}
}
impl<EncodeFn, Item> Encode<Item> for EncodeFn
where
EncodeFn: Fn(&Item, &mut BytesMut),
{
fn encode(&mut self, item: &Item, buffer: &mut BytesMut) {
self(item, buffer)
}
}
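// Editorial note (added; not in the original source): thanks to the blanket
// impls above, a plain function can act as a codec. For a hypothetical
// newline-delimited protocol, something like this satisfies `Encode<String>`:
//
//     fn encode_line(item: &String, buffer: &mut BytesMut) {
//         buffer.extend_from_slice(item.as_bytes());
//         buffer.extend_from_slice(b"\n");
//     }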
//
// Implementer's notes: We store future values in `Source` and `Sink`. These future values have to
// satisfy the `'static` lifetime bound because trait methods like `Stream::poll_next` do not take
// a lifetime parameter on `&mut Self`. To satisfy this, when producing a future value, we move
// all related values into it, and they will be moved back after it is completed (so that we can
// produce the next future value).
//
// There may be other ways to satisfy the `'static` lifetime bound, but for now, this "move" trick
// is the best I have.
//
/// Byte Stream to `futures::stream::Stream` Adapter
#[derive(DebugExt)]
pub struct Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
#[debug(with = InsertPlaceholder)]
source: Option<(Stream, Decoder)>,
#[debug(with = InsertPlaceholder)]
next_future: Option<SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error>>,
}
/// `futures::sink::Sink` to Byte Stream Adapter
#[derive(DebugExt)]
pub struct Sink<Stream, Encoder>
where
Stream: StreamSend,
{
#[debug(with = InsertPlaceholder)]
stream: Option<Stream>,
#[debug(with = InsertPlaceholder)]
encoder: Encoder,
#[debug(with = InsertPlaceholder)]
flush_future: Option<SinkFuture<Stream, Stream::Error>>,
#[debug(with = InsertPlaceholder)]
close_future: Option<SinkFuture<Stream, Stream::Error>>,
}
// TODO: Use where clauses to simplify these type aliases when rustc starts enforcing where clauses
// in type aliases. For more details, check [rust-lang/rust#21903][#21903].
//
// [#21903]: https://github.com/rust-lang/rust/issues/21903
type SourceFuture<Stream, Decoder, Item, Error> =
BoxFuture<'static, SourceOutput<Stream, Decoder, Item, Error>>;
type SourceOutput<Stream, Decoder, Item, Error> = ((Stream, Decoder), Option<Result<Item, Error>>);
type SinkFuture<Stream, Error> = BoxFuture<'static, SinkOutput<Stream, Error>>;
type SinkOutput<Stream, Error> = (Stream, Result<(), Error>);
macro_rules! poll {
($this:ident, $get_future:ident, $context:ident $(,)?) => {
match $this.$get_future().as_mut().poll($context) {
Poll::Ready((state, result)) => {
$this.reset(state);
Poll::Ready(result)
}
Poll::Pending => Poll::Pending,
}
};
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
pub fn new(stream: Stream, decoder: Decoder) -> Self {
Self {
source: Some((stream, decoder)),
next_future: None,
}
}
fn reset(&mut self, source: (Stream, Decoder)) {
self.source = Some(source);
self.next_future = None;
}
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv + Send + 'static,
Decoder: Decode<Stream> + Send + 'static,
{
fn next_future(&mut self) -> &mut SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error> {
self.next_future
.get_or_insert_with(|| Box::pin(Self::next(self.source.take().unwrap())))
}
async fn next(
(mut stream, mut decoder): (Stream, Decoder),
) -> SourceOutput<Stream, Decoder, Decoder::Item, Decoder::Error> {
let result = decoder.decode(&mut stream).await.transpose();
((stream, decoder), result)
}
}
impl<Stream, Decoder> stream::Stream for Source<Stream, Decoder>
where
Stream: StreamRecv + Send + Unpin + 'static,
Decoder: Decode<Stream> + Send + Unpin + 'static,
{
type Item = Result<Decoder::Item, Decoder::Error>;
fn poll_next(self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
poll!(this, next_future, context)
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend,
{
pub fn new(stream: Stream, encoder: Encoder) -> Self {
Self {
stream: Some(stream),
encoder,
flush_future: None,
close_future: None,
}
}
fn reset(&mut self, stream: Stream) {
self.stream = Some(stream);
self.flush_future = None;
self.close_future = None;
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend + Send + 'static,
Encoder: 'static,
{
fn flush_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.flush_future
.get_or_insert_with(|| Box::pin(Self::flush(self.stream.take().unwrap())))
}
async fn flush(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.send_all().await;
(stream, result)
}
fn close_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.close_future
.get_or_insert_with(|| Box::pin(Self::close(self.stream.take().unwrap())))
}
async fn close(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.shutdown().await;
(stream, result)
}
}
impl<Stream, Encoder, Item> sink::Sink<Item> for Sink<Stream, Encoder>
where
Stream: StreamSend + Send + Unpin + 'static,
Encoder: Encode<Item> + Unpin + 'static,
{
type Error = Stream::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self.get_mut().stream {
Some(_) => Poll::Ready(Ok(())),
None => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
let this = self.get_mut();
this.encoder
.encode(&item, &mut this.stream.as_mut().unwrap().buffer());
Ok(())
}
fn poll_flush(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, flush_future, context)
}
fn poll_close(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, close_future, context)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::{Error, ErrorKind};
use bytes::{Buf, BufMut};
use futures::{sink::SinkExt, stream::StreamExt};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::io::{RecvStream, SendStream};
use super::*;
fn decode(buffer: &mut BytesMut) -> Result<Option<String>, Error> {
if buffer.remaining() < 1 {
return Ok(None);
}
let size = usize::from(buffer[0]);
if buffer.remaining() < 1 + size {
return Ok(None);
}
let mut vec = vec![0u8; size];
buffer.get_u8();
buffer.copy_to_slice(&mut vec);
Ok(Some(String::from_utf8(vec).unwrap()))
}
#[tokio::test]
async fn source() {
let (stream, mut mock) = RecvStream::new_mock(4096);
let mut source = Source::new(stream, decode);
mock.write_all(b"\x0bhello world\x03foo\x03bar")
.await
.unwrap();
drop(mock);
assert_matches!(source.next().await, Some(Ok(x)) if x == "hello world");
assert_matches!(source.next().await, Some(Ok(x)) if x == "foo");
assert_matches!(source.next().await, Some(Ok(x)) if x == "bar");
assert_matches!(source.next().await, None);
}
#[tokio::test]
async fn source_unexpected_eof() {
let (stream, mut mock) = RecvStream::new_mock(4096);
let mut source = Source::new(stream, decode);
mock.write_all(b"\x0bhello").await.unwrap();
drop(mock);
assert_matches!(source.next().await, Some(Err(e)) if e.kind() == ErrorKind::UnexpectedEof);
}
| buffer.put_u8(item.len().try_into().unwrap());
buffer.put_slice(item.as_bytes());
}
#[tokio::test]
async fn sink() {
let (stream, mut mock) = SendStream::new_mock(4096);
let mut sink = Sink::new(stream, encode);
assert_matches!(sink.feed("hello world".to_string()).await, Ok(()));
assert_matches!(sink.flush().await, Ok(()));
let mut buffer = BytesMut::new();
assert_matches!(mock.read_buf(&mut buffer).await, Ok(12));
assert_eq!(buffer.as_ref(), b"\x0bhello world");
assert_matches!(sink.feed("foo".to_string()).await, Ok(()));
assert_matches!(sink.feed("bar".to_string()).await, Ok(()));
assert_matches!(sink.close().await, Ok(()));
buffer.clear();
assert_matches!(mock.read_buf(&mut buffer).await, Ok(8));
assert_eq!(buffer.as_ref(), b"\x03foo\x03bar");
}
} | fn encode(item: &String, buffer: &mut BytesMut) { | random_line_split |
codec.rs | use std::pin::Pin;
use std::task::{Context, Poll};
use async_trait::async_trait;
use bytes::BytesMut;
// We use `futures::stream::Stream` because `std::async_iter::AsyncIterator` (previously known as
// `std::stream::Stream`) is still a bare-bones implementation - it does not even have an
// `async fn next` method!
use futures::{future::BoxFuture, sink, stream};
use g1_base::fmt::{DebugExt, InsertPlaceholder};
use super::{StreamRecv, StreamSend};
#[async_trait]
pub trait Decode<Stream>
where
Stream: StreamRecv,
{
type Item;
type Error: From<Stream::Error>;
/// Decodes a byte stream and returns a stream item or an error when EOF is reached, but the
/// byte stream buffer is not empty.
async fn decode(&mut self, stream: &mut Stream) -> Result<Option<Self::Item>, Self::Error>;
}
pub trait Encode<Item> {
/// Encodes a stream item and writes the output to a byte stream buffer.
fn encode(&mut self, item: &Item, buffer: &mut BytesMut);
}
// TODO: We intend to implement `Decode` for async functions, similar to what we do for `Encode`.
// However, for unknown reasons, it is causing the compiler to crash. Currently, we are only able
// to provide an implementation for non-async functions.
#[async_trait]
impl<Stream, DecodeFn, Item, Error> Decode<Stream> for DecodeFn
where
Stream: StreamRecv + Send,
DecodeFn: Fn(&mut BytesMut) -> Result<Option<Item>, Error> + Send,
Item: Send,
Error: From<Stream::Error> + Send,
{
type Item = Item;
type Error = Error;
async fn decode(&mut self, stream: &mut Stream) -> Result<Option<Self::Item>, Self::Error> {
loop {
if let Some(item) = self(&mut stream.buffer())? {
return Ok(Some(item));
}
if stream.recv_or_eof().await?.is_none() {
if stream.buffer().is_empty() {
return Ok(None);
} else {
// Return the `UnexpectedEof` error raised by the `recv` function.
return Err(stream.recv().await.expect_err("expect EOF").into());
}
}
}
}
}
impl<EncodeFn, Item> Encode<Item> for EncodeFn
where
EncodeFn: Fn(&Item, &mut BytesMut),
{
fn encode(&mut self, item: &Item, buffer: &mut BytesMut) {
self(item, buffer)
}
}
//
// Implementer's notes: We store future values in `Source` and `Sink`. These future values have to
// satisfy the `'static` lifetime bound because trait methods like `Stream::poll_next` do not take
// a lifetime parameter on `&mut Self`. To satisfy this, when producing a future value, we move
// all related values into it, and they will be moved back after it is completed (so that we can
// produce the next future value).
//
// There may be other ways to satisfy the `'static` lifetime bound, but for now, this "move" trick
// is the best I have.
//
/// Byte Stream to `futures::stream::Stream` Adapter
#[derive(DebugExt)]
pub struct Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
#[debug(with = InsertPlaceholder)]
source: Option<(Stream, Decoder)>,
#[debug(with = InsertPlaceholder)]
next_future: Option<SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error>>,
}
/// `futures::sink::Sink` to Byte Stream Adapter
#[derive(DebugExt)]
pub struct Sink<Stream, Encoder>
where
Stream: StreamSend,
{
#[debug(with = InsertPlaceholder)]
stream: Option<Stream>,
#[debug(with = InsertPlaceholder)]
encoder: Encoder,
#[debug(with = InsertPlaceholder)]
flush_future: Option<SinkFuture<Stream, Stream::Error>>,
#[debug(with = InsertPlaceholder)]
close_future: Option<SinkFuture<Stream, Stream::Error>>,
}
// TODO: Use where clauses to simplify these type aliases when rustc starts enforcing where clauses
// in type aliases. For more details, check [rust-lang/rust#21903][#21903].
//
// [#21903]: https://github.com/rust-lang/rust/issues/21903
type SourceFuture<Stream, Decoder, Item, Error> =
BoxFuture<'static, SourceOutput<Stream, Decoder, Item, Error>>;
type SourceOutput<Stream, Decoder, Item, Error> = ((Stream, Decoder), Option<Result<Item, Error>>);
type SinkFuture<Stream, Error> = BoxFuture<'static, SinkOutput<Stream, Error>>;
type SinkOutput<Stream, Error> = (Stream, Result<(), Error>);
macro_rules! poll {
($this:ident, $get_future:ident, $context:ident $(,)?) => {
match $this.$get_future().as_mut().poll($context) {
Poll::Ready((state, result)) => {
$this.reset(state);
Poll::Ready(result)
}
Poll::Pending => Poll::Pending,
}
};
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
pub fn new(stream: Stream, decoder: Decoder) -> Self {
Self {
source: Some((stream, decoder)),
next_future: None,
}
}
fn reset(&mut self, source: (Stream, Decoder)) {
self.source = Some(source);
self.next_future = None;
}
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv + Send + 'static,
Decoder: Decode<Stream> + Send + 'static,
{
fn next_future(&mut self) -> &mut SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error> {
self.next_future
.get_or_insert_with(|| Box::pin(Self::next(self.source.take().unwrap())))
}
async fn next(
(mut stream, mut decoder): (Stream, Decoder),
) -> SourceOutput<Stream, Decoder, Decoder::Item, Decoder::Error> {
let result = decoder.decode(&mut stream).await.transpose();
((stream, decoder), result)
}
}
impl<Stream, Decoder> stream::Stream for Source<Stream, Decoder>
where
Stream: StreamRecv + Send + Unpin + 'static,
Decoder: Decode<Stream> + Send + Unpin + 'static,
{
type Item = Result<Decoder::Item, Decoder::Error>;
fn poll_next(self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
poll!(this, next_future, context)
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend,
{
pub fn new(stream: Stream, encoder: Encoder) -> Self {
Self {
stream: Some(stream),
encoder,
flush_future: None,
close_future: None,
}
}
fn reset(&mut self, stream: Stream) {
self.stream = Some(stream);
self.flush_future = None;
self.close_future = None;
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend + Send + 'static,
Encoder: 'static,
{
fn flush_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.flush_future
.get_or_insert_with(|| Box::pin(Self::flush(self.stream.take().unwrap())))
}
async fn flush(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.send_all().await;
(stream, result)
}
fn close_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.close_future
.get_or_insert_with(|| Box::pin(Self::close(self.stream.take().unwrap())))
}
async fn close(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.shutdown().await;
(stream, result)
}
}
impl<Stream, Encoder, Item> sink::Sink<Item> for Sink<Stream, Encoder>
where
Stream: StreamSend + Send + Unpin + 'static,
Encoder: Encode<Item> + Unpin + 'static,
{
type Error = Stream::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self.get_mut().stream {
Some(_) => Poll::Ready(Ok(())),
None => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
let this = self.get_mut();
this.encoder
.encode(&item, &mut this.stream.as_mut().unwrap().buffer());
Ok(())
}
fn poll_flush(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, flush_future, context)
}
fn poll_close(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, close_future, context)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::{Error, ErrorKind};
use bytes::{Buf, BufMut};
use futures::{sink::SinkExt, stream::StreamExt};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::io::{RecvStream, SendStream};
use super::*;
fn decode(buffer: &mut BytesMut) -> Result<Option<String>, Error> |
#[tokio::test]
async fn source() {
let (stream, mut mock) = RecvStream::new_mock(4096);
let mut source = Source::new(stream, decode);
mock.write_all(b"\x0bhello world\x03foo\x03bar")
.await
.unwrap();
drop(mock);
assert_matches!(source.next().await, Some(Ok(x)) if x == "hello world");
assert_matches!(source.next().await, Some(Ok(x)) if x == "foo");
assert_matches!(source.next().await, Some(Ok(x)) if x == "bar");
assert_matches!(source.next().await, None);
}
#[tokio::test]
async fn source_unexpected_eof() {
let (stream, mut mock) = RecvStream::new_mock(4096);
let mut source = Source::new(stream, decode);
mock.write_all(b"\x0bhello").await.unwrap();
drop(mock);
assert_matches!(source.next().await, Some(Err(e)) if e.kind() == ErrorKind::UnexpectedEof);
}
fn encode(item: &String, buffer: &mut BytesMut) {
buffer.put_u8(item.len().try_into().unwrap());
buffer.put_slice(item.as_bytes());
}
#[tokio::test]
async fn sink() {
let (stream, mut mock) = SendStream::new_mock(4096);
let mut sink = Sink::new(stream, encode);
assert_matches!(sink.feed("hello world".to_string()).await, Ok(()));
assert_matches!(sink.flush().await, Ok(()));
let mut buffer = BytesMut::new();
assert_matches!(mock.read_buf(&mut buffer).await, Ok(12));
assert_eq!(buffer.as_ref(), b"\x0bhello world");
assert_matches!(sink.feed("foo".to_string()).await, Ok(()));
assert_matches!(sink.feed("bar".to_string()).await, Ok(()));
assert_matches!(sink.close().await, Ok(()));
buffer.clear();
assert_matches!(mock.read_buf(&mut buffer).await, Ok(8));
assert_eq!(buffer.as_ref(), b"\x03foo\x03bar");
}
}
| {
if buffer.remaining() < 1 {
return Ok(None);
}
let size = usize::from(buffer[0]);
if buffer.remaining() < 1 + size {
return Ok(None);
}
let mut vec = vec![0u8; size];
buffer.get_u8();
buffer.copy_to_slice(&mut vec);
Ok(Some(String::from_utf8(vec).unwrap()))
} | identifier_body |
codec.rs | use std::pin::Pin;
use std::task::{Context, Poll};
use async_trait::async_trait;
use bytes::BytesMut;
// We use `futures::stream::Stream` because `std::async_iter::AsyncIterator` (previously known as
// `std::stream::Stream`) is still a bare-bones implementation - it does not even have an
// `async fn next` method!
use futures::{future::BoxFuture, sink, stream};
use g1_base::fmt::{DebugExt, InsertPlaceholder};
use super::{StreamRecv, StreamSend};
#[async_trait]
pub trait Decode<Stream>
where
Stream: StreamRecv,
{
type Item;
type Error: From<Stream::Error>;
/// Decodes a byte stream and returns a stream item or an error when EOF is reached, but the
/// byte stream buffer is not empty.
async fn decode(&mut self, stream: &mut Stream) -> Result<Option<Self::Item>, Self::Error>;
}
pub trait Encode<Item> {
/// Encodes a stream item and writes the output to a byte stream buffer.
fn encode(&mut self, item: &Item, buffer: &mut BytesMut);
}
// TODO: We intend to implement `Decode` for async functions, similar to what we do for `Encode`.
// However, for unknown reasons, it is causing the compiler to crash. Currently, we are only able
// to provide an implementation for non-async functions.
#[async_trait]
impl<Stream, DecodeFn, Item, Error> Decode<Stream> for DecodeFn
where
Stream: StreamRecv + Send,
DecodeFn: Fn(&mut BytesMut) -> Result<Option<Item>, Error> + Send,
Item: Send,
Error: From<Stream::Error> + Send,
{
type Item = Item;
type Error = Error;
async fn | (&mut self, stream: &mut Stream) -> Result<Option<Self::Item>, Self::Error> {
loop {
if let Some(item) = self(&mut stream.buffer())? {
return Ok(Some(item));
}
if stream.recv_or_eof().await?.is_none() {
if stream.buffer().is_empty() {
return Ok(None);
} else {
// Return the `UnexpectedEof` error raised by the `recv` function.
return Err(stream.recv().await.expect_err("expect EOF").into());
}
}
}
}
}
impl<EncodeFn, Item> Encode<Item> for EncodeFn
where
EncodeFn: Fn(&Item, &mut BytesMut),
{
fn encode(&mut self, item: &Item, buffer: &mut BytesMut) {
self(item, buffer)
}
}
//
// Implementer's notes: We store future values in `Source` and `Sink`. These future values have to
// satisfy the `'static` lifetime bound because trait methods like `Stream::poll_next` do not take
// a lifetime parameter on `&mut Self`. To satisfy this, when producing a future value, we move
// all related values into it, and they will be moved back after it is completed (so that we can
// produce the next future value).
//
// There may be other ways to satisfy the `'static` lifetime bound, but for now, this "move" trick
// is the best I have.
//
/// Byte Stream to `futures::stream::Stream` Adapter
#[derive(DebugExt)]
pub struct Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
#[debug(with = InsertPlaceholder)]
source: Option<(Stream, Decoder)>,
#[debug(with = InsertPlaceholder)]
next_future: Option<SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error>>,
}
/// `futures::sink::Sink` to Byte Stream Adapter
#[derive(DebugExt)]
pub struct Sink<Stream, Encoder>
where
Stream: StreamSend,
{
#[debug(with = InsertPlaceholder)]
stream: Option<Stream>,
#[debug(with = InsertPlaceholder)]
encoder: Encoder,
#[debug(with = InsertPlaceholder)]
flush_future: Option<SinkFuture<Stream, Stream::Error>>,
#[debug(with = InsertPlaceholder)]
close_future: Option<SinkFuture<Stream, Stream::Error>>,
}
// TODO: Use where clauses to simplify these type aliases when rustc starts enforcing where clauses
// in type aliases. For more details, check [rust-lang/rust#21903][#21903].
//
// [#21903]: https://github.com/rust-lang/rust/issues/21903
type SourceFuture<Stream, Decoder, Item, Error> =
BoxFuture<'static, SourceOutput<Stream, Decoder, Item, Error>>;
type SourceOutput<Stream, Decoder, Item, Error> = ((Stream, Decoder), Option<Result<Item, Error>>);
type SinkFuture<Stream, Error> = BoxFuture<'static, SinkOutput<Stream, Error>>;
type SinkOutput<Stream, Error> = (Stream, Result<(), Error>);
macro_rules! poll {
($this:ident, $get_future:ident, $context:ident $(,)?) => {
match $this.$get_future().as_mut().poll($context) {
Poll::Ready((state, result)) => {
$this.reset(state);
Poll::Ready(result)
}
Poll::Pending => Poll::Pending,
}
};
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
pub fn new(stream: Stream, decoder: Decoder) -> Self {
Self {
source: Some((stream, decoder)),
next_future: None,
}
}
fn reset(&mut self, source: (Stream, Decoder)) {
self.source = Some(source);
self.next_future = None;
}
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv + Send + 'static,
Decoder: Decode<Stream> + Send + 'static,
{
fn next_future(&mut self) -> &mut SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error> {
self.next_future
.get_or_insert_with(|| Box::pin(Self::next(self.source.take().unwrap())))
}
async fn next(
(mut stream, mut decoder): (Stream, Decoder),
) -> SourceOutput<Stream, Decoder, Decoder::Item, Decoder::Error> {
let result = decoder.decode(&mut stream).await.transpose();
((stream, decoder), result)
}
}
impl<Stream, Decoder> stream::Stream for Source<Stream, Decoder>
where
Stream: StreamRecv + Send + Unpin + 'static,
Decoder: Decode<Stream> + Send + Unpin + 'static,
{
type Item = Result<Decoder::Item, Decoder::Error>;
fn poll_next(self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
poll!(this, next_future, context)
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend,
{
pub fn new(stream: Stream, encoder: Encoder) -> Self {
Self {
stream: Some(stream),
encoder,
flush_future: None,
close_future: None,
}
}
fn reset(&mut self, stream: Stream) {
self.stream = Some(stream);
self.flush_future = None;
self.close_future = None;
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend + Send + 'static,
Encoder: 'static,
{
fn flush_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.flush_future
.get_or_insert_with(|| Box::pin(Self::flush(self.stream.take().unwrap())))
}
async fn flush(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.send_all().await;
(stream, result)
}
fn close_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.close_future
.get_or_insert_with(|| Box::pin(Self::close(self.stream.take().unwrap())))
}
async fn close(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.shutdown().await;
(stream, result)
}
}
impl<Stream, Encoder, Item> sink::Sink<Item> for Sink<Stream, Encoder>
where
Stream: StreamSend + Send + Unpin + 'static,
Encoder: Encode<Item> + Unpin + 'static,
{
type Error = Stream::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self.get_mut().stream {
Some(_) => Poll::Ready(Ok(())),
None => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
let this = self.get_mut();
this.encoder
.encode(&item, &mut this.stream.as_mut().unwrap().buffer());
Ok(())
}
fn poll_flush(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, flush_future, context)
}
fn poll_close(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, close_future, context)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::{Error, ErrorKind};
use bytes::{Buf, BufMut};
use futures::{sink::SinkExt, stream::StreamExt};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::io::{RecvStream, SendStream};
use super::*;
fn decode(buffer: &mut BytesMut) -> Result<Option<String>, Error> {
if buffer.remaining() < 1 {
return Ok(None);
}
let size = usize::from(buffer[0]);
if buffer.remaining() < 1 + size {
return Ok(None);
}
let mut vec = vec![0u8; size];
buffer.get_u8();
buffer.copy_to_slice(&mut vec);
Ok(Some(String::from_utf8(vec).unwrap()))
}
#[tokio::test]
async fn source() {
let (stream, mut mock) = RecvStream::new_mock(4096);
let mut source = Source::new(stream, decode);
mock.write_all(b"\x0bhello world\x03foo\x03bar")
.await
.unwrap();
drop(mock);
assert_matches!(source.next().await, Some(Ok(x)) if x == "hello world");
assert_matches!(source.next().await, Some(Ok(x)) if x == "foo");
assert_matches!(source.next().await, Some(Ok(x)) if x == "bar");
assert_matches!(source.next().await, None);
}
#[tokio::test]
async fn source_unexpected_eof() {
let (stream, mut mock) = RecvStream::new_mock(4096);
let mut source = Source::new(stream, decode);
mock.write_all(b"\x0bhello").await.unwrap();
drop(mock);
assert_matches!(source.next().await, Some(Err(e)) if e.kind() == ErrorKind::UnexpectedEof);
}
fn encode(item: &String, buffer: &mut BytesMut) {
buffer.put_u8(item.len().try_into().unwrap());
buffer.put_slice(item.as_bytes());
}
#[tokio::test]
async fn sink() {
let (stream, mut mock) = SendStream::new_mock(4096);
let mut sink = Sink::new(stream, encode);
assert_matches!(sink.feed("hello world".to_string()).await, Ok(()));
assert_matches!(sink.flush().await, Ok(()));
let mut buffer = BytesMut::new();
assert_matches!(mock.read_buf(&mut buffer).await, Ok(12));
assert_eq!(buffer.as_ref(), b"\x0bhello world");
assert_matches!(sink.feed("foo".to_string()).await, Ok(()));
assert_matches!(sink.feed("bar".to_string()).await, Ok(()));
assert_matches!(sink.close().await, Ok(()));
buffer.clear();
assert_matches!(mock.read_buf(&mut buffer).await, Ok(8));
assert_eq!(buffer.as_ref(), b"\x03foo\x03bar");
}
}
| decode | identifier_name |
aes.rs | 11] = [
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
];
const SBOX: [u8; 256] = [
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,
];
const INV_SBOX: [u8; 256] = [
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,
];
fn sbox(data: &mut [u8]) {
for e in data.iter_mut() {
*e = SBOX[*e as usize];
}
}
fn inv_sbox(data: &mut [u8]) {
for e in data.iter_mut() {
*e = INV_SBOX[*e as usize];
}
}
fn to_four_byte_array(data: &[u8]) -> [u8; 4] {
[data[0], data[1], data[2], data[3]]
}
enum KeyExpansionMode {
Xor,
Sbox,
Full,
}
fn generate_four_bytes(
key_length: usize,
expanded_key: &[u8],
rcon_iteration: &mut usize,
mode: KeyExpansionMode,
) -> [u8; 4] {
let i = expanded_key.len();
let source_bytes = &expanded_key[i - 4..i];
let mut t: [u8; 4] = to_four_byte_array(source_bytes);
match mode {
KeyExpansionMode::Xor => {}
KeyExpansionMode::Sbox => {
sbox(&mut t);
}
KeyExpansionMode::Full => {
t.rotate_left(1);
sbox(&mut t);
t[0].bitxor_assign(RCON[*rcon_iteration]);
*rcon_iteration += 1;
}
};
let xor_source = &expanded_key[i - key_length..i - key_length + 4];
xor::buffer_mut(&mut t, xor::Key::FullBuffer(xor_source));
t
}
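// In FIPS-197 terms this is one step of the key-schedule word recurrence
// W[i] = f(W[i-1]) XOR W[i-Nk], where f is the identity (Xor), SubWord (Sbox),
// or RotWord + SubWord + Rcon (Full); Nk is expressed here in bytes as
// `key_length` because the buffer is byte-oriented.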
fn expand_key(key: &[u8]) -> Vec<u8> {
let key_length = key.len();
let (rounds, sbox_round, extra_expansions) = match key_length {
16 => (10, false, 0),
24 => (12, false, 2),
32 => (14, true, 3),
len => panic!("Unsupported key length {}", len),
};
let expanded_key_size = 16 * (rounds + 1);
let mut expanded_key = Vec::with_capacity(expanded_key_size);
expanded_key.extend_from_slice(&key);
let mut rcon_iteration = 1usize;
while expanded_key.len() < expanded_key_size {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Full,
);
expanded_key.extend(t.iter());
for _i in 0..3 {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Xor,
);
expanded_key.extend(t.iter());
}
if sbox_round {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Sbox,
);
expanded_key.extend(t.iter());
}
for _i in 0..extra_expansions {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Xor,
);
expanded_key.extend(t.iter());
}
}
// Truncate any extra bytes
expanded_key.resize(expanded_key_size, 0);
assert!(
expanded_key.len() == expanded_key_size,
"Expanded key is too long: {}",
expanded_key.len(),
);
expanded_key
}
#[test]
fn expand_key_16() {
use util::parse_byte_string;
assert_eq!(
parse_byte_string(
"00000000000000000000000000000000626363636263636362636363626363639b9898c9f9fbfbaa9b9898c9f9fbfbaa90973450696ccffaf2f457330b0fac99ee06da7b876a1581759e42b27e91ee2b7f2e2b88f8443e098dda7cbbf34b9290ec614b851425758c99ff09376ab49ba7217517873550620bacaf6b3cc61bf09b0ef903333ba9613897060a04511dfa9fb1d4d8e28a7db9da1d7bb3de4c664941b4ef5bcb3e92e21123e951cf6f8f188e"
),
expand_key(&vec![0; 16])
);
assert_eq!(
parse_byte_string(
"ffffffffffffffffffffffffffffffffe8e9e9e917161616e8e9e9e917161616adaeae19bab8b80f525151e6454747f0090e2277b3b69a78e1e7cb9ea4a08c6ee16abd3e52dc2746b33becd8179b60b6e5baf3ceb766d488045d385013c658e671d07db3c6b6a93bc2eb916bd12dc98de90d208d2fbb89b6ed5018dd3c7dd15096337366b988fad054d8e20d68a5335d8bf03f233278c5f366a027fe0e0514a3d60a3588e472f07b82d2d7858cd7c326"
),
expand_key(&vec![0xff; 16])
);
assert_eq!(
parse_byte_string(
"000102030405060708090a0b0c0d0e0fd6aa74fdd2af72fadaa678f1d6ab76feb692cf0b643dbdf1be9bc5006830b3feb6ff744ed2c2c9bf6c590cbf0469bf4147f7f7bc95353e03f96c32bcfd058dfd3caaa3e8a99f9deb50f3af57adf622aa5e390f7df7a69296a7553dc10aa31f6b14f9701ae35fe28c440adf4d4ea9c02647438735a41c65b9e016baf4aebf7ad2549932d1f08557681093ed9cbe2c974e13111d7fe3944a17f307a78b4d2b30c5"
),
expand_key(&parse_byte_string("000102030405060708090a0b0c0d0e0f"))
);
assert_eq!(
parse_byte_string(
"6920e299a5202a6d656e636869746f2afa8807605fa82d0d3ac64e6553b2214fcf75838d90ddae80aa1be0e5f9a9c1aa180d2f1488d0819422cb6171db62a0dbbaed96ad323d173910f67648cb94d693881b4ab2ba265d8baad02bc36144fd50b34f195d096944d6a3b96f15c2fd9245a7007778ae6933ae0dd05cbbcf2dcefeff8bccf251e2ff5c5c32a3e7931f6d1924b7182e7555e77229674495ba78298cae127cdadb479ba8f220df3d4858f6b1"
),
expand_key(&parse_byte_string("6920e299a5202a6d656e636869746f2a"))
);
}
#[test]
fn expand_key_24() {
assert_eq!(208, expand_key(&vec![0; 24]).len());
}
#[test]
fn expand_key_32() {
assert_eq!(240, expand_key(&vec![0; 32]).len());
}
fn add_round_key(state: &mut [u8], key: &[u8]) {
xor::buffer_mut(state, xor::Key::FullBuffer(key));
}
// Shifted by 0, 1, 2, 3 columns
const ROW_SHIFTS: [usize; 16] = [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11];
fn shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[ROW_SHIFTS[index]];
}
}
#[test]
fn test_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_eq!(
rows,
[1, 6, 11, 16, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12]
);
}
// Shifted by 0, -1, -2, -3 columns
const INV_ROW_SHIFTS: [usize; 16] = [0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3];
fn inv_shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[INV_ROW_SHIFTS[index]];
}
}
#[test]
fn test_inv_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
inv_shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
assert_eq!(
rows,
[1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3, 16, 13, 10, 7, 4,]
);
}
#[test]
fn test_shift_rows_ident() |
const COLUMN_MATRIX: [u8; 16] = [2, 3, 1, 1, 1, 2, 3, 1, 1, 1, 2, 3, 3, 1, 1, 2];
const INV_COLUMN_MATRIX: [u8; 16] = [14, 11, 13, 9, 9, 14, 11, 13, 13, 9, 14, 11, 11, 13, 9, 14];
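// Each matrix is stored row-major; MixColumns multiplies every 4-byte state
// column by the circulant matrix [2 3 1 1; 1 2 3 1; 1 1 2 3; 3 1 1 2] over
// GF(2^8), and INV_COLUMN_MATRIX is its inverse.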
fn gmul(mut a: u8, mut b: u8) -> u8 {
let mut p = 0;
for _ in 0..8 {
if (b & 0x1) != 0 {
p.bitxor_assign(a);
}
let has_high_bit = (a & 0x80) == 0x80;
a <<= 1;
if has_high_bit {
a.bitxor_assign(0x1b);
}
b >>= 1;
}
p
}
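// Sanity checks for gmul in GF(2^8) modulo x^8 + x^4 + x^3 + x + 1 (0x11b).
#[test]
fn test_gmul() {
    // {01} is the multiplicative identity.
    assert_eq!(0x53, gmul(0x53, 0x01));
    // {53} and {ca} are a multiplicative-inverse pair in this field.
    assert_eq!(0x01, gmul(0x53, 0xca));
    // A single high-bit overflow is reduced by XOR with 0x1b: {87} * {02} = {15}.
    assert_eq!(0x15, gmul(0x87, 0x02));
}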
fn mix_column(matrix: &[u8; 16], state_column: &[u8]) -> Vec<u8> {
matrix
.chunks(4)
.map(|mc| {
mc.iter()
.enumerate()
.map(|(i, &coefficient)| gmul(coefficient, state_column[i]))
.fold(None, |accum, current| match accum {
None => Some(current),
Some(x) => Some(x.bitxor(current)),
})
.unwrap()
})
.collect()
}
#[test]
fn test_mix_column() {
use util::parse_byte_string;
assert_eq!(
parse_byte_string("8e4da1bc"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("db135345")),
);
assert_eq!(
parse_byte_string("9fdc589d"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("f20a225c")),
);
assert_eq!(
parse_byte_string("01010101"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("01010101")),
);
assert_eq!(
parse_byte_string("c6c6c6c6"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("c6c6c6c6")),
);
assert_eq!(
parse_byte_string("d5d5d7d6"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("d4d4d4d5")),
);
assert_eq!(
parse_byte_string("4d7ebdf8"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("2d26314c")),
);
}
fn mix_columns(state: &mut [u8]) {
for column in state.chunks_mut(4) {
let new_column = mix_column(&COLUMN_MATRIX, column);
column.copy_from_slice(&new_column);
}
}
fn inv_mix_columns(state: &mut [u8]) {
for column in state.chunks_mut(4) {
let new_column = mix_column(&INV_COLUMN_MATRIX, column);
column.copy_from_slice(&new_column);
}
}
#[derive(Clone)]
pub enum CipherMode {
ECB,
CBC([u8; 16]),
}
fn transform_chunk(chunk: &[u8], expanded_key: &[u8], operation: Operation) -> [u8; 16] {
const STATE_SIZE: usize = 16;
assert!(
chunk.len() == STATE_SIZE,
"Chunk size of {} is invalid; expected {}",
chunk.len(),
STATE_SIZE
);
let last_round = expanded_key.chunks(STATE_SIZE).count() - 1;
let mut state = util::convert_to_fixed_array(chunk);
match operation {
Operation::Encrypt => {
for (round, round_key) in expanded_key.chunks(STATE_SIZE).enumerate() {
match round {
0 => {
add_round_key(&mut state, round_key);
}
n if n != last_round => {
sbox(&mut state);
shift_rows(&mut state);
mix_columns(&mut state);
add_round_key(&mut state, round_key);
}
_ => {
sbox(&mut state);
shift_rows(&mut state);
add_round_key(&mut state, round_key);
}
}
}
}
Operation::Decrypt => {
for (round, round_key) in expanded_key.chunks(STATE_SIZE).rev().enumerate() {
match round {
0 => {
add_round_key(&mut state, round_key);
}
n if n != last_round => {
inv_shift_rows(&mut state);
inv_sbox(&mut state);
add_round_key(&mut state, round_key);
inv_mix_columns(&mut state);
}
_ => {
inv_shift_rows(&mut state);
inv_sbox(&mut state);
add_round_key(&mut state, round_key);
}
}
}
}
};
state
}
#[derive(Clone, Copy)]
pub enum Operation {
Encrypt,
Decrypt,
}
trait CipherModeImpl {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8>;
}
struct ECBCipherMode {}
struct CBCCipherMode {
initialization_vector: [u8; 16],
operation: Operation,
}
impl CBCCipherMode {
fn transform(&mut self, chunk: &[u8; 16], transform: &dyn Fn(&[u8; 16]) -> [u8; 16]) -> [u8; 16] {
match self.operation {
Operation::Encrypt => {
xor::buffer_mut(&mut self.initialization_vector, xor::Key::FullBuffer(chunk));
self.initialization_vector = transform(&self.initialization_vector);
util::convert_to_fixed_array(&self.initialization_vector)
}
Operation::Decrypt => {
let mut plaintext = transform(chunk);
xor::buffer_mut(
&mut plaintext,
xor::Key::FullBuffer(&self.initialization_vector),
);
self.initialization_vector = util::convert_to_fixed_array(chunk);
plaintext
}
}
}
}
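// In equation form: encryption computes C[i] = E_K(P[i] XOR C[i-1]) with
// C[-1] = IV, and decryption computes P[i] = D_K(C[i]) XOR C[i-1]; the two match
// arms above carry C[i-1] forward in `initialization_vector`.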
impl CipherModeImpl for ECBCipherMode {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8> {
let mut v = Vec::with_capacity(data.len());
for chunk in data.chunks(16) {
v.extend(&transform(&util::convert_to_fixed_array(chunk)));
}
v
}
}
impl CipherModeImpl for CBCCipherMode {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8> {
let mut v = Vec::with_capacity(data.len());
for chunk in data.chunks(16) {
v.extend(
self
.transform(&util::convert_to_fixed_array(chunk), transform)
.iter(),
);
}
v
}
}
pub fn perform(data: &[u8], key: &[u8], operation: Operation, cipher_mode: CipherMode) -> Vec<u8> {
let expanded_key = expand_key(key);
let mut cipher_mode_impl: Box<dyn CipherModeImpl> = match cipher_mode {
CipherMode::ECB => Box::new(ECBCipherMode {}),
CipherMode::CBC(iv) => Box::new(CBCCipherMode {
initialization_vector: iv,
operation,
}),
};
cipher_mode_impl.transform_chunks(data, &|pre_transformed_chunk| {
transform_chunk(pre_transformed_chunk, &expanded_key, operation)
})
}
#[test]
fn ecb_once_16() {
let plaintext = &vec![0; 16];
let key = &vec![0; 16];
let ciphertext = perform(&plaintext, &key, Operation::Encrypt, CipherMode::ECB);
assert_eq!(
plaintext,
&perform(&ciphertext, &key, Operation::Decrypt, CipherMode::ECB)
);
}
#[test]
fn ecb_once_24() {
let plaintext = &vec![0; 32];
let key = &vec![0; 24];
let ciphertext = perform(&plaintext, &key, Operation::Encrypt, CipherMode::ECB);
assert_eq!(
plaintext,
&perform(&ciphertext, &key, Operation::Decrypt, CipherMode::ECB)
);
}
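// Round-trip check for CBC mode: decrypting with the same key and IV must
// recover the plaintext across multiple chained blocks.
#[test]
fn cbc_roundtrip_16() {
    let plaintext = &vec![7u8; 32];
    let key = &vec![0u8; 16];
    let iv = [1u8; 16];
    let ciphertext = perform(&plaintext, &key, Operation::Encrypt, CipherMode::CBC(iv));
    assert_eq!(
        plaintext,
        &perform(&ciphertext, &key, Operation::Decrypt, CipherMode::CBC(iv))
    );
}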
#[test]
fn ecb_once_32() { | {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
inv_shift_rows(&mut rows);
assert_eq!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
} | identifier_body |
aes.rs | 11] = [
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
];
const SBOX: [u8; 256] = [
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,
];
const INV_SBOX: [u8; 256] = [
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,
];
fn sbox(data: &mut [u8]) {
for e in data.iter_mut() {
*e = SBOX[*e as usize];
}
}
fn inv_sbox(data: &mut [u8]) {
for e in data.iter_mut() {
*e = INV_SBOX[*e as usize];
}
}
fn to_four_byte_array(data: &[u8]) -> [u8; 4] {
[data[0], data[1], data[2], data[3]]
}
enum KeyExpansionMode {
Xor,
Sbox,
Full,
}
fn generate_four_bytes(
key_length: usize,
expanded_key: &[u8],
rcon_iteration: &mut usize,
mode: KeyExpansionMode,
) -> [u8; 4] {
let i = expanded_key.len();
let source_bytes = &expanded_key[i - 4..i];
let mut t: [u8; 4] = to_four_byte_array(source_bytes);
match mode {
KeyExpansionMode::Xor => {}
KeyExpansionMode::Sbox => {
sbox(&mut t);
}
KeyExpansionMode::Full => {
t.rotate_left(1);
sbox(&mut t);
t[0].bitxor_assign(RCON[*rcon_iteration]);
*rcon_iteration += 1;
}
};
let xor_source = &expanded_key[i - key_length..i - key_length + 4];
xor::buffer_mut(&mut t, xor::Key::FullBuffer(xor_source));
t
}
fn | (key: &[u8]) -> Vec<u8> {
let key_length = key.len();
let (rounds, sbox_round, extra_expansions) = match key_length {
16 => (10, false, 0),
24 => (12, false, 2),
32 => (14, true, 3),
len => panic!("Unsupported key length {}", len),
};
let expanded_key_size = 16 * (rounds + 1);
let mut expanded_key = Vec::with_capacity(expanded_key_size);
expanded_key.extend_from_slice(&key);
let mut rcon_iteration = 1usize;
while expanded_key.len() < expanded_key_size {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Full,
);
expanded_key.extend(t.iter());
for _i in 0..3 {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Xor,
);
expanded_key.extend(t.iter());
}
if sbox_round {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Sbox,
);
expanded_key.extend(t.iter());
}
for _i in 0..extra_expansions {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Xor,
);
expanded_key.extend(t.iter());
}
}
// Truncate any extra bytes
expanded_key.resize(expanded_key_size, 0);
assert!(
expanded_key.len() == expanded_key_size,
"Expanded key is too long: {}",
expanded_key.len(),
);
expanded_key
}
#[test]
fn expand_key_16() {
use util::parse_byte_string;
assert_eq!(
parse_byte_string(
"00000000000000000000000000000000626363636263636362636363626363639b9898c9f9fbfbaa9b9898c9f9fbfbaa90973450696ccffaf2f457330b0fac99ee06da7b876a1581759e42b27e91ee2b7f2e2b88f8443e098dda7cbbf34b9290ec614b851425758c99ff09376ab49ba7217517873550620bacaf6b3cc61bf09b0ef903333ba9613897060a04511dfa9fb1d4d8e28a7db9da1d7bb3de4c664941b4ef5bcb3e92e21123e951cf6f8f188e"
),
expand_key(&vec![0; 16])
);
assert_eq!(
parse_byte_string(
"ffffffffffffffffffffffffffffffffe8e9e9e917161616e8e9e9e917161616adaeae19bab8b80f525151e6454747f0090e2277b3b69a78e1e7cb9ea4a08c6ee16abd3e52dc2746b33becd8179b60b6e5baf3ceb766d488045d385013c658e671d07db3c6b6a93bc2eb916bd12dc98de90d208d2fbb89b6ed5018dd3c7dd15096337366b988fad054d8e20d68a5335d8bf03f233278c5f366a027fe0e0514a3d60a3588e472f07b82d2d7858cd7c326"
),
expand_key(&vec![0xff; 16])
);
assert_eq!(
parse_byte_string(
"000102030405060708090a0b0c0d0e0fd6aa74fdd2af72fadaa678f1d6ab76feb692cf0b643dbdf1be9bc5006830b3feb6ff744ed2c2c9bf6c590cbf0469bf4147f7f7bc95353e03f96c32bcfd058dfd3caaa3e8a99f9deb50f3af57adf622aa5e390f7df7a69296a7553dc10aa31f6b14f9701ae35fe28c440adf4d4ea9c02647438735a41c65b9e016baf4aebf7ad2549932d1f08557681093ed9cbe2c974e13111d7fe3944a17f307a78b4d2b30c5"
),
expand_key(&parse_byte_string("000102030405060708090a0b0c0d0e0f"))
);
assert_eq!(
parse_byte_string(
"6920e299a5202a6d656e636869746f2afa8807605fa82d0d3ac64e6553b2214fcf75838d90ddae80aa1be0e5f9a9c1aa180d2f1488d0819422cb6171db62a0dbbaed96ad323d173910f67648cb94d693881b4ab2ba265d8baad02bc36144fd50b34f195d096944d6a3b96f15c2fd9245a7007778ae6933ae0dd05cbbcf2dcefeff8bccf251e2ff5c5c32a3e7931f6d1924b7182e7555e77229674495ba78298cae127cdadb479ba8f220df3d4858f6b1"
),
expand_key(&parse_byte_string("6920e299a5202a6d656e636869746f2a"))
);
}
#[test]
fn expand_key_24() {
assert_eq!(208, expand_key(&vec![0; 24]).len());
}
#[test]
fn expand_key_32() {
assert_eq!(240, expand_key(&vec![0; 32]).len());
}
fn add_round_key(state: &mut [u8], key: &[u8]) {
xor::buffer_mut(state, xor::Key::FullBuffer(key));
}
// Shifted by 0, 1, 2, 3 columns
const ROW_SHIFTS: [usize; 16] = [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11];
fn shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[ROW_SHIFTS[index]];
}
}
#[test]
fn test_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_eq!(
rows,
[1, 6, 11, 16, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12]
);
}
// Shifted by 0, -1, -2, -3 columns
const INV_ROW_SHIFTS: [usize; 16] = [0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3];
fn inv_shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[INV_ROW_SHIFTS[index]];
}
}
#[test]
fn test_inv_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
inv_shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
assert_eq!(
rows,
[1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3, 16, 13, 10, 7, 4,]
);
}
#[test]
fn test_shift_rows_ident() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
inv_shift_rows(&mut rows);
assert_eq!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
}
const COLUMN_MATRIX: [u8; 16] = [2, 3, 1, 1, 1, 2, 3, 1, 1, 1, 2, 3, 3, 1, 1, 2];
const INV_COLUMN_MATRIX: [u8; 16] = [14, 11, 13, 9, 9, 14, 11, 13, 13, 9, 14, 11, 11, 13, 9, 14];
fn gmul(mut a: u8, mut b: u8) -> u8 {
let mut p = 0;
for _ in 0..8 {
if (b & 0x1) != 0 {
p.bitxor_assign(a);
}
let has_high_bit = (a & 0x80) == 0x80;
a <<= 1;
if has_high_bit {
a.bitxor_assign(0x1b);
}
b >>= 1;
}
p
}
fn mix_column(matrix: &[u8; 16], state_column: &[u8]) -> Vec<u8> {
matrix
.chunks(4)
.map(|mc| {
mc.iter()
.enumerate()
.map(|(i, &coefficient)| gmul(coefficient, state_column[i]))
.fold(None, |accum, current| match accum {
None => Some(current),
Some(x) => Some(x.bitxor(current)),
})
.unwrap()
})
.collect()
}
#[test]
fn test_mix_column() {
use util::parse_byte_string;
assert_eq!(
parse_byte_string("8e4da1bc"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("db135345")),
);
assert_eq!(
parse_byte_string("9fdc589d"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("f20a225c")),
);
assert_eq!(
parse_byte_string("01010101"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("01010101")),
);
assert_eq!(
parse_byte_string("c6c6c6c6"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("c6c6c6c6")),
);
assert_eq!(
parse_byte_string("d5d5d7d6"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("d4d4d4d5")),
);
assert_eq!(
parse_byte_string("4d7ebdf8"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("2d26314c")),
);
}
fn mix_columns(state: &mut [u8]) {
for column in state.chunks_mut(4) {
let new_column = mix_column(&COLUMN_MATRIX, column);
column.copy_from_slice(&new_column);
}
}
fn inv_mix_columns(state: &mut [u8]) {
for column in state.chunks_mut(4) {
let new_column = mix_column(&INV_COLUMN_MATRIX, column);
column.copy_from_slice(&new_column);
}
}
#[derive(Clone)]
pub enum CipherMode {
ECB,
CBC([u8; 16]),
}
fn transform_chunk(chunk: &[u8], expanded_key: &[u8], operation: Operation) -> [u8; 16] {
const STATE_SIZE: usize = 16;
assert!(
chunk.len() == STATE_SIZE,
"Chunk size of {} is invalid; expected {}",
chunk.len(),
STATE_SIZE
);
let last_round = expanded_key.chunks(STATE_SIZE).count() - 1;
let mut state = util::convert_to_fixed_array(chunk);
match operation {
Operation::Encrypt => {
for (round, round_key) in expanded_key.chunks(STATE_SIZE).enumerate() {
match round {
0 => {
add_round_key(&mut state, round_key);
}
n if n != last_round => {
sbox(&mut state);
shift_rows(&mut state);
mix_columns(&mut state);
add_round_key(&mut state, round_key);
}
_ => {
sbox(&mut state);
shift_rows(&mut state);
add_round_key(&mut state, round_key);
}
}
}
}
Operation::Decrypt => {
for (round, round_key) in expanded_key.chunks(STATE_SIZE).rev().enumerate() {
match round {
0 => {
add_round_key(&mut state, round_key);
}
n if n != last_round => {
inv_shift_rows(&mut state);
inv_sbox(&mut state);
add_round_key(&mut state, round_key);
inv_mix_columns(&mut state);
}
_ => {
inv_shift_rows(&mut state);
inv_sbox(&mut state);
add_round_key(&mut state, round_key);
}
}
}
}
};
state
}
#[derive(Clone, Copy)]
pub enum Operation {
Encrypt,
Decrypt,
}
trait CipherModeImpl {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8>;
}
struct ECBCipherMode {}
struct CBCCipherMode {
initialization_vector: [u8; 16],
operation: Operation,
}
impl CBCCipherMode {
fn transform(&mut self, chunk: &[u8; 16], transform: &dyn Fn(&[u8; 16]) -> [u8; 16]) -> [u8; 16] {
match self.operation {
Operation::Encrypt => {
xor::buffer_mut(&mut self.initialization_vector, xor::Key::FullBuffer(chunk));
self.initialization_vector = transform(&self.initialization_vector);
util::convert_to_fixed_array(&self.initialization_vector)
}
Operation::Decrypt => {
let mut plaintext = transform(chunk);
xor::buffer_mut(
&mut plaintext,
xor::Key::FullBuffer(&self.initialization_vector),
);
self.initialization_vector = util::convert_to_fixed_array(chunk);
plaintext
}
}
}
}
impl CipherModeImpl for ECBCipherMode {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8> {
let mut v = Vec::with_capacity(data.len());
for chunk in data.chunks(16) {
v.extend(&transform(&util::convert_to_fixed_array(chunk)));
}
v
}
}
impl CipherModeImpl for CBCCipherMode {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8> {
let mut v = Vec::with_capacity(data.len());
for chunk in data.chunks(16) {
v.extend(
self
.transform(&util::convert_to_fixed_array(chunk), transform)
.iter(),
);
}
v
}
}
pub fn perform(data: &[u8], key: &[u8], operation: Operation, cipher_mode: CipherMode) -> Vec<u8> {
let expanded_key = expand_key(key);
let mut cipher_mode_impl: Box<dyn CipherModeImpl> = match cipher_mode {
CipherMode::ECB => Box::new(ECBCipherMode {}),
CipherMode::CBC(iv) => Box::new(CBCCipherMode {
initialization_vector: iv,
operation,
}),
};
cipher_mode_impl.transform_chunks(data, &|pre_transformed_chunk| {
transform_chunk(pre_transformed_chunk, &expanded_key, operation)
})
}
#[test]
fn ecb_once_16() {
let plaintext = &vec![0; 16];
let key = &vec![0; 16];
let ciphertext = perform(&plaintext, &key, Operation::Encrypt, CipherMode::ECB);
assert_eq!(
plaintext,
&perform(&ciphertext, &key, Operation::Decrypt, CipherMode::ECB)
);
}
#[test]
fn ecb_once_24() {
let plaintext = &vec![0; 32];
let key = &vec![0; 24];
let ciphertext = perform(&plaintext, &key, Operation::Encrypt, CipherMode::ECB);
assert_eq!(
plaintext,
&perform(&ciphertext, &key, Operation::Decrypt, CipherMode::ECB)
);
}
#[test]
fn ecb_once_32() { | expand_key | identifier_name |
aes.rs | 8; 11] = [
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
];
const SBOX: [u8; 256] = [
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,
];
const INV_SBOX: [u8; 256] = [
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,
];
fn sbox(data: &mut [u8]) {
for e in data.iter_mut() {
*e = SBOX[*e as usize];
}
}
fn inv_sbox(data: &mut [u8]) {
for e in data.iter_mut() {
*e = INV_SBOX[*e as usize];
}
}
fn to_four_byte_array(data: &[u8]) -> [u8; 4] {
[data[0], data[1], data[2], data[3]]
}
enum KeyExpansionMode {
Xor,
Sbox,
Full,
}
fn generate_four_bytes(
key_length: usize,
expanded_key: &[u8],
rcon_iteration: &mut usize,
mode: KeyExpansionMode,
) -> [u8; 4] {
let i = expanded_key.len();
let source_bytes = &expanded_key[i - 4..i];
let mut t: [u8; 4] = to_four_byte_array(source_bytes);
match mode {
KeyExpansionMode::Xor => {}
KeyExpansionMode::Sbox => {
sbox(&mut t);
}
KeyExpansionMode::Full => {
t.rotate_left(1);
sbox(&mut t);
t[0].bitxor_assign(RCON[*rcon_iteration]);
*rcon_iteration += 1;
}
};
let xor_source = &expanded_key[i - key_length..i - key_length + 4];
xor::buffer_mut(&mut t, xor::Key::FullBuffer(xor_source));
t
}
fn expand_key(key: &[u8]) -> Vec<u8> {
let key_length = key.len();
let (rounds, sbox_round, extra_expansions) = match key_length {
16 => (10, false, 0),
24 => (12, false, 2),
32 => (14, true, 3),
len => panic!("Unsupported key length {}", len),
};
let expanded_key_size = 16 * (rounds + 1);
let mut expanded_key = Vec::with_capacity(expanded_key_size);
expanded_key.extend_from_slice(&key);
let mut rcon_iteration = 1usize;
while expanded_key.len() < expanded_key_size {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Full,
);
expanded_key.extend(t.iter());
for _i in 0..3 {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Xor,
);
expanded_key.extend(t.iter());
}
if sbox_round {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Sbox,
);
expanded_key.extend(t.iter());
}
for _i in 0..extra_expansions {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Xor,
);
expanded_key.extend(t.iter());
}
}
// Truncate any extra bytes
expanded_key.resize(expanded_key_size, 0);
assert!(
expanded_key.len() == expanded_key_size,
"Expanded key is too long: {}",
expanded_key.len(),
);
expanded_key
}
#[test]
fn expand_key_16() {
use util::parse_byte_string;
assert_eq!(
parse_byte_string(
"00000000000000000000000000000000626363636263636362636363626363639b9898c9f9fbfbaa9b9898c9f9fbfbaa90973450696ccffaf2f457330b0fac99ee06da7b876a1581759e42b27e91ee2b7f2e2b88f8443e098dda7cbbf34b9290ec614b851425758c99ff09376ab49ba7217517873550620bacaf6b3cc61bf09b0ef903333ba9613897060a04511dfa9fb1d4d8e28a7db9da1d7bb3de4c664941b4ef5bcb3e92e21123e951cf6f8f188e"
),
expand_key(&vec![0; 16])
);
assert_eq!(
parse_byte_string(
"ffffffffffffffffffffffffffffffffe8e9e9e917161616e8e9e9e917161616adaeae19bab8b80f525151e6454747f0090e2277b3b69a78e1e7cb9ea4a08c6ee16abd3e52dc2746b33becd8179b60b6e5baf3ceb766d488045d385013c658e671d07db3c6b6a93bc2eb916bd12dc98de90d208d2fbb89b6ed5018dd3c7dd15096337366b988fad054d8e20d68a5335d8bf03f233278c5f366a027fe0e0514a3d60a3588e472f07b82d2d7858cd7c326"
),
expand_key(&vec![0xff; 16])
);
assert_eq!(
parse_byte_string(
"000102030405060708090a0b0c0d0e0fd6aa74fdd2af72fadaa678f1d6ab76feb692cf0b643dbdf1be9bc5006830b3feb6ff744ed2c2c9bf6c590cbf0469bf4147f7f7bc95353e03f96c32bcfd058dfd3caaa3e8a99f9deb50f3af57adf622aa5e390f7df7a69296a7553dc10aa31f6b14f9701ae35fe28c440adf4d4ea9c02647438735a41c65b9e016baf4aebf7ad2549932d1f08557681093ed9cbe2c974e13111d7fe3944a17f307a78b4d2b30c5"
),
expand_key(&parse_byte_string("000102030405060708090a0b0c0d0e0f"))
);
assert_eq!(
parse_byte_string(
"6920e299a5202a6d656e636869746f2afa8807605fa82d0d3ac64e6553b2214fcf75838d90ddae80aa1be0e5f9a9c1aa180d2f1488d0819422cb6171db62a0dbbaed96ad323d173910f67648cb94d693881b4ab2ba265d8baad02bc36144fd50b34f195d096944d6a3b96f15c2fd9245a7007778ae6933ae0dd05cbbcf2dcefeff8bccf251e2ff5c5c32a3e7931f6d1924b7182e7555e77229674495ba78298cae127cdadb479ba8f220df3d4858f6b1"
),
expand_key(&parse_byte_string("6920e299a5202a6d656e636869746f2a"))
);
}
#[test]
fn expand_key_24() {
assert_eq!(208, expand_key(&vec![0; 24]).len());
}
#[test]
fn expand_key_32() {
assert_eq!(240, expand_key(&vec![0; 32]).len());
}
fn add_round_key(state: &mut [u8], key: &[u8]) {
xor::buffer_mut(state, xor::Key::FullBuffer(key));
}
// Shifted by 0, 1, 2, 3 columns
const ROW_SHIFTS: [usize; 16] = [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11];
fn shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[ROW_SHIFTS[index]];
}
}
#[test]
fn test_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_eq!(
rows,
[1, 6, 11, 16, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12]
);
}
// Shifted by 0, -1, -2, -3 columns
const INV_ROW_SHIFTS: [usize; 16] = [0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3];
fn inv_shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[INV_ROW_SHIFTS[index]];
}
}
#[test]
fn test_inv_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
inv_shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
assert_eq!(
rows,
[1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3, 16, 13, 10, 7, 4,]
);
}
#[test]
fn test_shift_rows_ident() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
inv_shift_rows(&mut rows);
assert_eq!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
}
const COLUMN_MATRIX: [u8; 16] = [2, 3, 1, 1, 1, 2, 3, 1, 1, 1, 2, 3, 3, 1, 1, 2];
const INV_COLUMN_MATRIX: [u8; 16] = [14, 11, 13, 9, 9, 14, 11, 13, 13, 9, 14, 11, 11, 13, 9, 14];
fn gmul(mut a: u8, mut b: u8) -> u8 {
let mut p = 0;
for _ in 0..8 {
if (b & 0x1) != 0 {
p.bitxor_assign(a);
}
let has_high_bit = (a & 0x80) == 0x80;
a <<= 1;
if has_high_bit {
a.bitxor_assign(0x1b);
}
b >>= 1;
}
p
}
fn mix_column(matrix: &[u8; 16], state_column: &[u8]) -> Vec<u8> {
matrix
.chunks(4)
.map(|mc| {
mc.iter()
.enumerate()
.map(|(i, &coefficient)| gmul(coefficient, state_column[i]))
.fold(None, |accum, current| match accum {
None => Some(current),
Some(x) => Some(x.bitxor(current)),
})
.unwrap()
})
.collect()
}
#[test]
fn test_mix_column() {
use util::parse_byte_string;
assert_eq!(
parse_byte_string("8e4da1bc"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("db135345")),
);
assert_eq!(
parse_byte_string("9fdc589d"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("f20a225c")),
);
assert_eq!(
parse_byte_string("01010101"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("01010101")),
);
assert_eq!(
parse_byte_string("c6c6c6c6"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("c6c6c6c6")),
);
assert_eq!(
parse_byte_string("d5d5d7d6"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("d4d4d4d5")),
);
assert_eq!(
parse_byte_string("4d7ebdf8"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("2d26314c")),
);
}
fn mix_columns(state: &mut [u8]) {
for column in state.chunks_mut(4) {
let new_column = mix_column(&COLUMN_MATRIX, column);
column.copy_from_slice(&new_column);
}
}
fn inv_mix_columns(state: &mut [u8]) {
for column in state.chunks_mut(4) {
let new_column = mix_column(&INV_COLUMN_MATRIX, column);
column.copy_from_slice(&new_column);
}
}
#[derive(Clone)]
pub enum CipherMode {
ECB,
CBC([u8; 16]),
}
fn transform_chunk(chunk: &[u8], expanded_key: &[u8], operation: Operation) -> [u8; 16] {
const STATE_SIZE: usize = 16;
assert!(
chunk.len() == STATE_SIZE,
"Chunk size of {} is invalid; expected {}",
chunk.len(),
STATE_SIZE
);
let last_round = expanded_key.chunks(STATE_SIZE).count() - 1;
let mut state = util::convert_to_fixed_array(chunk);
match operation {
Operation::Encrypt => {
for (round, round_key) in expanded_key.chunks(STATE_SIZE).enumerate() {
match round {
0 => {
add_round_key(&mut state, round_key);
}
n if n != last_round => {
sbox(&mut state);
shift_rows(&mut state);
mix_columns(&mut state);
add_round_key(&mut state, round_key);
}
_ => {
sbox(&mut state);
shift_rows(&mut state);
add_round_key(&mut state, round_key);
}
}
}
}
Operation::Decrypt => {
for (round, round_key) in expanded_key.chunks(STATE_SIZE).rev().enumerate() {
match round {
0 => {
add_round_key(&mut state, round_key);
}
n if n != last_round => {
inv_shift_rows(&mut state);
inv_sbox(&mut state);
add_round_key(&mut state, round_key);
inv_mix_columns(&mut state);
}
_ => {
inv_shift_rows(&mut state);
inv_sbox(&mut state);
add_round_key(&mut state, round_key);
}
}
}
}
};
state
}
#[derive(Clone, Copy)]
pub enum Operation {
Encrypt,
Decrypt,
}
trait CipherModeImpl {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8>;
}
struct ECBCipherMode {}
struct CBCCipherMode {
initialization_vector: [u8; 16],
operation: Operation, | impl CBCCipherMode {
fn transform(&mut self, chunk: &[u8; 16], transform: &dyn Fn(&[u8; 16]) -> [u8; 16]) -> [u8; 16] {
match self.operation {
Operation::Encrypt => {
xor::buffer_mut(&mut self.initialization_vector, xor::Key::FullBuffer(chunk));
self.initialization_vector = transform(&self.initialization_vector);
util::convert_to_fixed_array(&self.initialization_vector)
}
Operation::Decrypt => {
let mut plaintext = transform(chunk);
xor::buffer_mut(
&mut plaintext,
xor::Key::FullBuffer(&self.initialization_vector),
);
self.initialization_vector = util::convert_to_fixed_array(chunk);
plaintext
}
}
}
}
impl CipherModeImpl for ECBCipherMode {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8> {
let mut v = Vec::with_capacity(data.len());
for chunk in data.chunks(16) {
v.extend(&transform(&util::convert_to_fixed_array(chunk)));
}
v
}
}
impl CipherModeImpl for CBCCipherMode {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(dyn Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8> {
let mut v = Vec::with_capacity(data.len());
for chunk in data.chunks(16) {
v.extend(
self
.transform(&util::convert_to_fixed_array(chunk), transform)
.iter(),
);
}
v
}
}
pub fn perform(data: &[u8], key: &[u8], operation: Operation, cipher_mode: CipherMode) -> Vec<u8> {
let expanded_key = expand_key(key);
let mut cipher_mode_impl: Box<dyn CipherModeImpl> = match cipher_mode {
CipherMode::ECB => Box::new(ECBCipherMode {}),
CipherMode::CBC(iv) => Box::new(CBCCipherMode {
initialization_vector: iv,
operation,
}),
};
cipher_mode_impl.transform_chunks(data, &|pre_transformed_chunk| {
transform_chunk(pre_transformed_chunk, &expanded_key, operation)
})
}
#[test]
fn ecb_once_16() {
let plaintext = &vec![0; 16];
let key = &vec![0; 16];
let ciphertext = perform(&plaintext, &key, Operation::Encrypt, CipherMode::ECB);
assert_eq!(
plaintext,
&perform(&ciphertext, &key, Operation::Decrypt, CipherMode::ECB)
);
}
#[test]
fn ecb_once_24() {
let plaintext = &vec![0; 32];
let key = &vec![0; 24];
let ciphertext = perform(&plaintext, &key, Operation::Encrypt, CipherMode::ECB);
assert_eq!(
plaintext,
&perform(&ciphertext, &key, Operation::Decrypt, CipherMode::ECB)
);
}
#[test]
fn ecb_once_32() {
| }
| random_line_split |
tcp.rs | //! TcpStream wrappers that support connecting with options
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
use std::{
io::{self, ErrorKind},
net::SocketAddr,
ops::{Deref, DerefMut},
pin::Pin,
task::{self, Poll},
};
use futures::{future, ready};
use log::{debug, warn};
use pin_project::pin_project;
use socket2::{Socket, TcpKeepalive};
use tokio::{
io::{AsyncRead, AsyncWrite, ReadBuf},
net::{TcpListener as TokioTcpListener, TcpSocket, TcpStream as TokioTcpStream},
};
use crate::{context::Context, relay::socks5::Address, ServerAddr};
use super::{
sys::{set_tcp_fastopen, TcpStream as SysTcpStream},
AcceptOpts,
ConnectOpts,
};
/// TcpStream for outbound connections
#[pin_project]
pub struct TcpStream(#[pin] SysTcpStream);
impl TcpStream {
/// Connects to address
pub async fn connect_with_opts(addr: &SocketAddr, opts: &ConnectOpts) -> io::Result<TcpStream> {
// tcp_stream_connect(addr, opts).await.map(TcpStream)
SysTcpStream::connect(*addr, opts).await.map(TcpStream)
}
/// Connects shadowsocks server
pub async fn connect_server_with_opts(
context: &Context,
addr: &ServerAddr,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
ServerAddr::SocketAddr(ref addr) => SysTcpStream::connect(*addr, opts).await?,
ServerAddr::DomainName(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
/// Connects proxy remote target
pub async fn connect_remote_with_opts(
context: &Context,
addr: &Address,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
Address::SocketAddress(ref addr) => SysTcpStream::connect(*addr, opts).await?,
Address::DomainNameAddress(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
}
impl Deref for TcpStream {
type Target = TokioTcpStream;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for TcpStream {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl AsyncRead for TcpStream {
fn poll_read(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_read(cx, buf)
}
}
impl AsyncWrite for TcpStream {
fn poll_write(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
self.project().0.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_shutdown(cx)
}
}
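// Together with the Deref/DerefMut impls above, these forwarding AsyncRead and
// AsyncWrite impls let the wrapper be used anywhere a plain tokio TcpStream is
// expected.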
/// `TcpListener` for accepting inbound connections
pub struct TcpListener {
inner: TokioTcpListener,
accept_opts: AcceptOpts,
}
impl TcpListener {
/// Creates a new TcpListener, which will be bound to the specified address.
pub async fn bind_with_opts(addr: &SocketAddr, accept_opts: AcceptOpts) -> io::Result<TcpListener> {
let socket = match *addr {
SocketAddr::V4(..) => TcpSocket::new_v4()?,
SocketAddr::V6(..) => TcpSocket::new_v6()?,
};
// On platforms with Berkeley-derived sockets, this allows a socket to be
// rebound quickly, without needing to wait for the OS to clean up the
// previous one.
//
// On Windows, this would allow rebinding sockets that are actively in use,
// enabling “socket hijacking”, so we explicitly don't set it here.
// https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
#[cfg(not(windows))]
socket.set_reuseaddr(true)?;
let set_dual_stack = if let SocketAddr::V6(ref v6) = *addr {
v6.ip().is_unspecified()
} else {
false
};
if set_dual_stack {
// Set to DUAL STACK mode by default.
// WARNING: This would fail if you want to start another program listening on the same port.
//
// Should this behavior be configurable?
fn set_only_v6(socket: &TcpSocket, only_v6: bool) {
unsafe {
// WARN: If the following code panics, FD will be closed twice.
#[cfg(unix)]
let s = Socket::from_raw_fd(socket.as_raw_fd());
#[cfg(windows)]
let s = Socket::from_raw_socket(socket.as_raw_socket());
if let Err(err) = s.set_only_v6(only_v6) {
warn!("failed to set IPV6_V6ONLY: {} for listener, error: {}", only_v6, err);
// This is not a fatal error, just warn and skip
}
#[cfg(unix)]
let _ = s.into_raw_fd();
#[cfg(windows)]
let _ = s.into_raw_socket();
}
}
set_only_v6(&socket, false);
match socket.bind(*addr) {
Ok(..) => {}
Err(ref err) if err.kind() == ErrorKind::AddrInUse => { | debug!(
"0.0.0.0:{} may have already been occupied, retry with IPV6_V6ONLY",
addr.port()
);
set_only_v6(&socket, true);
socket.bind(*addr)?;
}
Err(err) => return Err(err),
}
} else {
socket.bind(*addr)?;
}
// mio's default backlog is 1024
let inner = socket.listen(1024)?;
// Enable TFO if supported
// macOS requires TCP_FASTOPEN to be set after listen(), but other platforms don't have this constraint
if accept_opts.tcp.fastopen {
set_tcp_fastopen(&inner)?;
}
Ok(TcpListener { inner, accept_opts })
}
/// Create a `TcpListener` from tokio's `TcpListener`
pub fn from_listener(listener: TokioTcpListener, accept_opts: AcceptOpts) -> TcpListener {
TcpListener {
inner: listener,
accept_opts,
}
}
/// Polls to accept a new incoming connection to this listener.
pub fn poll_accept(&self, cx: &mut task::Context<'_>) -> Poll<io::Result<(TokioTcpStream, SocketAddr)>> {
let (stream, peer_addr) = ready!(self.inner.poll_accept(cx))?;
setsockopt_with_opt(&stream, &self.accept_opts)?;
Poll::Ready(Ok((stream, peer_addr)))
}
/// Accept a new incoming connection to this listener
pub async fn accept(&self) -> io::Result<(TokioTcpStream, SocketAddr)> {
future::poll_fn(|cx| self.poll_accept(cx)).await
}
/// Unwraps and take the internal `TcpListener`
pub fn into_inner(self) -> TokioTcpListener {
self.inner
}
}
impl Deref for TcpListener {
type Target = TokioTcpListener;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for TcpListener {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<TcpListener> for TokioTcpListener {
fn from(listener: TcpListener) -> TokioTcpListener {
listener.inner
}
}
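// Illustrative usage sketch (assumes `AcceptOpts` implements `Default`; substitute
// the crate's real constructor if it does not):
//
// async fn serve() -> std::io::Result<()> {
//     let addr: std::net::SocketAddr = "[::]:8388".parse().unwrap();
//     let listener = TcpListener::bind_with_opts(&addr, AcceptOpts::default()).await?;
//     loop {
//         let (stream, peer) = listener.accept().await?;
//         // hand `stream` from `peer` off to a task here
//         let _ = (stream, peer);
//     }
// }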
#[cfg(unix)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_fd(f.as_raw_fd()) };
macro_rules! try_sockopt {
($socket:ident. $func:ident ($($arg:expr),*)) => {
match $socket. $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_fd();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
#[allow(unused_mut)]
let mut keepalive = TcpKeepalive::new().with_time(keepalive_duration);
#[cfg(any(
target_os = "freebsd",
target_os = "fuchsia",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
))]
{
keepalive = keepalive.with_interval(keepalive_duration);
}
try_sockopt!(socket.set_tcp_keepalive(&keepalive));
}
let _ = socket.into_raw_fd();
Ok(())
}
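// Note on the pattern above: `Socket::from_raw_fd` takes ownership of the
// descriptor, so every exit path must release it again with `into_raw_fd`;
// otherwise dropping `socket` would close an fd that tokio still owns. The
// `try_sockopt!` macro exists to preserve that invariant on early returns.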
#[cfg(windows)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_socket(f.as_raw_socket()) };
macro_rules! try_sockopt {
($socket:ident. $func:ident ($($arg:expr),*)) => {
match $socket. $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_socket();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
let keepalive = TcpKeepalive::new()
.with_time(keepalive_duration)
.with_interval(keepalive_duration);
try_sockopt!(socket.set_tcp_keepalive(&keepalive));
}
let _ = socket.into_raw_socket();
Ok(())
}
#[cfg(all(not(windows), not(unix)))]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
f.set_nodelay(opts.tcp.nodelay)?;
Ok(())
}
#[cfg(unix)]
impl AsRawFd for TcpStream {
fn as_raw_fd(&self) -> RawFd {
self.0.as_raw_fd()
}
}
#[cfg(windows)]
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.0.as_raw_socket()
}
} | // This is probably 0.0.0.0 with the same port has already been occupied | random_line_split |
tcp.rs | //! TcpStream wrappers that support connecting with options
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
use std::{
io::{self, ErrorKind},
net::SocketAddr,
ops::{Deref, DerefMut},
pin::Pin,
task::{self, Poll},
};
use futures::{future, ready};
use log::{debug, warn};
use pin_project::pin_project;
use socket2::{Socket, TcpKeepalive};
use tokio::{
io::{AsyncRead, AsyncWrite, ReadBuf},
net::{TcpListener as TokioTcpListener, TcpSocket, TcpStream as TokioTcpStream},
};
use crate::{context::Context, relay::socks5::Address, ServerAddr};
use super::{
sys::{set_tcp_fastopen, TcpStream as SysTcpStream},
AcceptOpts,
ConnectOpts,
};
/// TcpStream for outbound connections
#[pin_project]
pub struct TcpStream(#[pin] SysTcpStream);
impl TcpStream {
/// Connects to address
pub async fn connect_with_opts(addr: &SocketAddr, opts: &ConnectOpts) -> io::Result<TcpStream> {
// tcp_stream_connect(addr, opts).await.map(TcpStream)
SysTcpStream::connect(*addr, opts).await.map(TcpStream)
}
/// Connects shadowsocks server
pub async fn connect_server_with_opts(
context: &Context,
addr: &ServerAddr,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
ServerAddr::SocketAddr(ref addr) => SysTcpStream::connect(*addr, opts).await?,
ServerAddr::DomainName(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
/// Connects proxy remote target
pub async fn connect_remote_with_opts(
context: &Context,
addr: &Address,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
Address::SocketAddress(ref addr) => SysTcpStream::connect(*addr, opts).await?,
Address::DomainNameAddress(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
}
impl Deref for TcpStream {
type Target = TokioTcpStream;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for TcpStream {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl AsyncRead for TcpStream {
fn poll_read(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_read(cx, buf)
}
}
impl AsyncWrite for TcpStream {
fn poll_write(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
self.project().0.poll_write(cx, buf)
}
fn | (self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TcpListener` for accepting inbound connections
pub struct TcpListener {
inner: TokioTcpListener,
accept_opts: AcceptOpts,
}
impl TcpListener {
/// Creates a new TcpListener, which will be bound to the specified address.
pub async fn bind_with_opts(addr: &SocketAddr, accept_opts: AcceptOpts) -> io::Result<TcpListener> {
let socket = match *addr {
SocketAddr::V4(..) => TcpSocket::new_v4()?,
SocketAddr::V6(..) => TcpSocket::new_v6()?,
};
// On platforms with Berkeley-derived sockets, this allows a socket to be
// rebound quickly, without needing to wait for the OS to clean up the
// previous one.
//
// On Windows, this would allow rebinding sockets that are actively in use,
// enabling “socket hijacking”, so we explicitly don't set it here.
// https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
#[cfg(not(windows))]
socket.set_reuseaddr(true)?;
let set_dual_stack = if let SocketAddr::V6(ref v6) = *addr {
v6.ip().is_unspecified()
} else {
false
};
if set_dual_stack {
// Set to DUAL STACK mode by default.
// WARNING: This would fail if you want to start another program listening on the same port.
//
// Should this behavior be configurable?
fn set_only_v6(socket: &TcpSocket, only_v6: bool) {
unsafe {
// WARN: If the following code panics, FD will be closed twice.
#[cfg(unix)]
let s = Socket::from_raw_fd(socket.as_raw_fd());
#[cfg(windows)]
let s = Socket::from_raw_socket(socket.as_raw_socket());
if let Err(err) = s.set_only_v6(only_v6) {
warn!("failed to set IPV6_V6ONLY: {} for listener, error: {}", only_v6, err);
// This is not a fatal error, just warn and skip
}
#[cfg(unix)]
let _ = s.into_raw_fd();
#[cfg(windows)]
let _ = s.into_raw_socket();
}
}
set_only_v6(&socket, false);
match socket.bind(*addr) {
Ok(..) => {}
Err(ref err) if err.kind() == ErrorKind::AddrInUse => {
// This is probably 0.0.0.0 with the same port has already been occupied
debug!(
"0.0.0.0:{} may have already been occupied, retry with IPV6_V6ONLY",
addr.port()
);
set_only_v6(&socket, true);
socket.bind(*addr)?;
}
Err(err) => return Err(err),
}
} else {
socket.bind(*addr)?;
}
// mio's default backlog is 1024
let inner = socket.listen(1024)?;
// Enable TFO if supported
// macOS requires TCP_FASTOPEN to be set after listen(), but other platforms don't have this constraint
if accept_opts.tcp.fastopen {
set_tcp_fastopen(&inner)?;
}
Ok(TcpListener { inner, accept_opts })
}
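// Hypothetical usage sketch (the address and `AcceptOpts::default()` are
// assumptions for illustration, not taken from this crate's tests):
//
// let addr: SocketAddr = "[::]:8388".parse().unwrap();
// let listener = TcpListener::bind_with_opts(&addr, AcceptOpts::default()).await?;
// // Binding the unspecified IPv6 address tries dual-stack mode first and falls
// // back to IPV6_V6ONLY=1 if the matching 0.0.0.0 port is already occupied.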
/// Create a `TcpListener` from tokio's `TcpListener`
pub fn from_listener(listener: TokioTcpListener, accept_opts: AcceptOpts) -> TcpListener {
TcpListener {
inner: listener,
accept_opts,
}
}
/// Polls to accept a new incoming connection to this listener.
pub fn poll_accept(&self, cx: &mut task::Context<'_>) -> Poll<io::Result<(TokioTcpStream, SocketAddr)>> {
let (stream, peer_addr) = ready!(self.inner.poll_accept(cx))?;
setsockopt_with_opt(&stream, &self.accept_opts)?;
Poll::Ready(Ok((stream, peer_addr)))
}
/// Accept a new incoming connection to this listener
pub async fn accept(&self) -> io::Result<(TokioTcpStream, SocketAddr)> {
future::poll_fn(|cx| self.poll_accept(cx)).await
}
/// Unwraps and takes the internal `TcpListener`
pub fn into_inner(self) -> TokioTcpListener {
self.inner
}
}
impl Deref for TcpListener {
type Target = TokioTcpListener;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for TcpListener {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<TcpListener> for TokioTcpListener {
fn from(listener: TcpListener) -> TokioTcpListener {
listener.inner
}
}
#[cfg(unix)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_fd(f.as_raw_fd()) };
macro_rules! try_sockopt {
($socket:ident. $func:ident ($($arg:expr),*)) => {
match $socket. $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_fd();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
#[allow(unused_mut)]
let mut keepalive = TcpKeepalive::new().with_time(keepalive_duration);
#[cfg(any(
target_os = "freebsd",
target_os = "fuchsia",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
))]
{
keepalive = keepalive.with_interval(keepalive_duration);
}
try_sockopt!(socket.set_tcp_keepalive(&keepalive));
}
let _ = socket.into_raw_fd();
Ok(())
}
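// Design note on the raw-FD handling above: `Socket::from_raw_fd` takes ownership
// of the descriptor, so every exit path (including the error arm of `try_sockopt!`)
// must call `into_raw_fd` to hand it back to tokio; otherwise dropping the
// temporary `Socket` would close a descriptor tokio still owns.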
#[cfg(windows)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_socket(f.as_raw_socket()) };
macro_rules! try_sockopt {
($socket:ident. $func:ident ($($arg:expr),*)) => {
match $socket. $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_socket();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
let keepalive = TcpKeepalive::new()
.with_time(keepalive_duration)
.with_interval(keepalive_duration);
try_sockopt!(socket.set_tcp_keepalive(&keepalive));
}
let _ = socket.into_raw_socket();
Ok(())
}
#[cfg(all(not(windows), not(unix)))]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
f.set_nodelay(opts.tcp.nodelay)?;
Ok(())
}
#[cfg(unix)]
impl AsRawFd for TcpStream {
fn as_raw_fd(&self) -> RawFd {
self.0.as_raw_fd()
}
}
#[cfg(windows)]
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.0.as_raw_socket()
}
}
| poll_flush | identifier_name |
tcp.rs | //! TcpStream wrappers that supports connecting with options
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
use std::{
io::{self, ErrorKind},
net::SocketAddr,
ops::{Deref, DerefMut},
pin::Pin,
task::{self, Poll},
};
use futures::{future, ready};
use log::{debug, warn};
use pin_project::pin_project;
use socket2::{Socket, TcpKeepalive};
use tokio::{
io::{AsyncRead, AsyncWrite, ReadBuf},
net::{TcpListener as TokioTcpListener, TcpSocket, TcpStream as TokioTcpStream},
};
use crate::{context::Context, relay::socks5::Address, ServerAddr};
use super::{
sys::{set_tcp_fastopen, TcpStream as SysTcpStream},
AcceptOpts,
ConnectOpts,
};
/// TcpStream for outbound connections
#[pin_project]
pub struct TcpStream(#[pin] SysTcpStream);
impl TcpStream {
/// Connects to address
pub async fn connect_with_opts(addr: &SocketAddr, opts: &ConnectOpts) -> io::Result<TcpStream> {
// tcp_stream_connect(addr, opts).await.map(TcpStream)
SysTcpStream::connect(*addr, opts).await.map(TcpStream)
}
/// Connects shadowsocks server
pub async fn connect_server_with_opts(
context: &Context,
addr: &ServerAddr,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
ServerAddr::SocketAddr(ref addr) => SysTcpStream::connect(*addr, opts).await?,
ServerAddr::DomainName(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
/// Connects proxy remote target
pub async fn connect_remote_with_opts(
context: &Context,
addr: &Address,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
Address::SocketAddress(ref addr) => SysTcpStream::connect(*addr, opts).await?,
Address::DomainNameAddress(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
}
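// Hypothetical usage sketch (the address and `ConnectOpts::default()` are
// assumed for illustration only):
//
// let opts = ConnectOpts::default();
// let addr: SocketAddr = "93.184.216.34:443".parse().unwrap();
// let stream = TcpStream::connect_with_opts(&addr, &opts).await?;
// // The `connect_server_with_opts` / `connect_remote_with_opts` variants
// // additionally resolve domain names through the shared DNS `Context` via
// // `lookup_then!`.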
impl Deref for TcpStream {
type Target = TokioTcpStream;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for TcpStream {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl AsyncRead for TcpStream {
fn poll_read(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_read(cx, buf)
}
}
impl AsyncWrite for TcpStream {
fn poll_write(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
self.project().0.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TcpListener` for accepting inbound connections
pub struct TcpListener {
inner: TokioTcpListener,
accept_opts: AcceptOpts,
}
impl TcpListener {
/// Creates a new TcpListener, which will be bound to the specified address.
pub async fn bind_with_opts(addr: &SocketAddr, accept_opts: AcceptOpts) -> io::Result<TcpListener> {
let socket = match *addr {
SocketAddr::V4(..) => TcpSocket::new_v4()?,
SocketAddr::V6(..) => TcpSocket::new_v6()?,
};
// On platforms with Berkeley-derived sockets, this allows a socket to be
// rebound quickly, without waiting for the OS to clean up the previous one.
//
// On Windows, this allows rebinding sockets which are actively in use,
// which allows “socket hijacking”, so we explicitly don't set it here.
// https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
#[cfg(not(windows))]
socket.set_reuseaddr(true)?;
let set_dual_stack = if let SocketAddr::V6(ref v6) = *addr {
v6.ip().is_unspecified()
} else {
false
};
if set_dual_stack {
// Set to DUAL STACK mode by default.
// WARNING: This would fail if you want to start another program listening on the same port.
//
// Should this behavior be configurable?
fn set_only_v6(socket: &TcpSocket, only_v6: bool) {
unsafe {
// WARN: If the following code panics, FD will be closed twice.
#[cfg(unix)]
let s = Socket::from_raw_fd(socket.as_raw_fd());
#[cfg(windows)]
let s = Socket::from_raw_socket(socket.as_raw_socket());
if let Err(err) = s.set_only_v6(only_v6) {
warn!("failed to set IPV6_V6ONLY: {} for listener, error: {}", only_v6, err);
// This is not a fatal error, just warn and skip
}
#[cfg(unix)]
let _ = s.into_raw_fd();
#[cfg(windows)]
let _ = s.into_raw_socket();
}
}
set_only_v6(&socket, false);
match socket.bind(*addr) {
Ok(..) => {}
Err(ref err) if err.kind() == ErrorKind::AddrInUse => {
// The same port on 0.0.0.0 has probably already been occupied
debug!(
"0.0.0.0:{} may have already been occupied, retry with IPV6_V6ONLY",
addr.port()
);
set_only_v6(&socket, true);
socket.bind(*addr)?;
}
Err(err) => return Err(err),
}
} else {
socket.bind(*addr)?;
}
// mio's default backlog is 1024
let inner = socket.listen(1024)?;
// Enable TFO if supported
// macOS requires TCP_FASTOPEN to be set after listen(), but other platforms don't have this constraint
if accept_opts.tcp.fastopen {
set_tcp_fastopen(&inner)?;
}
Ok(TcpListener { inner, accept_opts })
}
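// Walkthrough of the dual-stack fallback above: binding an unspecified IPv6
// address (e.g. `[::]:8388`) first tries IPV6_V6ONLY=0 so one socket serves both
// stacks; if that bind fails with `AddrInUse` (another process already holds
// 0.0.0.0 on the same port), it retries with IPV6_V6ONLY=1 so the listener
// still comes up IPv6-only.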
/// Create a `TcpListener` from tokio's `TcpListener`
pub fn from_listener(listener: TokioTcpListener, accept_opts: AcceptOpts) -> TcpListener {
TcpListener {
inner: listener,
accept_opts,
}
}
/// Polls to accept a new incoming connection to this listener.
pub fn poll_accept(&self, cx: &mut task::Context<'_>) -> Poll<io::Result<(TokioTcpStream, SocketAddr)>> {
let (stream, peer_addr) = ready!(self.inner.poll_accept(cx))?;
setsockopt_with_opt(&stream, &self.accept_opts)?;
Poll::Ready(Ok((stream, peer_addr)))
}
/// Accept a new incoming connection to this listener
pub async fn accept(&self) -> io::Result<(TokioTcpStream, SocketAddr)> {
future::poll_fn(|cx| self.poll_accept(cx)).await
}
/// Unwraps and takes the internal `TcpListener`
pub fn into_inner(self) -> TokioTcpListener {
self.inner
}
}
impl Deref for TcpListener {
type Target = TokioTcpListener;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for TcpListener {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<TcpListener> for TokioTcpListener {
fn from(listener: TcpListener) -> TokioTcpListener {
listener.inner
}
}
#[cfg(unix)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_fd(f.as_raw_fd()) };
macro_rules! try_sockopt {
($socket:ident. $func:ident ($($arg:expr),*)) => {
match $socket. $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_fd();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
#[allow(unused_mut)]
let mut keepalive = TcpKeepalive::new().with_time(keepalive_duration);
#[cfg(any(
target_os = "freebsd",
target_os = "fuchsia",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
))]
{
keepalive = keepalive.with_interval(keepalive_duration);
}
try_sockopt!(socket.set_tcp_keepalive(&keepalive));
}
let _ = socket.into_raw_fd();
Ok(())
}
#[cfg(windows)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_socket(f.as_raw_socket()) };
macro_rules! try_sockopt {
($socket:ident. $func:ident ($($arg:expr),*)) => {
match $socket. $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_socket();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
let keepalive = TcpKeepalive::new()
.with_time(keepalive_duration)
.with_interval(keepalive_duration);
try_sockopt!(socket.set_tcp_keepalive(&keepalive));
}
let _ = socket.into_raw_socket();
Ok(())
}
#[cfg(all(not(windows), not(unix)))]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
f.set_nodelay(opts.tcp.nodelay)?;
Ok(())
}
#[cfg(unix)]
impl AsRawFd for TcpStream {
fn as_raw_fd(&self) -> RawFd {
| #[cfg(windows)]
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.0.as_raw_socket()
}
}
| self.0.as_raw_fd()
}
}
| identifier_body |
piv.rs | // Copyright 2021 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::errors::invalid_data_error;
use iso7816::aid::Aid;
use iso7816::command::instruction::Instruction;
use iso7816::command::Command;
use iso7816::response::Status;
use iso7816_tlv::ber::{Tag, Tlv, Value};
use rdp::model::error::*;
use rsa::pkcs1::DecodeRsaPrivateKey;
use rsa::traits::{PrivateKeyParts, PublicKeyParts};
use rsa::{BigUint, RsaPrivateKey};
use std::convert::TryFrom;
use std::fmt::Write as _;
use std::io::{Cursor, Read};
use uuid::Uuid;
// AID (Application ID) of PIV application, per:
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
const PIV_AID: Aid = Aid::new_truncatable(
&[
0xA0, 0x00, 0x00, 0x03, 0x08, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,
],
5, // usually truncates to first 5 bytes
);
// Card implements a PIV-compatible smartcard, per:
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
#[derive(Debug, PartialEq, Eq)]
pub struct Card<const S: usize> {
// Card-holder user ID (CHUID). In federal agencies, this value would be unique per employee
// and encodes some agency information. In our case it's static.
chuid: Vec<u8>,
piv_auth_cert: Vec<u8>,
piv_auth_key: RsaPrivateKey,
pin: String,
// Pending command and response to receive/send over multiple messages when
// they don't fit into one.
pending_command: Option<Command<S>>,
pending_response: Option<Cursor<Vec<u8>>>,
}
impl<const S: usize> Card<S> {
pub fn new(uuid: Uuid, cert_der: &[u8], key_der: &[u8], pin: String) -> RdpResult<Self> {
let piv_auth_key = RsaPrivateKey::from_pkcs1_der(key_der).map_err(|e| {
invalid_data_error(&format!("failed to parse private key from DER: {e:?}"))
})?;
Ok(Self {
chuid: Self::build_chuid(uuid),
piv_auth_cert: Self::build_piv_auth_cert(cert_der),
piv_auth_key,
pin,
pending_command: None,
pending_response: None,
})
}
pub fn handle(&mut self, cmd: Command<S>) -> RdpResult<Response> {
debug!("got command: {:?}", cmd);
debug!("command data: {}", hex_data(&cmd));
// Handle chained commands.
let cmd = match self.pending_command.as_mut() {
None => cmd,
Some(pending) => {
pending
.extend_from_command(&cmd)
.map_err(|_| invalid_data_error("could not build chained command"))?;
pending.clone()
}
};
if cmd.class().chain().not_the_last() {
self.pending_command = Some(cmd);
return Ok(Response::new(Status::Success));
} else {
self.pending_command = None;
}
let resp = match cmd.instruction() {
Instruction::Select => self.handle_select(cmd),
Instruction::Verify => self.handle_verify(cmd),
Instruction::GetData => self.handle_get_data(cmd),
Instruction::GetResponse => self.handle_get_response(cmd),
Instruction::GeneralAuthenticate => self.handle_general_authenticate(cmd),
_ => {
warn!("unimplemented instruction {:?}", cmd.instruction());
Ok(Response::new(Status::InstructionNotSupportedOrInvalid))
}
}?;
debug!("send response: {:?}", resp);
debug!("response data: {}", to_hex(&resp.encode()));
Ok(resp)
}
fn handle_select(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// For our use case, we only allow selecting the PIV application on the smartcard.
//
// P1=04 and P2=00 mean selection of a DF (usually an application) by name. Everything
// else is not supported.
if cmd.p1 != 0x04 && cmd.p2 != 0x00 {
return Ok(Response::new(Status::NotFound));
}
if !PIV_AID.matches(cmd.data()) {
return Ok(Response::new(Status::NotFound));
}
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf section
// 3.1.1
let resp = tlv(
TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE,
Value::Constructed(vec![
tlv(
TLV_TAG_AID,
Value::Primitive(vec![0x00, 0x00, 0x10, 0x00, 0x01, 0x00]),
)?,
tlv(
TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY,
Value::Constructed(vec![tlv(
TLV_TAG_AID,
Value::Primitive(PIV_AID.truncated().to_vec()),
)?]),
)?,
]),
)?;
Ok(Response::with_data(Status::Success, resp.to_vec()))
}
fn handle_verify(&mut self, cmd: Command<S>) -> RdpResult<Response> {
if cmd.data() == self.pin.as_bytes() {
Ok(Response::new(Status::Success))
} else {
warn!("PIN mismatch, want {}, got {:?}", self.pin, cmd.data());
Ok(Response::new(Status::VerificationFailed))
}
}
fn handle_get_data(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf section
// 3.1.2.
if cmd.p1 != 0x3F && cmd.p2 != 0xFF {
return Ok(Response::new(Status::NotFound));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(0x5C)? {
return Ok(Response::new(Status::NotFound));
}
match request_tlv.value() {
Value::Primitive(tag) => match to_hex(tag).as_str() {
// Card Holder Unique Identifier.
"5FC102" => Ok(Response::with_data(Status::Success, self.chuid.clone())),
// X.509 Certificate for PIV Authentication
"5FC105" => {
self.pending_response = Some(Cursor::new(self.piv_auth_cert.clone()));
self.handle_get_response(cmd)
}
_ => {
// Some other unimplemented data object.
Ok(Response::new(Status::NotFound))
}
},
Value::Constructed(_) => Ok(Response::new(Status::NotFound)),
}
}
fn handle_get_response(&mut self, _cmd: Command<S>) -> RdpResult<Response> {
// CHUNK_SIZE is the max response data size in bytes, without resorting to "extended"
// messages.
const CHUNK_SIZE: usize = 256;
match &mut self.pending_response {
None => Ok(Response::new(Status::NotFound)),
Some(cursor) => {
let mut chunk = [0; CHUNK_SIZE];
let n = cursor.read(&mut chunk)?;
let mut chunk = chunk.to_vec();
chunk.truncate(n);
let remaining = cursor.get_ref().len() as u64 - cursor.position();
let status = if remaining == 0 {
Status::Success
} else if remaining < CHUNK_SIZE as u64 {
Status::MoreAvailable(remaining as u8)
} else {
Status::MoreAvailable(0)
};
Ok(Response::with_data(status, chunk))
}
}
}
/// Sign the challenge.
///
/// Note: for signatures, typically you'd use a signer that hashes the input data, adds padding
/// according to some scheme (like PKCS1v15 or PSS) and then "decrypts" this data with the key.
/// The decrypted blob is the signature.
///
/// In our case, the RDP server does the hashing and padding, and only gives us a finished blob
/// to decrypt. Most crypto libraries don't directly expose RSA decryption without padding, as
/// it's easy to build insecure crypto systems. Thankfully for us, this decryption is just a single
/// modpow operation, which is supported by RustCrypto.
fn sign_auth_challenge(&self, challenge: &[u8]) -> Vec<u8> {
let c = BigUint::from_bytes_be(challenge);
let plain_text = c
.modpow(self.piv_auth_key.d(), self.piv_auth_key.n())
.to_bytes_be();
let mut result = vec![0u8; self.piv_auth_key.size()];
let start = result.len() - plain_text.len();
result[start..].copy_from_slice(&plain_text);
result
}
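// Illustrative check of the raw-RSA relation above (a sketch, not part of the
// original source; assumes the padded challenge is numerically smaller than the
// modulus and that the private key field is accessible in scope):
//
// let s = BigUint::from_bytes_be(&card.sign_auth_challenge(&challenge));
// let recovered = s.modpow(card.piv_auth_key.e(), card.piv_auth_key.n());
// assert_eq!(recovered, BigUint::from_bytes_be(&challenge));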
fn handle_general_authenticate(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See section 3.2.4 and the example in Appendix A.3 from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// P1='07' means 2048-bit RSA.
//
// TODO(zmb3): compare algorithm against the private key using consts from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-78-4.pdf
// TODO(zmb3): support non-RSA keys, if needed.
if cmd.p1 != 0x07 {
return Err(invalid_data_error(&format!(
"unsupported algorithm identifier P1:{:#X} in general authenticate command",
cmd.p1
)));
}
// P2='9A' means PIV Authentication Key (matches our cert '5FC105' in handle_get_data).
if cmd.p2 != 0x9A {
return Err(invalid_data_error(&format!(
"unsupported key reference P2:{:#X} in general authenticate command",
cmd.p2
)));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE)? {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
// Extract the challenge field.
let request_tlvs = match request_tlv.value() {
Value::Primitive(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
Value::Constructed(tlvs) => tlvs,
};
let mut challenge = None;
for data in request_tlvs {
if *data.tag() != tlv_tag(TLV_TAG_CHALLENGE)? {
continue;
}
challenge = match data.value() {
Value::Primitive(chal) => Some(chal),
Value::Constructed(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
};
}
let challenge = challenge.ok_or_else(|| {
invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}, missing challenge data"
))
})?;
// TODO(zmb3): support non-RSA keys, if needed.
let signed_challenge = self.sign_auth_challenge(challenge);
// Return signed challenge.
let resp = tlv(
TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE,
Value::Constructed(vec![tlv(
TLV_TAG_RESPONSE,
Value::Primitive(signed_challenge),
)?]),
)?
.to_vec();
self.pending_response = Some(Cursor::new(resp));
self.handle_get_response(cmd)
}
fn build_chuid(uuid: Uuid) -> Vec<u8> {
// This is gross: the response is a BER-TLV value, but it has nested SIMPLE-TLV
// values. None of the TLV encoding libraries out there support this, they fail
// when checking the tag of nested TLVs.
//
// So, construct the TLV by hand from raw bytes. Hopefully the comments will be
// enough to explain the structure.
//
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// table 9 has the explanation of fields.
//
// Start with a top-level BER-TLV tag and length:
let mut resp = vec![TLV_TAG_DATA_FIELD, 0x3B];
// TLV tag and length for FASC-N.
resp.extend_from_slice(&[TLV_TAG_FASC_N, 0x19]);
// FASC-N value containing S9999F9999F999999F0F1F0000000000300001E, with a
// weird encoding from section 6 of:
// https://www.idmanagement.gov/docs/pacs-tig-scepacs.pdf
resp.extend_from_slice(&[
0xd4, 0xe7, 0x39, 0xda, 0x73, 0x9c, 0xed, 0x39, 0xce, 0x73, 0x9d, 0x83, 0x68, 0x58,
0x21, 0x08, 0x42, 0x10, 0x84, 0x21, 0xc8, 0x42, 0x10, 0xc3, 0xeb,
]);
// TLV for user UUID.
resp.extend_from_slice(&[TLV_TAG_GUID, 0x10]);
resp.extend_from_slice(uuid.as_bytes());
// TLV for expiration date (YYYYMMDD).
resp.extend_from_slice(&[TLV_TAG_EXPIRATION_DATE, 0x08]);
// TODO(awly): generate this from current time.
resp.extend_from_slice("20300101".as_bytes());
// TLV for signature (empty).
resp.extend_from_slice(&[TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
resp
}
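// Byte layout of the CHUID assembled above, derived from the constants used:
//   53 3B                     top-level data field, 0x3B = 59 bytes total
//     30 19 <25-byte FASC-N>
//     34 10 <16-byte UUID>
//     35 08 "20300101"        expiration date
//     3E 00                   empty issuer signature
//     FE 00                   empty error-detection code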
fn build_piv_auth_cert(cert_der: &[u8]) -> Vec<u8> {
// Same as above, tags in this BER-TLV value are not compatible with the spec
// and existing libraries. Marshal by hand.
//
// Certificate TLV tag and length.
let mut resp = vec![TLV_TAG_CERTIFICATE];
resp.extend_from_slice(&len_to_vec(cert_der.len()));
resp.extend_from_slice(cert_der);
// CertInfo TLV (0x00 indicates uncompressed cert).
resp.extend_from_slice(&[TLV_TAG_CERTINFO, 0x01, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
// Wrap with top-level TLV tag and length.
let mut resp_outer = vec![TLV_TAG_DATA_FIELD];
resp_outer.extend_from_slice(&len_to_vec(resp.len()));
resp_outer.extend_from_slice(&resp);
resp_outer
}
}
#[derive(Debug)]
pub struct Response {
data: Option<Vec<u8>>,
status: Status,
}
impl Response {
fn new(status: Status) -> Self {
Self { data: None, status }
}
fn with_data(status: Status, data: Vec<u8>) -> Self {
Self {
data: Some(data),
status,
}
}
pub fn encode(&self) -> Vec<u8> {
let mut buf = Vec::new();
if let Some(data) = &self.data {
buf.extend_from_slice(data);
}
let status: [u8; 2] = self.status.into();
buf.extend_from_slice(&status);
buf
}
}
// SELECT command tags.
const TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE: u8 = 0x61;
const TLV_TAG_AID: u8 = 0x4F;
const TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY: u8 = 0x79;
const TLV_TAG_DATA_FIELD: u8 = 0x53;
const TLV_TAG_FASC_N: u8 = 0x30;
const TLV_TAG_GUID: u8 = 0x34;
const TLV_TAG_EXPIRATION_DATE: u8 = 0x35;
const TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE: u8 = 0x3E;
const TLV_TAG_ERROR_DETECTION_CODE: u8 = 0xFE;
const TLV_TAG_CERTIFICATE: u8 = 0x70;
const TLV_TAG_CERTINFO: u8 = 0x71;
// GENERAL AUTHENTICATE command tags.
const TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE: u8 = 0x7C;
const TLV_TAG_CHALLENGE: u8 = 0x81;
const TLV_TAG_RESPONSE: u8 = 0x82;
fn tlv(tag: u8, value: Value) -> RdpResult<Tlv> {
Tlv::new(tlv_tag(tag)?, value)
.map_err(|e| invalid_data_error(&format!("TLV with tag {tag:#X} invalid: {e:?}")))
}
fn tlv_tag(val: u8) -> RdpResult<Tag> {
Tag::try_from(val).map_err(|e| invalid_data_error(&format!("TLV tag {val:#X} invalid: {e:?}")))
}
fn hex_data<const S: usize>(cmd: &Command<S>) -> String {
to_hex(cmd.data())
}
fn to_hex(bytes: &[u8]) -> String {
let mut s = String::new();
for b in bytes {
// https://rust-lang.github.io/rust-clippy/master/index.html#format_push_string
let _ = write!(s, "{b:02X}");
}
s
}
#[allow(clippy::cast_possible_truncation)]
fn len_to_vec(len: usize) -> Vec<u8> {
if len < 0x7f | else {
let mut ret: Vec<u8> = len
.to_be_bytes()
.iter()
.skip_while(|&x| *x == 0)
.cloned()
.collect();
ret.insert(0, 0x80 | ret.len() as u8);
ret
}
}
| {
vec![len as u8]
} | conditional_block |
piv.rs | // Copyright 2021 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::errors::invalid_data_error;
use iso7816::aid::Aid;
use iso7816::command::instruction::Instruction;
use iso7816::command::Command;
use iso7816::response::Status;
use iso7816_tlv::ber::{Tag, Tlv, Value};
use rdp::model::error::*;
use rsa::pkcs1::DecodeRsaPrivateKey;
use rsa::traits::{PrivateKeyParts, PublicKeyParts};
use rsa::{BigUint, RsaPrivateKey};
use std::convert::TryFrom;
use std::fmt::Write as _;
use std::io::{Cursor, Read};
use uuid::Uuid;
// AID (Application ID) of PIV application, per:
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
const PIV_AID: Aid = Aid::new_truncatable(
&[
0xA0, 0x00, 0x00, 0x03, 0x08, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,
],
5, // usually truncates to first 5 bytes
);
// Card implements a PIV-compatible smartcard, per:
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
#[derive(Debug, PartialEq, Eq)]
pub struct Card<const S: usize> {
// Card-holder user ID (CHUID). In federal agencies, this value would be unique per employee
// and encodes some agency information. In our case it's static.
chuid: Vec<u8>,
piv_auth_cert: Vec<u8>,
piv_auth_key: RsaPrivateKey,
pin: String,
// Pending command and response to receive/send over multiple messages when
// they don't fit into one.
pending_command: Option<Command<S>>,
pending_response: Option<Cursor<Vec<u8>>>,
}
impl<const S: usize> Card<S> {
pub fn new(uuid: Uuid, cert_der: &[u8], key_der: &[u8], pin: String) -> RdpResult<Self> {
let piv_auth_key = RsaPrivateKey::from_pkcs1_der(key_der).map_err(|e| {
invalid_data_error(&format!("failed to parse private key from DER: {e:?}"))
})?;
Ok(Self {
chuid: Self::build_chuid(uuid),
piv_auth_cert: Self::build_piv_auth_cert(cert_der),
piv_auth_key,
pin,
pending_command: None,
pending_response: None,
})
}
pub fn handle(&mut self, cmd: Command<S>) -> RdpResult<Response> {
debug!("got command: {:?}", cmd);
debug!("command data: {}", hex_data(&cmd));
// Handle chained commands.
let cmd = match self.pending_command.as_mut() {
None => cmd,
Some(pending) => {
pending
.extend_from_command(&cmd)
.map_err(|_| invalid_data_error("could not build chained command"))?;
pending.clone()
}
};
if cmd.class().chain().not_the_last() {
self.pending_command = Some(cmd);
return Ok(Response::new(Status::Success));
} else {
self.pending_command = None;
}
let resp = match cmd.instruction() {
Instruction::Select => self.handle_select(cmd),
Instruction::Verify => self.handle_verify(cmd),
Instruction::GetData => self.handle_get_data(cmd),
Instruction::GetResponse => self.handle_get_response(cmd),
Instruction::GeneralAuthenticate => self.handle_general_authenticate(cmd),
_ => {
warn!("unimplemented instruction {:?}", cmd.instruction());
Ok(Response::new(Status::InstructionNotSupportedOrInvalid))
}
}?;
debug!("send response: {:?}", resp);
debug!("response data: {}", to_hex(&resp.encode()));
Ok(resp)
}
fn handle_select(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// For our use case, we only allow selecting the PIV application on the smartcard.
//
// P1=04 and P2=00 mean selection of a DF (usually an application) by name. Everything
// else is not supported.
if cmd.p1 != 0x04 && cmd.p2 != 0x00 {
return Ok(Response::new(Status::NotFound));
}
if !PIV_AID.matches(cmd.data()) {
return Ok(Response::new(Status::NotFound));
}
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf section
// 3.1.1
let resp = tlv(
TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE,
Value::Constructed(vec![
tlv(
TLV_TAG_AID,
Value::Primitive(vec![0x00, 0x00, 0x10, 0x00, 0x01, 0x00]),
)?,
tlv(
TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY,
Value::Constructed(vec![tlv(
TLV_TAG_AID,
Value::Primitive(PIV_AID.truncated().to_vec()),
)?]),
)?,
]),
)?;
Ok(Response::with_data(Status::Success, resp.to_vec()))
}
fn handle_verify(&mut self, cmd: Command<S>) -> RdpResult<Response> {
if cmd.data() == self.pin.as_bytes() {
Ok(Response::new(Status::Success))
} else {
warn!("PIN mismatch, want {}, got {:?}", self.pin, cmd.data());
Ok(Response::new(Status::VerificationFailed))
}
}
fn handle_get_data(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf section
// 3.1.2.
if cmd.p1 != 0x3F && cmd.p2 != 0xFF {
return Ok(Response::new(Status::NotFound));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(0x5C)? {
return Ok(Response::new(Status::NotFound));
}
match request_tlv.value() {
Value::Primitive(tag) => match to_hex(tag).as_str() {
// Card Holder Unique Identifier.
"5FC102" => Ok(Response::with_data(Status::Success, self.chuid.clone())),
// X.509 Certificate for PIV Authentication
"5FC105" => {
self.pending_response = Some(Cursor::new(self.piv_auth_cert.clone()));
self.handle_get_response(cmd)
}
_ => {
// Some other unimplemented data object.
Ok(Response::new(Status::NotFound))
}
},
Value::Constructed(_) => Ok(Response::new(Status::NotFound)),
}
}
fn handle_get_response(&mut self, _cmd: Command<S>) -> RdpResult<Response> {
// CHUNK_SIZE is the max response data size in bytes, without resorting to "extended"
// messages.
const CHUNK_SIZE: usize = 256;
match &mut self.pending_response {
None => Ok(Response::new(Status::NotFound)),
Some(cursor) => {
let mut chunk = [0; CHUNK_SIZE];
let n = cursor.read(&mut chunk)?;
let mut chunk = chunk.to_vec();
chunk.truncate(n);
let remaining = cursor.get_ref().len() as u64 - cursor.position();
let status = if remaining == 0 {
Status::Success
} else if remaining < CHUNK_SIZE as u64 {
Status::MoreAvailable(remaining as u8)
} else {
Status::MoreAvailable(0)
};
Ok(Response::with_data(status, chunk))
}
}
}
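// Walkthrough of the chunking above: a 600-byte pending response is drained by
// three consecutive GET RESPONSE calls returning 256, 256 and 88 bytes with
// statuses MoreAvailable(0), MoreAvailable(88) and Success, mirroring the
// ISO 7816 "61 XX" convention of advertising how many bytes remain.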
/// Sign the challenge.
///
/// Note: for signatures, typically you'd use a signer that hashes the input data, adds padding
/// according to some scheme (like PKCS1v15 or PSS) and then "decrypts" this data with the key.
/// The decrypted blob is the signature.
///
/// In our case, the RDP server does the hashing and padding, and only gives us a finished blob
/// to decrypt. Most crypto libraries don't directly expose RSA decryption without padding, as
/// it's easy to build insecure crypto systems. Thankfully for us, this decryption is just a single
/// modpow operation, which is supported by RustCrypto.
fn sign_auth_challenge(&self, challenge: &[u8]) -> Vec<u8> {
let c = BigUint::from_bytes_be(challenge);
let plain_text = c
.modpow(self.piv_auth_key.d(), self.piv_auth_key.n())
.to_bytes_be();
let mut result = vec![0u8; self.piv_auth_key.size()];
let start = result.len() - plain_text.len();
result[start..].copy_from_slice(&plain_text);
result
}
fn handle_general_authenticate(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See section 3.2.4 and the example in Appendix A.3 from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// P1='07' means 2048-bit RSA.
//
// TODO(zmb3): compare algorithm against the private key using consts from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-78-4.pdf
// TODO(zmb3): support non-RSA keys, if needed.
if cmd.p1 != 0x07 {
return Err(invalid_data_error(&format!(
"unsupported algorithm identifier P1:{:#X} in general authenticate command",
cmd.p1
)));
}
// P2='9A' means PIV Authentication Key (matches our cert '5FC105' in handle_get_data).
if cmd.p2 != 0x9A {
return Err(invalid_data_error(&format!(
"unsupported key reference P2:{:#X} in general authenticate command",
cmd.p2
)));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE)? {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
// Extract the challenge field.
let request_tlvs = match request_tlv.value() {
Value::Primitive(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
Value::Constructed(tlvs) => tlvs,
};
let mut challenge = None;
for data in request_tlvs {
if *data.tag() != tlv_tag(TLV_TAG_CHALLENGE)? {
continue;
}
challenge = match data.value() {
Value::Primitive(chal) => Some(chal),
Value::Constructed(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
};
}
let challenge = challenge.ok_or_else(|| {
invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}, missing challenge data"
))
})?;
// TODO(zmb3): support non-RSA keys, if needed.
let signed_challenge = self.sign_auth_challenge(challenge);
// Return signed challenge.
let resp = tlv(
TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE,
Value::Constructed(vec![tlv(
TLV_TAG_RESPONSE,
Value::Primitive(signed_challenge),
)?]),
)?
.to_vec();
self.pending_response = Some(Cursor::new(resp));
self.handle_get_response(cmd)
}
fn build_chuid(uuid: Uuid) -> Vec<u8> {
// This is gross: the response is a BER-TLV value, but it has nested SIMPLE-TLV
// values. None of the TLV encoding libraries out there support this, they fail
// when checking the tag of nested TLVs.
//
// So, construct the TLV by hand from raw bytes. Hopefully the comments will be
// enough to explain the structure.
//
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// table 9 has the explanation of fields.
//
// Start with a top-level BER-TLV tag and length:
let mut resp = vec![TLV_TAG_DATA_FIELD, 0x3B];
// TLV tag and length for FASC-N.
resp.extend_from_slice(&[TLV_TAG_FASC_N, 0x19]);
// FASC-N value containing S9999F9999F999999F0F1F0000000000300001E, with a
// weird encoding from section 6 of:
// https://www.idmanagement.gov/docs/pacs-tig-scepacs.pdf
resp.extend_from_slice(&[
0xd4, 0xe7, 0x39, 0xda, 0x73, 0x9c, 0xed, 0x39, 0xce, 0x73, 0x9d, 0x83, 0x68, 0x58,
0x21, 0x08, 0x42, 0x10, 0x84, 0x21, 0xc8, 0x42, 0x10, 0xc3, 0xeb,
]);
// TLV for user UUID.
resp.extend_from_slice(&[TLV_TAG_GUID, 0x10]);
resp.extend_from_slice(uuid.as_bytes());
// TLV for expiration date (YYYYMMDD).
resp.extend_from_slice(&[TLV_TAG_EXPIRATION_DATE, 0x08]);
// TODO(awly): generate this from current time.
resp.extend_from_slice("20300101".as_bytes());
// TLV for signature (empty).
resp.extend_from_slice(&[TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
resp
}
fn build_piv_auth_cert(cert_der: &[u8]) -> Vec<u8> {
// Same as above, tags in this BER-TLV value are not compatible with the spec
// and existing libraries. Marshal by hand.
//
// Certificate TLV tag and length.
let mut resp = vec![TLV_TAG_CERTIFICATE];
resp.extend_from_slice(&len_to_vec(cert_der.len()));
resp.extend_from_slice(cert_der);
// CertInfo TLV (0x00 indicates uncompressed cert).
resp.extend_from_slice(&[TLV_TAG_CERTINFO, 0x01, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
// Wrap with top-level TLV tag and length.
let mut resp_outer = vec![TLV_TAG_DATA_FIELD];
resp_outer.extend_from_slice(&len_to_vec(resp.len()));
resp_outer.extend_from_slice(&resp);
resp_outer
}
}
#[derive(Debug)]
pub struct Response {
data: Option<Vec<u8>>,
status: Status,
}
impl Response {
fn new(status: Status) -> Self {
Self { data: None, status }
}
fn with_data(status: Status, data: Vec<u8>) -> Self {
Self {
data: Some(data),
status,
}
}
pub fn encode(&self) -> Vec<u8> |
}
// SELECT command tags.
const TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE: u8 = 0x61;
const TLV_TAG_AID: u8 = 0x4F;
const TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY: u8 = 0x79;
const TLV_TAG_DATA_FIELD: u8 = 0x53;
const TLV_TAG_FASC_N: u8 = 0x30;
const TLV_TAG_GUID: u8 = 0x34;
const TLV_TAG_EXPIRATION_DATE: u8 = 0x35;
const TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE: u8 = 0x3E;
const TLV_TAG_ERROR_DETECTION_CODE: u8 = 0xFE;
const TLV_TAG_CERTIFICATE: u8 = 0x70;
const TLV_TAG_CERTINFO: u8 = 0x71;
// GENERAL AUTHENTICATE command tags.
const TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE: u8 = 0x7C;
const TLV_TAG_CHALLENGE: u8 = 0x81;
const TLV_TAG_RESPONSE: u8 = 0x82;
fn tlv(tag: u8, value: Value) -> RdpResult<Tlv> {
Tlv::new(tlv_tag(tag)?, value)
.map_err(|e| invalid_data_error(&format!("TLV with tag {tag:#X} invalid: {e:?}")))
}
fn tlv_tag(val: u8) -> RdpResult<Tag> {
Tag::try_from(val).map_err(|e| invalid_data_error(&format!("TLV tag {val:#X} invalid: {e:?}")))
}
fn hex_data<const S: usize>(cmd: &Command<S>) -> String {
to_hex(cmd.data())
}
fn to_hex(bytes: &[u8]) -> String {
let mut s = String::new();
for b in bytes {
// https://rust-lang.github.io/rust-clippy/master/index.html#format_push_string
let _ = write!(s, "{b:02X}");
}
s
}
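// Expected outputs of the DER-style length encoder below (illustrative
// assertions, assumed rather than taken from the original test suite):
//
// assert_eq!(len_to_vec(0x20), vec![0x20]);                // short form
// assert_eq!(len_to_vec(0x0182), vec![0x82, 0x01, 0x82]);  // long form, 2 length bytes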
#[allow(clippy::cast_possible_truncation)]
fn len_to_vec(len: usize) -> Vec<u8> {
if len < 0x7f {
vec![len as u8]
} else {
let mut ret: Vec<u8> = len
.to_be_bytes()
.iter()
.skip_while(|&x| *x == 0)
.cloned()
.collect();
ret.insert(0, 0x80 | ret.len() as u8);
ret
}
}
| {
let mut buf = Vec::new();
if let Some(data) = &self.data {
buf.extend_from_slice(data);
}
let status: [u8; 2] = self.status.into();
buf.extend_from_slice(&status);
buf
} | identifier_body |
piv.rs | // Copyright 2021 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::errors::invalid_data_error;
use iso7816::aid::Aid;
use iso7816::command::instruction::Instruction;
use iso7816::command::Command;
use iso7816::response::Status;
use iso7816_tlv::ber::{Tag, Tlv, Value};
use rdp::model::error::*;
use rsa::pkcs1::DecodeRsaPrivateKey;
use rsa::traits::{PrivateKeyParts, PublicKeyParts};
use rsa::{BigUint, RsaPrivateKey};
use std::convert::TryFrom;
use std::fmt::Write as _;
use std::io::{Cursor, Read};
use uuid::Uuid;
// AID (Application ID) of PIV application, per:
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
const PIV_AID: Aid = Aid::new_truncatable(
&[
0xA0, 0x00, 0x00, 0x03, 0x08, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,
],
5, // usually truncates to first 5 bytes
);
// Card implements a PIV-compatible smartcard, per:
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
#[derive(Debug, PartialEq, Eq)]
pub struct Card<const S: usize> {
// Card-holder user ID (CHUID). In federal agencies, this value would be unique per employee
// and encodes some agency information. In our case it's static.
chuid: Vec<u8>,
piv_auth_cert: Vec<u8>,
piv_auth_key: RsaPrivateKey,
pin: String,
// Pending command and response to receive/send over multiple messages when
// they don't fit into one.
pending_command: Option<Command<S>>,
pending_response: Option<Cursor<Vec<u8>>>,
}
impl<const S: usize> Card<S> {
pub fn new(uuid: Uuid, cert_der: &[u8], key_der: &[u8], pin: String) -> RdpResult<Self> {
let piv_auth_key = RsaPrivateKey::from_pkcs1_der(key_der).map_err(|e| {
invalid_data_error(&format!("failed to parse private key from DER: {e:?}"))
})?;
Ok(Self {
chuid: Self::build_chuid(uuid),
piv_auth_cert: Self::build_piv_auth_cert(cert_der),
piv_auth_key,
pin,
pending_command: None,
pending_response: None,
})
}
pub fn handle(&mut self, cmd: Command<S>) -> RdpResult<Response> {
debug!("got command: {:?}", cmd);
debug!("command data: {}", hex_data(&cmd));
// Handle chained commands.
let cmd = match self.pending_command.as_mut() {
None => cmd,
Some(pending) => {
pending
.extend_from_command(&cmd)
.map_err(|_| invalid_data_error("could not build chained command"))?;
pending.clone()
}
};
if cmd.class().chain().not_the_last() {
self.pending_command = Some(cmd);
return Ok(Response::new(Status::Success));
} else {
self.pending_command = None;
}
let resp = match cmd.instruction() {
Instruction::Select => self.handle_select(cmd),
Instruction::Verify => self.handle_verify(cmd),
Instruction::GetData => self.handle_get_data(cmd),
Instruction::GetResponse => self.handle_get_response(cmd),
Instruction::GeneralAuthenticate => self.handle_general_authenticate(cmd),
_ => {
warn!("unimplemented instruction {:?}", cmd.instruction());
Ok(Response::new(Status::InstructionNotSupportedOrInvalid))
}
}?;
debug!("send response: {:?}", resp);
debug!("response data: {}", to_hex(&resp.encode()));
Ok(resp)
}
fn handle_select(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// For our use case, we only allow selecting the PIV application on the smartcard.
//
// P1=04 and P2=00 mean selection of a DF (usually an application) by name. Everything
// else is not supported.
if cmd.p1 != 0x04 && cmd.p2 != 0x00 {
return Ok(Response::new(Status::NotFound));
}
if !PIV_AID.matches(cmd.data()) {
return Ok(Response::new(Status::NotFound));
}
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf section
// 3.1.1
let resp = tlv(
TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE,
Value::Constructed(vec![
tlv(
TLV_TAG_AID,
Value::Primitive(vec![0x00, 0x00, 0x10, 0x00, 0x01, 0x00]),
)?,
tlv(
TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY,
Value::Constructed(vec![tlv(
TLV_TAG_AID,
Value::Primitive(PIV_AID.truncated().to_vec()),
)?]),
)?,
]),
)?;
Ok(Response::with_data(Status::Success, resp.to_vec()))
}
fn handle_verify(&mut self, cmd: Command<S>) -> RdpResult<Response> {
if cmd.data() == self.pin.as_bytes() {
Ok(Response::new(Status::Success))
} else {
warn!("PIN mismatch, want {}, got {:?}", self.pin, cmd.data());
Ok(Response::new(Status::VerificationFailed))
}
}
fn handle_get_data(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf section
// 3.1.2.
if cmd.p1 != 0x3F && cmd.p2 != 0xFF {
return Ok(Response::new(Status::NotFound));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(0x5C)? {
return Ok(Response::new(Status::NotFound));
}
match request_tlv.value() {
Value::Primitive(tag) => match to_hex(tag).as_str() {
// Card Holder Unique Identifier.
"5FC102" => Ok(Response::with_data(Status::Success, self.chuid.clone())),
// X.509 Certificate for PIV Authentication
"5FC105" => {
self.pending_response = Some(Cursor::new(self.piv_auth_cert.clone()));
self.handle_get_response(cmd)
}
_ => {
// Some other unimplemented data object.
Ok(Response::new(Status::NotFound))
}
},
Value::Constructed(_) => Ok(Response::new(Status::NotFound)),
}
}
fn handle_get_response(&mut self, _cmd: Command<S>) -> RdpResult<Response> {
// CHUNK_SIZE is the max response data size in bytes, without resorting to "extended"
// messages.
const CHUNK_SIZE: usize = 256;
match &mut self.pending_response {
None => Ok(Response::new(Status::NotFound)),
Some(cursor) => {
let mut chunk = [0; CHUNK_SIZE];
let n = cursor.read(&mut chunk)?;
let mut chunk = chunk.to_vec();
chunk.truncate(n);
let remaining = cursor.get_ref().len() as u64 - cursor.position();
let status = if remaining == 0 {
Status::Success
} else if remaining < CHUNK_SIZE as u64 {
Status::MoreAvailable(remaining as u8)
} else {
Status::MoreAvailable(0)
};
Ok(Response::with_data(status, chunk))
}
}
}
/// Sign the challenge.
///
/// Note: for signatures, typically you'd use a signer that hashes the input data, adds padding
/// according to some scheme (like PKCS1v15 or PSS) and then "decrypts" this data with the key.
/// The decrypted blob is the signature.
///
/// In our case, the RDP server does the hashing and padding, and only gives us a finished blob
/// to decrypt. Most crypto libraries don't directly expose RSA decryption without padding, as
/// it's easy to build insecure crypto systems. Thankfully for us, this decryption is just a single
/// modpow operation, which is supported by RustCrypto.
fn sign_auth_challenge(&self, challenge: &[u8]) -> Vec<u8> {
let c = BigUint::from_bytes_be(challenge);
let plain_text = c
.modpow(self.piv_auth_key.d(), self.piv_auth_key.n())
.to_bytes_be();
let mut result = vec![0u8; self.piv_auth_key.size()];
let start = result.len() - plain_text.len();
result[start..].copy_from_slice(&plain_text);
result
}
fn handle_general_authenticate(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See section 3.2.4 and the example in Appendix A.3 from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// P1='07' means 2048-bit RSA.
//
// TODO(zmb3): compare algorithm against the private key using consts from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-78-4.pdf
// TODO(zmb3): support non-RSA keys, if needed.
if cmd.p1 != 0x07 {
return Err(invalid_data_error(&format!(
"unsupported algorithm identifier P1:{:#X} in general authenticate command",
cmd.p1
)));
}
// P2='9A' means PIV Authentication Key (matches our cert '5FC105' in handle_get_data).
if cmd.p2 != 0x9A {
return Err(invalid_data_error(&format!(
"unsupported key reference P2:{:#X} in general authenticate command",
cmd.p2
)));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE)? {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
// Extract the challenge field.
let request_tlvs = match request_tlv.value() {
Value::Primitive(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
Value::Constructed(tlvs) => tlvs,
};
let mut challenge = None;
for data in request_tlvs {
if *data.tag() != tlv_tag(TLV_TAG_CHALLENGE)? {
continue;
}
challenge = match data.value() {
Value::Primitive(chal) => Some(chal),
Value::Constructed(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
};
}
let challenge = challenge.ok_or_else(|| {
invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}, missing challenge data"
))
})?;
// TODO(zmb3): support non-RSA keys, if needed.
let signed_challenge = self.sign_auth_challenge(challenge);
// Return signed challenge.
let resp = tlv(
TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE,
Value::Constructed(vec![tlv(
TLV_TAG_RESPONSE,
Value::Primitive(signed_challenge),
)?]),
)?
.to_vec();
self.pending_response = Some(Cursor::new(resp));
self.handle_get_response(cmd)
}
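// Shape of the GENERAL AUTHENTICATE exchange handled above, per the example in
// SP 800-73-4 Appendix A.3 (sketch):
//   request:  7C <len> 81 <len> <challenge bytes>
//   response: 7C <len> 82 <len> <RSA signature over the padded challenge>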
fn build_chuid(uuid: Uuid) -> Vec<u8> {
// This is gross: the response is a BER-TLV value, but it has nested SIMPLE-TLV
// values. None of the TLV encoding libraries out there support this, they fail
// when checking the tag of nested TLVs.
//
// So, construct the TLV by hand from raw bytes. Hopefully the comments will be
// enough to explain the structure.
//
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// table 9 has the explanation of fields.
//
// Start with a top-level BER-TLV tag and length:
let mut resp = vec![TLV_TAG_DATA_FIELD, 0x3B];
// TLV tag and length for FASC-N.
resp.extend_from_slice(&[TLV_TAG_FASC_N, 0x19]);
// FASC-N value containing S9999F9999F999999F0F1F0000000000300001E, with a
// weird encoding from section 6 of:
// https://www.idmanagement.gov/docs/pacs-tig-scepacs.pdf
resp.extend_from_slice(&[
0xd4, 0xe7, 0x39, 0xda, 0x73, 0x9c, 0xed, 0x39, 0xce, 0x73, 0x9d, 0x83, 0x68, 0x58,
0x21, 0x08, 0x42, 0x10, 0x84, 0x21, 0xc8, 0x42, 0x10, 0xc3, 0xeb,
]);
// TLV for user UUID.
resp.extend_from_slice(&[TLV_TAG_GUID, 0x10]);
resp.extend_from_slice(uuid.as_bytes());
// TLV for expiration date (YYYYMMDD).
resp.extend_from_slice(&[TLV_TAG_EXPIRATION_DATE, 0x08]);
// TODO(awly): generate this from current time.
resp.extend_from_slice("20300101".as_bytes());
// TLV for signature (empty).
resp.extend_from_slice(&[TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
resp
}
fn build_piv_auth_cert(cert_der: &[u8]) -> Vec<u8> {
// Same as above, tags in this BER-TLV value are not compatible with the spec
// and existing libraries. Marshal by hand.
//
// Certificate TLV tag and length.
let mut resp = vec![TLV_TAG_CERTIFICATE];
resp.extend_from_slice(&len_to_vec(cert_der.len()));
resp.extend_from_slice(cert_der);
// CertInfo TLV (0x00 indicates uncompressed cert).
resp.extend_from_slice(&[TLV_TAG_CERTINFO, 0x01, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
// Wrap with top-level TLV tag and length.
let mut resp_outer = vec![TLV_TAG_DATA_FIELD];
resp_outer.extend_from_slice(&len_to_vec(resp.len()));
resp_outer.extend_from_slice(&resp);
resp_outer
}
}
#[derive(Debug)]
pub struct Response {
data: Option<Vec<u8>>,
status: Status,
}
impl Response {
fn new(status: Status) -> Self {
Self { data: None, status }
}
fn with_data(status: Status, data: Vec<u8>) -> Self {
Self {
data: Some(data), | status,
}
}
pub fn encode(&self) -> Vec<u8> {
let mut buf = Vec::new();
if let Some(data) = &self.data {
buf.extend_from_slice(data);
}
let status: [u8; 2] = self.status.into();
buf.extend_from_slice(&status);
buf
}
}
// SELECT command tags.
const TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE: u8 = 0x61;
const TLV_TAG_AID: u8 = 0x4F;
const TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY: u8 = 0x79;
const TLV_TAG_DATA_FIELD: u8 = 0x53;
const TLV_TAG_FASC_N: u8 = 0x30;
const TLV_TAG_GUID: u8 = 0x34;
const TLV_TAG_EXPIRATION_DATE: u8 = 0x35;
const TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE: u8 = 0x3E;
const TLV_TAG_ERROR_DETECTION_CODE: u8 = 0xFE;
const TLV_TAG_CERTIFICATE: u8 = 0x70;
const TLV_TAG_CERTINFO: u8 = 0x71;
// GENERAL AUTHENTICATE command tags.
const TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE: u8 = 0x7C;
const TLV_TAG_CHALLENGE: u8 = 0x81;
const TLV_TAG_RESPONSE: u8 = 0x82;
fn tlv(tag: u8, value: Value) -> RdpResult<Tlv> {
Tlv::new(tlv_tag(tag)?, value)
.map_err(|e| invalid_data_error(&format!("TLV with tag {tag:#X} invalid: {e:?}")))
}
fn tlv_tag(val: u8) -> RdpResult<Tag> {
Tag::try_from(val).map_err(|e| invalid_data_error(&format!("TLV tag {val:#X} invalid: {e:?}")))
}
fn hex_data<const S: usize>(cmd: &Command<S>) -> String {
to_hex(cmd.data())
}
fn to_hex(bytes: &[u8]) -> String {
let mut s = String::new();
for b in bytes {
// https://rust-lang.github.io/rust-clippy/master/index.html#format_push_string
let _ = write!(s, "{b:02X}");
}
s
}
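// Example (illustrative): to_hex(&[0x5F, 0xC1, 0x05]) == "5FC105", which is the
// string form handle_get_data above matches data-object tags against.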
#[allow(clippy::cast_possible_truncation)]
fn len_to_vec(len: usize) -> Vec<u8> {
if len < 0x7f {
vec![len as u8]
} else {
let mut ret: Vec<u8> = len
.to_be_bytes()
.iter()
.skip_while(|&x| *x == 0)
.cloned()
.collect();
ret.insert(0, 0x80 | ret.len() as u8);
ret
}
} | random_line_split |
|
piv.rs | // Copyright 2021 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::errors::invalid_data_error;
use iso7816::aid::Aid;
use iso7816::command::instruction::Instruction;
use iso7816::command::Command;
use iso7816::response::Status;
use iso7816_tlv::ber::{Tag, Tlv, Value};
use rdp::model::error::*;
use rsa::pkcs1::DecodeRsaPrivateKey;
use rsa::traits::{PrivateKeyParts, PublicKeyParts};
use rsa::{BigUint, RsaPrivateKey};
use std::convert::TryFrom;
use std::fmt::Write as _;
use std::io::{Cursor, Read};
use uuid::Uuid;
// AID (Application ID) of PIV application, per:
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
const PIV_AID: Aid = Aid::new_truncatable(
&[
0xA0, 0x00, 0x00, 0x03, 0x08, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,
],
5, // usually truncates to first 5 bytes
);
// Card implements a PIV-compatible smartcard, per:
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
#[derive(Debug, PartialEq, Eq)]
pub struct Card<const S: usize> {
// Card-holder user ID (CHUID). In federal agencies, this value would be unique per employee
// and encodes some agency information. In our case it's static.
chuid: Vec<u8>,
piv_auth_cert: Vec<u8>,
piv_auth_key: RsaPrivateKey,
pin: String,
// Pending command and response to receive/send over multiple messages when
// they don't fit into one.
pending_command: Option<Command<S>>,
pending_response: Option<Cursor<Vec<u8>>>,
}
impl<const S: usize> Card<S> {
pub fn new(uuid: Uuid, cert_der: &[u8], key_der: &[u8], pin: String) -> RdpResult<Self> {
let piv_auth_key = RsaPrivateKey::from_pkcs1_der(key_der).map_err(|e| {
invalid_data_error(&format!("failed to parse private key from DER: {e:?}"))
})?;
Ok(Self {
chuid: Self::build_chuid(uuid),
piv_auth_cert: Self::build_piv_auth_cert(cert_der),
piv_auth_key,
pin,
pending_command: None,
pending_response: None,
})
}
pub fn handle(&mut self, cmd: Command<S>) -> RdpResult<Response> {
debug!("got command: {:?}", cmd);
debug!("command data: {}", hex_data(&cmd));
// Handle chained commands.
let cmd = match self.pending_command.as_mut() {
None => cmd,
Some(pending) => {
pending
.extend_from_command(&cmd)
.map_err(|_| invalid_data_error("could not build chained command"))?;
pending.clone()
}
};
if cmd.class().chain().not_the_last() {
self.pending_command = Some(cmd);
return Ok(Response::new(Status::Success));
} else {
self.pending_command = None;
}
let resp = match cmd.instruction() {
Instruction::Select => self.handle_select(cmd),
Instruction::Verify => self.handle_verify(cmd),
Instruction::GetData => self.handle_get_data(cmd),
Instruction::GetResponse => self.handle_get_response(cmd),
Instruction::GeneralAuthenticate => self.handle_general_authenticate(cmd),
_ => {
warn!("unimplemented instruction {:?}", cmd.instruction());
Ok(Response::new(Status::InstructionNotSupportedOrInvalid))
}
}?;
debug!("send response: {:?}", resp);
debug!("response data: {}", to_hex(&resp.encode()));
Ok(resp)
}
fn handle_select(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// For our use case, we only allow selecting the PIV application on the smartcard.
//
        // P1=04 with P2=00 means selection of a DF (here, the application) by name. Anything
        // else is not supported.
        if cmd.p1 != 0x04 || cmd.p2 != 0x00 {
return Ok(Response::new(Status::NotFound));
}
        if !PIV_AID.matches(cmd.data()) {
return Ok(Response::new(Status::NotFound));
}
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf section
// 3.1.1
let resp = tlv(
TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE,
Value::Constructed(vec![
tlv(
TLV_TAG_AID,
Value::Primitive(vec![0x00, 0x00, 0x10, 0x00, 0x01, 0x00]),
)?,
tlv(
TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY,
Value::Constructed(vec![tlv(
TLV_TAG_AID,
Value::Primitive(PIV_AID.truncated().to_vec()),
)?]),
)?,
]),
)?;
Ok(Response::with_data(Status::Success, resp.to_vec()))
}
fn handle_verify(&mut self, cmd: Command<S>) -> RdpResult<Response> {
if cmd.data() == self.pin.as_bytes() {
Ok(Response::new(Status::Success))
} else {
warn!("PIN mismatch, want {}, got {:?}", self.pin, cmd.data());
Ok(Response::new(Status::VerificationFailed))
}
}
fn handle_get_data(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf section
// 3.1.2.
        if cmd.p1 != 0x3F || cmd.p2 != 0xFF {
return Ok(Response::new(Status::NotFound));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
        if *request_tlv.tag() != tlv_tag(0x5C)? {
return Ok(Response::new(Status::NotFound));
}
match request_tlv.value() {
Value::Primitive(tag) => match to_hex(tag).as_str() {
// Card Holder Unique Identifier.
"5FC102" => Ok(Response::with_data(Status::Success, self.chuid.clone())),
// X.509 Certificate for PIV Authentication
"5FC105" => {
self.pending_response = Some(Cursor::new(self.piv_auth_cert.clone()));
self.handle_get_response(cmd)
}
_ => {
// Some other unimplemented data object.
Ok(Response::new(Status::NotFound))
}
},
Value::Constructed(_) => Ok(Response::new(Status::NotFound)),
}
}
fn handle_get_response(&mut self, _cmd: Command<S>) -> RdpResult<Response> {
        // CHUNK_SIZE is the max response data size in bytes, without resorting to "extended"
        // messages.
const CHUNK_SIZE: usize = 256;
match &mut self.pending_response {
None => Ok(Response::new(Status::NotFound)),
Some(cursor) => {
let mut chunk = [0; CHUNK_SIZE];
let n = cursor.read(&mut chunk)?;
let mut chunk = chunk.to_vec();
chunk.truncate(n);
let remaining = cursor.get_ref().len() as u64 - cursor.position();
let status = if remaining == 0 {
Status::Success
} else if remaining < CHUNK_SIZE as u64 {
Status::MoreAvailable(remaining as u8)
} else {
Status::MoreAvailable(0)
};
Ok(Response::with_data(status, chunk))
}
}
}
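    // (Worked example for the chunking above, assuming a 600-byte pending
    // response: three successive GET RESPONSE calls return 256 bytes with
    // MoreAvailable(0) (meaning 256 or more bytes left), then 256 bytes with
    // MoreAvailable(88), then the final 88 bytes with Success.)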
/// Sign the challenge.
///
/// Note: for signatures, typically you'd use a signer that hashes the input data, adds padding
/// according to some scheme (like PKCS1v15 or PSS) and then "decrypts" this data with the key.
/// The decrypted blob is the signature.
///
/// In our case, the RDP server does the hashing and padding, and only gives us a finished blob
/// to decrypt. Most crypto libraries don't directly expose RSA decryption without padding, as
/// it's easy to build insecure crypto systems. Thankfully for us, this decryption is just a single
    /// modpow operation, which is supported by RustCrypto.
fn sign_auth_challenge(&self, challenge: &[u8]) -> Vec<u8> {
let c = BigUint::from_bytes_be(challenge);
let plain_text = c
.modpow(self.piv_auth_key.d(), self.piv_auth_key.n())
.to_bytes_be();
let mut result = vec![0u8; self.piv_auth_key.size()];
let start = result.len() - plain_text.len();
result[start..].copy_from_slice(&plain_text);
result
}
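    // (Toy illustration of the modpow above, using textbook RSA numbers far too
    // small for real use: p=61 and q=53 give n=3233, e=17, d=2753. A challenge
    // c=2790 "decrypts" as 2790^2753 mod 3233 = 65, and left-padding the result
    // to the key size keeps the signature length fixed.)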
fn handle_general_authenticate(&mut self, cmd: Command<S>) -> RdpResult<Response> {
        // See section 3.2.4 and the example in Appendix A.3 of
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// P1='07' means 2048-bit RSA.
//
// TODO(zmb3): compare algorithm against the private key using consts from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-78-4.pdf
// TODO(zmb3): support non-RSA keys, if needed.
        if cmd.p1 != 0x07 {
return Err(invalid_data_error(&format!(
"unsupported algorithm identifier P1:{:#X} in general authenticate command",
cmd.p1
)));
}
// P2='9A' means PIV Authentication Key (matches our cert '5FC105' in handle_get_data).
        if cmd.p2 != 0x9A {
return Err(invalid_data_error(&format!(
"unsupported key reference P2:{:#X} in general authenticate command",
cmd.p2
)));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
        if *request_tlv.tag() != tlv_tag(TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE)? {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
// Extract the challenge field.
let request_tlvs = match request_tlv.value() {
Value::Primitive(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
Value::Constructed(tlvs) => tlvs,
};
let mut challenge = None;
for data in request_tlvs {
            if *data.tag() != tlv_tag(TLV_TAG_CHALLENGE)? {
continue;
}
challenge = match data.value() {
Value::Primitive(chal) => Some(chal),
Value::Constructed(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
};
}
let challenge = challenge.ok_or_else(|| {
invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}, missing challenge data"
))
})?;
// TODO(zmb3): support non-RSA keys, if needed.
let signed_challenge = self.sign_auth_challenge(challenge);
// Return signed challenge.
let resp = tlv(
TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE,
Value::Constructed(vec![tlv(
TLV_TAG_RESPONSE,
Value::Primitive(signed_challenge),
)?]),
)?
.to_vec();
self.pending_response = Some(Cursor::new(resp));
self.handle_get_response(cmd)
}
fn build_chuid(uuid: Uuid) -> Vec<u8> {
// This is gross: the response is a BER-TLV value, but it has nested SIMPLE-TLV
// values. None of the TLV encoding libraries out there support this, they fail
// when checking the tag of nested TLVs.
//
// So, construct the TLV by hand from raw bytes. Hopefully the comments will be
// enough to explain the structure.
//
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// table 9 has the explanation of fields.
//
// Start with a top-level BER-TLV tag and length:
let mut resp = vec![TLV_TAG_DATA_FIELD, 0x3B];
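        // (The 0x3B = 59-byte value length breaks down as: FASC-N TLV 2+25,
        // GUID TLV 2+16, expiration TLV 2+8, signature TLV 2+0, and error
        // detection code TLV 2+0 bytes.)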
// TLV tag and length for FASC-N.
resp.extend_from_slice(&[TLV_TAG_FASC_N, 0x19]);
// FASC-N value containing S9999F9999F999999F0F1F0000000000300001E, with a
// weird encoding from section 6 of:
// https://www.idmanagement.gov/docs/pacs-tig-scepacs.pdf
resp.extend_from_slice(&[
0xd4, 0xe7, 0x39, 0xda, 0x73, 0x9c, 0xed, 0x39, 0xce, 0x73, 0x9d, 0x83, 0x68, 0x58,
0x21, 0x08, 0x42, 0x10, 0x84, 0x21, 0xc8, 0x42, 0x10, 0xc3, 0xeb,
]);
// TLV for user UUID.
resp.extend_from_slice(&[TLV_TAG_GUID, 0x10]);
resp.extend_from_slice(uuid.as_bytes());
// TLV for expiration date (YYYYMMDD).
resp.extend_from_slice(&[TLV_TAG_EXPIRATION_DATE, 0x08]);
// TODO(awly): generate this from current time.
resp.extend_from_slice("20300101".as_bytes());
// TLV for signature (empty).
resp.extend_from_slice(&[TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
resp
}
fn build_piv_auth_cert(cert_der: &[u8]) -> Vec<u8> {
// Same as above, tags in this BER-TLV value are not compatible with the spec
// and existing libraries. Marshal by hand.
//
// Certificate TLV tag and length.
let mut resp = vec![TLV_TAG_CERTIFICATE];
resp.extend_from_slice(&len_to_vec(cert_der.len()));
resp.extend_from_slice(cert_der);
// CertInfo TLV (0x00 indicates uncompressed cert).
resp.extend_from_slice(&[TLV_TAG_CERTINFO, 0x01, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
// Wrap with top-level TLV tag and length.
let mut resp_outer = vec![TLV_TAG_DATA_FIELD];
resp_outer.extend_from_slice(&len_to_vec(resp.len()));
resp_outer.extend_from_slice(&resp);
resp_outer
}
}
#[derive(Debug)]
pub struct Response {
data: Option<Vec<u8>>,
status: Status,
}
impl Response {
fn new(status: Status) -> Self {
Self { data: None, status }
}
fn with_data(status: Status, data: Vec<u8>) -> Self {
Self {
data: Some(data),
status,
}
}
pub fn encode(&self) -> Vec<u8> {
let mut buf = Vec::new();
if let Some(data) = &self.data {
buf.extend_from_slice(data);
}
let status: [u8; 2] = self.status.into();
buf.extend_from_slice(&status);
buf
}
}
// SELECT command tags.
const TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE: u8 = 0x61;
const TLV_TAG_AID: u8 = 0x4F;
const TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY: u8 = 0x79;
// GET DATA object tags (CHUID and certificate data).
const TLV_TAG_DATA_FIELD: u8 = 0x53;
const TLV_TAG_FASC_N: u8 = 0x30;
const TLV_TAG_GUID: u8 = 0x34;
const TLV_TAG_EXPIRATION_DATE: u8 = 0x35;
const TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE: u8 = 0x3E;
const TLV_TAG_ERROR_DETECTION_CODE: u8 = 0xFE;
const TLV_TAG_CERTIFICATE: u8 = 0x70;
const TLV_TAG_CERTINFO: u8 = 0x71;
// GENERAL AUTHENTICATE command tags.
const TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE: u8 = 0x7C;
const TLV_TAG_CHALLENGE: u8 = 0x81;
const TLV_TAG_RESPONSE: u8 = 0x82;
fn tlv(tag: u8, value: Value) -> RdpResult<Tlv> {
Tlv::new(tlv_tag(tag)?, value)
.map_err(|e| invalid_data_error(&format!("TLV with tag {tag:#X} invalid: {e:?}")))
}
fn tlv_tag(val: u8) -> RdpResult<Tag> {
Tag::try_from(val).map_err(|e| invalid_data_error(&format!("TLV tag {val:#X} invalid: {e:?}")))
}
fn hex_data<const S: usize>(cmd: &Command<S>) -> String {
to_hex(cmd.data())
}
fn to_hex(bytes: &[u8]) -> String {
let mut s = String::new();
for b in bytes {
// https://rust-lang.github.io/rust-clippy/master/index.html#format_push_string
let _ = write!(s, "{b:02X}");
}
s
}
#[allow(clippy::cast_possible_truncation)]
fn | (len: usize) -> Vec<u8> {
    // Short form: lengths 0..=0x7f fit in a single byte.
    if len <= 0x7f {
vec![len as u8]
} else {
let mut ret: Vec<u8> = len
.to_be_bytes()
.iter()
.skip_while(|&x| *x == 0)
.cloned()
.collect();
ret.insert(0, 0x80 | ret.len() as u8);
ret
}
}
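// (Illustrative encodings from the helper above: a length of 0x2A encodes as the
// single byte [0x2A]; a length of 300 = 0x012C strips the leading zero bytes of
// its big-endian form to [0x01, 0x2C] and gains the long-form prefix 0x80 | 2,
// giving [0x82, 0x01, 0x2C].)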
| len_to_vec | identifier_name |
git.rs | use anyhow::{anyhow, Context, Result};
use git2::{
build::CheckoutBuilder, ApplyLocation, Delta, Diff, DiffFormat, DiffOptions, ErrorCode,
IndexAddOption, Oid, Repository, ResetType, Signature, StashApplyOptions, Time,
};
use itertools::Itertools;
use std::cell::RefCell;
use std::collections::HashSet;
use std::fs;
use std::hash::Hash;
use std::io::ErrorKind::NotFound;
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
/// An abstraction over a Git repository providing complex behavior needed for
/// applying changes to staged files safely.
pub struct GitRepository {
repository: Repository,
}
impl GitRepository {
/// Attempts to open an already-existing repository.
///
/// If the $GIT_DIR environment variable is set, this uses it to locate the
/// Git repository. Otherwise, this searches up the directory tree from the
/// current directory to find the repository.
pub fn open() -> Result<Self> {
// When strict hash verification is disabled, it means libgit2 will not
// compute the "object id" of Git objects (which is a SHA-1 hash) after
// reading them to verify they match the object ids being used to look
// them up. This improves performance, and I don't have in front of me
// a concrete example where this is necessary to prevent data loss. If
// one becomes obvious, then we should make this configurable.
//
git2::opts::strict_hash_verification(false);
let repository = Repository::open_from_env()
.with_context(|| "Encountered an error when opening the Git repository.")?;
Ok(Self { repository })
}
pub fn save_snapshot(&mut self, staged_files: Vec<PathBuf>) -> Result<Snapshot> {
let inner = || -> Result<Snapshot> {
let deleted_files = self.get_deleted_files()?;
let unstaged_diff = self.save_unstaged_diff()?;
let backup_stash = self.save_snapshot_stash()?;
// Because `git stash` restores the HEAD commit, it brings back uncommitted
// deleted files. We need to clear them before creating our snapshot.
GitRepository::delete_files(&deleted_files)?;
self.hide_partially_staged_changes()?;
Ok(Snapshot {
backup_stash,
staged_files,
unstaged_diff,
})
};
inner().with_context(|| "Encountered an error when saving a snapshot.")
}
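    // Illustrative caller flow (hypothetical; `run_formatters` is not part of
    // this crate):
    //
    //     let mut repo = GitRepository::open()?;
    //     let staged = repo.get_staged_files()?;
    //     let snapshot = repo.save_snapshot(staged)?;
    //     match run_formatters(&snapshot.staged_files) {
    //         Ok(()) => repo.apply_modifications(&snapshot)?,
    //         Err(_) => repo.restore_snapshot(&snapshot)?,
    //     }
    //     repo.clean_snapshot(snapshot)?;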
pub fn apply_modifications(&mut self, snapshot: &Snapshot) -> Result<()> {
self.stage_modifications(snapshot)?;
if self.get_staged_files()?.is_empty() {
return Err(anyhow!("Prevented an empty git commit."));
}
if let Some(raw_diff) = &snapshot.unstaged_diff {
let unstaged_diff = Diff::from_buffer(raw_diff)?;
self.merge_modifications(unstaged_diff)?;
}
Ok(())
}
pub fn restore_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
let mut inner = || -> Result<()> {
self.hard_reset()?;
if let Some(backup_stash) = &snapshot.backup_stash {
self.apply_stash(&backup_stash.stash_id)?;
self.restore_merge_status(&backup_stash.merge_status)?;
}
Ok(())
};
inner().with_context(|| "Encountered an error when restoring snapshot after another error.")
}
pub fn clean_snapshot(&mut self, snapshot: Snapshot) -> Result<()> {
let inner = || -> Result<()> {
if let Some(backup_stash) = snapshot.backup_stash {
let stash_index = self
.get_stash_index_from_id(&backup_stash.stash_id)?
.ok_or_else(|| {
anyhow!(
"Could not find a backup stash with id {}.",
&backup_stash.stash_id
)
})?;
self.repository.stash_drop(stash_index)?;
}
Ok(())
};
inner().with_context(|| {
"Encountered an error when cleaning snapshot. You might find a stash entry \
in the stash list."
})
}
fn stage_modifications(&mut self, snapshot: &Snapshot) -> Result<()> {
let mut index = self.repository.index()?;
index.add_all(
&snapshot.staged_files,
IndexAddOption::DEFAULT | IndexAddOption::DISABLE_PATHSPEC_MATCH,
None,
)?;
index.write()?;
Ok(())
}
fn merge_modifications(&self, unstaged_diff: Diff) -> Result<()> {
self.repository
.apply(&unstaged_diff, ApplyLocation::WorkDir, None)
.with_context(|| "Unstaged changes could not be restored due to a merge conflict.")
}
fn hard_reset(&self) -> Result<()> {
let head = self.repository.head()?.peel_to_commit()?;
self.repository
.reset(head.as_object(), ResetType::Hard, None)
.map_err(|error| anyhow!(error))
}
fn get_stash_index_from_id(&mut self, stash_id: &Oid) -> Result<Option<usize>> {
// It would be much better if libgit2 accepted a stash Oid
// instead of an index from the stash list.
let ref_stash_index = RefCell::new(None);
self.repository.stash_foreach(|index, _, oid| {
if oid == stash_id {
*ref_stash_index.borrow_mut() = Some(index);
false
} else {
true
}
})?;
// Copy the data out of the RefCell.
        let stash_index = *ref_stash_index.borrow();
Ok(stash_index)
}
fn apply_stash(&mut self, stash_id: &Oid) -> Result<()> {
let stash_index = self
.get_stash_index_from_id(stash_id)?
.ok_or_else(|| anyhow!("Could not find a backup stash with id {}.", stash_id))?;
self.repository.stash_apply(
stash_index,
Some(StashApplyOptions::default().reinstantiate_index()),
)?;
Ok(())
}
fn save_unstaged_diff(&self) -> Result<Option<Vec<u8>>> {
let partially_staged_files = self.get_partially_staged_files(true)?;
if partially_staged_files.is_empty() {
return Ok(None);
}
let mut diff_options = DiffOptions::new();
diff_options.show_binary(true);
for file in partially_staged_files.iter() {
diff_options.pathspec(file);
}
let unstaged_diff = self
.repository
.diff_index_to_workdir(None, Some(&mut diff_options))?;
// The Diff created by diff_index_to_workdir is owned by the repository.
// It means storing this diff separately isn't possible, and it is also
// difficult to store it along with the repository together in a struct,
// because that struct then will have a self reference between its diff
// and its repository.
//
// I'm not comfortable enough with ownership to understand the correct
// way to work around this, so the current approach that I'm taking is
// to copy the diff out into a buffer. This is not the most performant.
//
// For updates about this issue, we can keep tabs on
//
// https://github.com/rust-lang/git2-rs/issues/622
//
fn copy_diff(diff: &Diff) -> Result<Vec<u8>> {
let mut buffer = vec![];
diff.print(DiffFormat::Patch, |_, _, line| {
let origin = line.origin();
                if origin == '+' || origin == '-' || origin == ' ' {
buffer.push(origin as u8);
}
buffer.append(&mut line.content().to_vec());
true
})?;
Ok(buffer)
}
Ok(Some(copy_diff(&unstaged_diff)?))
}
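    // (Note: the buffer produced above must round-trip through Diff::from_buffer
    // in apply_modifications, which is why copy_diff re-emits the origin
    // character for context, addition, and removal lines of the patch.)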
fn hide_partially_staged_changes(&self) -> Result<()> {
let partially_staged_files = self.get_partially_staged_files(false)?;
if partially_staged_files.is_empty() {
return Ok(());
}
let mut checkout_options = CheckoutBuilder::new();
checkout_options.force();
checkout_options.update_index(false);
for file in partially_staged_files.iter() {
checkout_options.path(file);
}
self.repository
.checkout_index(None, Some(&mut checkout_options))?;
Ok(())
}
pub fn get_staged_files(&self) -> Result<Vec<PathBuf>> {
let head_tree = match self.repository.head() {
Ok(head) => Ok(Some(head.peel_to_tree()?)),
Err(error) if error.code() == ErrorCode::UnbornBranch => Ok(None),
Err(error) => Err(error),
}?;
let staged_files = self
.repository
.diff_tree_to_index(head_tree.as_ref(), None, None)?
.deltas()
.flat_map(|delta| {
if delta.old_file().path() == delta.new_file().path() {
vec![delta.old_file().path()]
} else {
vec![delta.old_file().path(), delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf)
.collect();
Ok(staged_files)
}
fn get_partially_staged_files(&self, include_from_files: bool) -> Result<HashSet<PathBuf>> {
let staged_files = HashSet::from_iter(self.get_staged_files()?);
let unstaged_files = HashSet::from_iter(
self.repository
.diff_index_to_workdir(None, Some(DiffOptions::default().show_binary(true)))?
.deltas()
.flat_map(|delta| {
if include_from_files {
vec![delta.old_file().path(), delta.new_file().path()]
} else {
vec![delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf),
);
fn intersect<P: Eq + Hash>(one: HashSet<P>, two: &HashSet<P>) -> HashSet<P> {
one.into_iter().filter(|p| two.contains(p)).collect()
}
Ok(intersect(staged_files, &unstaged_files))
}
fn get_deleted_files(&self) -> Result<Vec<PathBuf>> {
let deleted_files = self
.repository
.diff_index_to_workdir(None, None)?
.deltas()
.filter(|delta| delta.status() == Delta::Deleted)
.filter_map(|delta| delta.old_file().path())
.map(Path::to_path_buf)
.collect_vec();
Ok(deleted_files)
}
fn save_snapshot_stash(&mut self) -> Result<Option<Stash>> {
if self.repository.is_empty()? {
return Ok(None);
}
fn create_signature<'a>() -> Result<Signature<'a>> {
// Because this time is only used to create a dummy signature to
// make the stash_save method happy, we don't need to use a real
// time, which skips some calls to the kernel.
//
let time = Time::new(0, 0);
Signature::new("Dummy", "[email protected]", &time)
.with_context(|| "Encountered an error when creating dummy authorship information.")
}
// Save state when in the middle of a merge prior to stashing changes in
// the working directory so that we can restore it afterward.
//
let merge_status = self.save_merge_status()?;
let signature = create_signature()?;
let stash_result = self
.repository
.stash_create(&signature, None, None);
if let Ok(stash_id) = stash_result {
self.repository.stash_store(&stash_id, Some("offstage backup"))?;
}
match stash_result {
Ok(stash_id) => Ok(Some(Stash {
stash_id,
merge_status,
})),
Err(error) if error.code() == ErrorCode::NotFound => Ok(None),
Err(error) => Err(anyhow!(error)
.context("Encountered an error when stashing a backup of the working directory.")),
}
}
fn save_merge_status(&self) -> Result<MergeStatus> {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
let merge_head = Self::read_file_to_string(merge_head_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_head_path.display()
)
})?;
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
let merge_mode = Self::read_file_to_string(merge_mode_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_mode_path.display()
)
})?;
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
let merge_msg = Self::read_file_to_string(merge_msg_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_msg_path.display()
)
})?;
Ok(MergeStatus {
merge_head,
merge_mode,
merge_msg,
})
}
fn restore_merge_status(&self, merge_status: &MergeStatus) -> Result<()> {
// Tries to restore all files before returning the first error if one exists.
let restore_merge_head_result =
merge_status
.merge_head
.as_ref()
.map_or(Ok(()), |merge_head| {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
fs::write(merge_head_path, merge_head).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_head_path.display()
)
})
});
let restore_merge_mode_result =
merge_status
.merge_mode
.as_ref()
.map_or(Ok(()), |merge_mode| {
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
fs::write(merge_mode_path, merge_mode).with_context(|| {
format!(
"Encountered an error when restoring {}.",
&merge_mode_path.display()
)
})
});
let restore_merge_msg_result =
merge_status.merge_msg.as_ref().map_or(Ok(()), |merge_msg| {
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
fs::write(merge_msg_path, merge_msg).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_msg_path.display()
)
})
});
restore_merge_head_result?;
restore_merge_mode_result?;
restore_merge_msg_result?;
Ok(())
}
fn read_file_to_string<P: AsRef<Path>>(file: P) -> Result<Option<String>> {
match fs::read_to_string(file) {
Ok(contents) => Ok(Some(contents)),
Err(error) if error.kind() == NotFound => Ok(None),
Err(error) => Err(anyhow!(error)),
}
}
fn delete_files<P: AsRef<Path>>(files: &Vec<P>) -> Result<()> {
for file in files.iter() {
fs::remove_file(file).with_context(|| {
format!(
"Encountered error when deleting {}.",
file.as_ref().display()
)
})?;
}
Ok(())
}
}
#[derive(Debug)]
pub struct Snapshot {
pub staged_files: Vec<PathBuf>,
backup_stash: Option<Stash>,
unstaged_diff: Option<Vec<u8>>,
}
#[derive(Debug)]
struct | {
stash_id: Oid,
merge_status: MergeStatus,
}
#[derive(Debug)]
struct MergeStatus {
merge_head: Option<String>,
merge_mode: Option<String>,
merge_msg: Option<String>,
}
| Stash | identifier_name |
git.rs | use anyhow::{anyhow, Context, Result};
use git2::{
build::CheckoutBuilder, ApplyLocation, Delta, Diff, DiffFormat, DiffOptions, ErrorCode,
IndexAddOption, Oid, Repository, ResetType, Signature, StashApplyOptions, Time,
};
use itertools::Itertools;
use std::cell::RefCell;
use std::collections::HashSet;
use std::fs;
use std::hash::Hash;
use std::io::ErrorKind::NotFound;
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
/// An abstraction over a Git repository providing complex behavior needed for
/// applying changes to staged files safely.
pub struct GitRepository {
repository: Repository,
}
impl GitRepository {
/// Attempts to open an already-existing repository.
///
/// If the $GIT_DIR environment variable is set, this uses it to locate the
/// Git repository. Otherwise, this searches up the directory tree from the
/// current directory to find the repository.
pub fn open() -> Result<Self> |
pub fn save_snapshot(&mut self, staged_files: Vec<PathBuf>) -> Result<Snapshot> {
let inner = || -> Result<Snapshot> {
let deleted_files = self.get_deleted_files()?;
let unstaged_diff = self.save_unstaged_diff()?;
let backup_stash = self.save_snapshot_stash()?;
// Because `git stash` restores the HEAD commit, it brings back uncommitted
// deleted files. We need to clear them before creating our snapshot.
GitRepository::delete_files(&deleted_files)?;
self.hide_partially_staged_changes()?;
Ok(Snapshot {
backup_stash,
staged_files,
unstaged_diff,
})
};
inner().with_context(|| "Encountered an error when saving a snapshot.")
}
pub fn apply_modifications(&mut self, snapshot: &Snapshot) -> Result<()> {
self.stage_modifications(snapshot)?;
if self.get_staged_files()?.is_empty() {
return Err(anyhow!("Prevented an empty git commit."));
}
if let Some(raw_diff) = &snapshot.unstaged_diff {
let unstaged_diff = Diff::from_buffer(raw_diff)?;
self.merge_modifications(unstaged_diff)?;
}
Ok(())
}
pub fn restore_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
let mut inner = || -> Result<()> {
self.hard_reset()?;
if let Some(backup_stash) = &snapshot.backup_stash {
self.apply_stash(&backup_stash.stash_id)?;
self.restore_merge_status(&backup_stash.merge_status)?;
}
Ok(())
};
inner().with_context(|| "Encountered an error when restoring snapshot after another error.")
}
pub fn clean_snapshot(&mut self, snapshot: Snapshot) -> Result<()> {
let inner = || -> Result<()> {
if let Some(backup_stash) = snapshot.backup_stash {
let stash_index = self
.get_stash_index_from_id(&backup_stash.stash_id)?
.ok_or_else(|| {
anyhow!(
"Could not find a backup stash with id {}.",
&backup_stash.stash_id
)
})?;
self.repository.stash_drop(stash_index)?;
}
Ok(())
};
inner().with_context(|| {
"Encountered an error when cleaning snapshot. You might find a stash entry \
in the stash list."
})
}
fn stage_modifications(&mut self, snapshot: &Snapshot) -> Result<()> {
let mut index = self.repository.index()?;
index.add_all(
&snapshot.staged_files,
IndexAddOption::DEFAULT | IndexAddOption::DISABLE_PATHSPEC_MATCH,
None,
)?;
index.write()?;
Ok(())
}
fn merge_modifications(&self, unstaged_diff: Diff) -> Result<()> {
self.repository
.apply(&unstaged_diff, ApplyLocation::WorkDir, None)
.with_context(|| "Unstaged changes could not be restored due to a merge conflict.")
}
fn hard_reset(&self) -> Result<()> {
let head = self.repository.head()?.peel_to_commit()?;
self.repository
.reset(head.as_object(), ResetType::Hard, None)
.map_err(|error| anyhow!(error))
}
fn get_stash_index_from_id(&mut self, stash_id: &Oid) -> Result<Option<usize>> {
// It would be much better if libgit2 accepted a stash Oid
// instead of an index from the stash list.
let ref_stash_index = RefCell::new(None);
self.repository.stash_foreach(|index, _, oid| {
if oid == stash_id {
*ref_stash_index.borrow_mut() = Some(index);
false
} else {
true
}
})?;
// Copy the data out of the RefCell.
        let stash_index = *ref_stash_index.borrow();
Ok(stash_index)
}
fn apply_stash(&mut self, stash_id: &Oid) -> Result<()> {
let stash_index = self
.get_stash_index_from_id(stash_id)?
.ok_or_else(|| anyhow!("Could not find a backup stash with id {}.", stash_id))?;
self.repository.stash_apply(
stash_index,
Some(StashApplyOptions::default().reinstantiate_index()),
)?;
Ok(())
}
fn save_unstaged_diff(&self) -> Result<Option<Vec<u8>>> {
let partially_staged_files = self.get_partially_staged_files(true)?;
if partially_staged_files.is_empty() {
return Ok(None);
}
let mut diff_options = DiffOptions::new();
diff_options.show_binary(true);
for file in partially_staged_files.iter() {
diff_options.pathspec(file);
}
let unstaged_diff = self
.repository
.diff_index_to_workdir(None, Some(&mut diff_options))?;
// The Diff created by diff_index_to_workdir is owned by the repository.
// It means storing this diff separately isn't possible, and it is also
// difficult to store it along with the repository together in a struct,
// because that struct then will have a self reference between its diff
// and its repository.
//
// I'm not comfortable enough with ownership to understand the correct
// way to work around this, so the current approach that I'm taking is
// to copy the diff out into a buffer. This is not the most performant.
//
// For updates about this issue, we can keep tabs on
//
// https://github.com/rust-lang/git2-rs/issues/622
//
fn copy_diff(diff: &Diff) -> Result<Vec<u8>> {
let mut buffer = vec![];
diff.print(DiffFormat::Patch, |_, _, line| {
let origin = line.origin();
                if origin == '+' || origin == '-' || origin == ' ' {
buffer.push(origin as u8);
}
buffer.append(&mut line.content().to_vec());
true
})?;
Ok(buffer)
}
Ok(Some(copy_diff(&unstaged_diff)?))
}
fn hide_partially_staged_changes(&self) -> Result<()> {
let partially_staged_files = self.get_partially_staged_files(false)?;
if partially_staged_files.is_empty() {
return Ok(());
}
let mut checkout_options = CheckoutBuilder::new();
checkout_options.force();
checkout_options.update_index(false);
for file in partially_staged_files.iter() {
checkout_options.path(file);
}
self.repository
.checkout_index(None, Some(&mut checkout_options))?;
Ok(())
}
pub fn get_staged_files(&self) -> Result<Vec<PathBuf>> {
let head_tree = match self.repository.head() {
Ok(head) => Ok(Some(head.peel_to_tree()?)),
Err(error) if error.code() == ErrorCode::UnbornBranch => Ok(None),
Err(error) => Err(error),
}?;
let staged_files = self
.repository
.diff_tree_to_index(head_tree.as_ref(), None, None)?
.deltas()
.flat_map(|delta| {
if delta.old_file().path() == delta.new_file().path() {
vec![delta.old_file().path()]
} else {
vec![delta.old_file().path(), delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf)
.collect();
Ok(staged_files)
}
fn get_partially_staged_files(&self, include_from_files: bool) -> Result<HashSet<PathBuf>> {
let staged_files = HashSet::from_iter(self.get_staged_files()?);
let unstaged_files = HashSet::from_iter(
self.repository
.diff_index_to_workdir(None, Some(DiffOptions::default().show_binary(true)))?
.deltas()
.flat_map(|delta| {
if include_from_files {
vec![delta.old_file().path(), delta.new_file().path()]
} else {
vec![delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf),
);
fn intersect<P: Eq + Hash>(one: HashSet<P>, two: &HashSet<P>) -> HashSet<P> {
one.into_iter().filter(|p| two.contains(p)).collect()
}
Ok(intersect(staged_files, &unstaged_files))
}
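    // (Example: after `git add -p` stages only one hunk of a file, that file
    // appears in both the index diff and the workdir diff, so it lands in the
    // intersection returned here.)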
fn get_deleted_files(&self) -> Result<Vec<PathBuf>> {
let deleted_files = self
.repository
.diff_index_to_workdir(None, None)?
.deltas()
.filter(|delta| delta.status() == Delta::Deleted)
.filter_map(|delta| delta.old_file().path())
.map(Path::to_path_buf)
.collect_vec();
Ok(deleted_files)
}
fn save_snapshot_stash(&mut self) -> Result<Option<Stash>> {
if self.repository.is_empty()? {
return Ok(None);
}
fn create_signature<'a>() -> Result<Signature<'a>> {
// Because this time is only used to create a dummy signature to
// make the stash_save method happy, we don't need to use a real
// time, which skips some calls to the kernel.
//
let time = Time::new(0, 0);
Signature::new("Dummy", "[email protected]", &time)
.with_context(|| "Encountered an error when creating dummy authorship information.")
}
// Save state when in the middle of a merge prior to stashing changes in
// the working directory so that we can restore it afterward.
//
let merge_status = self.save_merge_status()?;
let signature = create_signature()?;
let stash_result = self
.repository
.stash_create(&signature, None, None);
if let Ok(stash_id) = stash_result {
self.repository.stash_store(&stash_id, Some("offstage backup"))?;
}
match stash_result {
Ok(stash_id) => Ok(Some(Stash {
stash_id,
merge_status,
})),
Err(error) if error.code() == ErrorCode::NotFound => Ok(None),
Err(error) => Err(anyhow!(error)
.context("Encountered an error when stashing a backup of the working directory.")),
}
}
fn save_merge_status(&self) -> Result<MergeStatus> {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
let merge_head = Self::read_file_to_string(merge_head_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_head_path.display()
)
})?;
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
let merge_mode = Self::read_file_to_string(merge_mode_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_mode_path.display()
)
})?;
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
let merge_msg = Self::read_file_to_string(merge_msg_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_msg_path.display()
)
})?;
Ok(MergeStatus {
merge_head,
merge_mode,
merge_msg,
})
}
fn restore_merge_status(&self, merge_status: &MergeStatus) -> Result<()> {
// Tries to restore all files before returning the first error if one exists.
let restore_merge_head_result =
merge_status
.merge_head
.as_ref()
.map_or(Ok(()), |merge_head| {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
fs::write(merge_head_path, merge_head).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_head_path.display()
)
})
});
let restore_merge_mode_result =
merge_status
.merge_mode
.as_ref()
.map_or(Ok(()), |merge_mode| {
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
fs::write(merge_mode_path, merge_mode).with_context(|| {
format!(
"Encountered an error when restoring {}.",
&merge_mode_path.display()
)
})
});
let restore_merge_msg_result =
merge_status.merge_msg.as_ref().map_or(Ok(()), |merge_msg| {
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
fs::write(merge_msg_path, merge_msg).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_msg_path.display()
)
})
});
restore_merge_head_result?;
restore_merge_mode_result?;
restore_merge_msg_result?;
Ok(())
}
fn read_file_to_string<P: AsRef<Path>>(file: P) -> Result<Option<String>> {
match fs::read_to_string(file) {
Ok(contents) => Ok(Some(contents)),
Err(error) if error.kind() == NotFound => Ok(None),
Err(error) => Err(anyhow!(error)),
}
}
fn delete_files<P: AsRef<Path>>(files: &Vec<P>) -> Result<()> {
for file in files.iter() {
fs::remove_file(file).with_context(|| {
format!(
"Encountered error when deleting {}.",
file.as_ref().display()
)
})?;
}
Ok(())
}
}
#[derive(Debug)]
pub struct Snapshot {
pub staged_files: Vec<PathBuf>,
backup_stash: Option<Stash>,
unstaged_diff: Option<Vec<u8>>,
}
#[derive(Debug)]
struct Stash {
stash_id: Oid,
merge_status: MergeStatus,
}
#[derive(Debug)]
struct MergeStatus {
merge_head: Option<String>,
merge_mode: Option<String>,
merge_msg: Option<String>,
}
| {
// When strict hash verification is disabled, it means libgit2 will not
// compute the "object id" of Git objects (which is a SHA-1 hash) after
// reading them to verify they match the object ids being used to look
// them up. This improves performance, and I don't have in front of me
// a concrete example where this is necessary to prevent data loss. If
// one becomes obvious, then we should make this configurable.
//
git2::opts::strict_hash_verification(false);
let repository = Repository::open_from_env()
.with_context(|| "Encountered an error when opening the Git repository.")?;
Ok(Self { repository })
} | identifier_body |
git.rs | use anyhow::{anyhow, Context, Result};
use git2::{
build::CheckoutBuilder, ApplyLocation, Delta, Diff, DiffFormat, DiffOptions, ErrorCode,
IndexAddOption, Oid, Repository, ResetType, Signature, StashApplyOptions, Time,
};
use itertools::Itertools;
use std::cell::RefCell;
use std::collections::HashSet;
use std::fs;
use std::hash::Hash;
use std::io::ErrorKind::NotFound;
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
/// An abstraction over a Git repository providing complex behavior needed for
/// applying changes to staged files safely.
pub struct GitRepository {
repository: Repository,
}
impl GitRepository {
/// Attempts to open an already-existing repository.
///
/// If the $GIT_DIR environment variable is set, this uses it to locate the
/// Git repository. Otherwise, this searches up the directory tree from the
/// current directory to find the repository.
pub fn open() -> Result<Self> {
// When strict hash verification is disabled, it means libgit2 will not
// compute the "object id" of Git objects (which is a SHA-1 hash) after
// reading them to verify they match the object ids being used to look
// them up. This improves performance, and I don't have in front of me
// a concrete example where this is necessary to prevent data loss. If
// one becomes obvious, then we should make this configurable.
//
git2::opts::strict_hash_verification(false);
let repository = Repository::open_from_env()
.with_context(|| "Encountered an error when opening the Git repository.")?;
Ok(Self { repository })
}
pub fn save_snapshot(&mut self, staged_files: Vec<PathBuf>) -> Result<Snapshot> {
let inner = || -> Result<Snapshot> {
let deleted_files = self.get_deleted_files()?;
let unstaged_diff = self.save_unstaged_diff()?;
let backup_stash = self.save_snapshot_stash()?;
// Because `git stash` restores the HEAD commit, it brings back uncommitted
// deleted files. We need to clear them before creating our snapshot.
GitRepository::delete_files(&deleted_files)?;
self.hide_partially_staged_changes()?;
Ok(Snapshot {
backup_stash,
staged_files,
unstaged_diff,
})
};
inner().with_context(|| "Encountered an error when saving a snapshot.")
}
pub fn apply_modifications(&mut self, snapshot: &Snapshot) -> Result<()> {
self.stage_modifications(snapshot)?;
if self.get_staged_files()?.is_empty() {
return Err(anyhow!("Prevented an empty git commit."));
}
if let Some(raw_diff) = &snapshot.unstaged_diff {
let unstaged_diff = Diff::from_buffer(raw_diff)?;
self.merge_modifications(unstaged_diff)?;
}
Ok(())
}
pub fn restore_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
let mut inner = || -> Result<()> {
self.hard_reset()?;
if let Some(backup_stash) = &snapshot.backup_stash {
self.apply_stash(&backup_stash.stash_id)?;
self.restore_merge_status(&backup_stash.merge_status)?;
}
Ok(())
};
inner().with_context(|| "Encountered an error when restoring snapshot after another error.")
}
pub fn clean_snapshot(&mut self, snapshot: Snapshot) -> Result<()> {
let inner = || -> Result<()> {
if let Some(backup_stash) = snapshot.backup_stash {
let stash_index = self
.get_stash_index_from_id(&backup_stash.stash_id)?
.ok_or_else(|| {
anyhow!(
"Could not find a backup stash with id {}.",
&backup_stash.stash_id
)
})?;
self.repository.stash_drop(stash_index)?;
}
Ok(())
};
inner().with_context(|| {
"Encountered an error when cleaning snapshot. You might find a stash entry \
in the stash list."
})
}
fn stage_modifications(&mut self, snapshot: &Snapshot) -> Result<()> {
let mut index = self.repository.index()?;
index.add_all(
&snapshot.staged_files,
IndexAddOption::DEFAULT | IndexAddOption::DISABLE_PATHSPEC_MATCH,
None,
)?;
index.write()?;
Ok(())
}
fn merge_modifications(&self, unstaged_diff: Diff) -> Result<()> {
self.repository
.apply(&unstaged_diff, ApplyLocation::WorkDir, None)
.with_context(|| "Unstaged changes could not be restored due to a merge conflict.")
}
fn hard_reset(&self) -> Result<()> {
let head = self.repository.head()?.peel_to_commit()?;
self.repository
.reset(head.as_object(), ResetType::Hard, None)
.map_err(|error| anyhow!(error))
}
fn get_stash_index_from_id(&mut self, stash_id: &Oid) -> Result<Option<usize>> {
// It would be much better if libgit2 accepted a stash Oid
// instead of an index from the stash list.
let ref_stash_index = RefCell::new(None);
self.repository.stash_foreach(|index, _, oid| {
if oid == stash_id {
*ref_stash_index.borrow_mut() = Some(index);
false
} else {
true
}
})?;
// Copy the data out of the RefCell.
        let stash_index = *ref_stash_index.borrow();
Ok(stash_index)
}
fn apply_stash(&mut self, stash_id: &Oid) -> Result<()> {
let stash_index = self
.get_stash_index_from_id(stash_id)?
.ok_or_else(|| anyhow!("Could not find a backup stash with id {}.", stash_id))?;
self.repository.stash_apply(
stash_index,
Some(StashApplyOptions::default().reinstantiate_index()),
)?;
Ok(())
}
fn save_unstaged_diff(&self) -> Result<Option<Vec<u8>>> {
let partially_staged_files = self.get_partially_staged_files(true)?;
if partially_staged_files.is_empty() {
return Ok(None);
}
let mut diff_options = DiffOptions::new();
diff_options.show_binary(true);
for file in partially_staged_files.iter() {
diff_options.pathspec(file);
}
let unstaged_diff = self
.repository
.diff_index_to_workdir(None, Some(&mut diff_options))?;
// The Diff created by diff_index_to_workdir is owned by the repository.
// It means storing this diff separately isn't possible, and it is also
// difficult to store it along with the repository together in a struct,
// because that struct then will have a self reference between its diff
// and its repository.
//
// I'm not comfortable enough with ownership to understand the correct
// way to work around this, so the current approach that I'm taking is
// to copy the diff out into a buffer. This is not the most performant.
//
// For updates about this issue, we can keep tabs on
//
// https://github.com/rust-lang/git2-rs/issues/622
//
fn copy_diff(diff: &Diff) -> Result<Vec<u8>> {
let mut buffer = vec![];
diff.print(DiffFormat::Patch, |_, _, line| {
let origin = line.origin();
                if origin == '+' || origin == '-' || origin == ' ' {
buffer.push(origin as u8);
}
buffer.append(&mut line.content().to_vec());
true
})?;
Ok(buffer)
}
Ok(Some(copy_diff(&unstaged_diff)?))
}
fn hide_partially_staged_changes(&self) -> Result<()> {
let partially_staged_files = self.get_partially_staged_files(false)?;
if partially_staged_files.is_empty() {
return Ok(());
}
let mut checkout_options = CheckoutBuilder::new();
checkout_options.force();
checkout_options.update_index(false);
for file in partially_staged_files.iter() {
checkout_options.path(file);
}
self.repository
.checkout_index(None, Some(&mut checkout_options))?;
Ok(())
}
pub fn get_staged_files(&self) -> Result<Vec<PathBuf>> {
let head_tree = match self.repository.head() {
Ok(head) => Ok(Some(head.peel_to_tree()?)),
Err(error) if error.code() == ErrorCode::UnbornBranch => Ok(None),
Err(error) => Err(error),
}?;
let staged_files = self
.repository
.diff_tree_to_index(head_tree.as_ref(), None, None)?
.deltas()
.flat_map(|delta| {
if delta.old_file().path() == delta.new_file().path() {
vec![delta.old_file().path()]
} else {
vec![delta.old_file().path(), delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf)
.collect();
Ok(staged_files)
}
fn get_partially_staged_files(&self, include_from_files: bool) -> Result<HashSet<PathBuf>> {
let staged_files = HashSet::from_iter(self.get_staged_files()?);
let unstaged_files = HashSet::from_iter(
self.repository
.diff_index_to_workdir(None, Some(DiffOptions::default().show_binary(true)))?
.deltas()
.flat_map(|delta| {
if include_from_files {
vec![delta.old_file().path(), delta.new_file().path()]
} else {
vec![delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf),
);
fn intersect<P: Eq + Hash>(one: HashSet<P>, two: &HashSet<P>) -> HashSet<P> {
one.into_iter().filter(|p| two.contains(p)).collect()
}
Ok(intersect(staged_files, &unstaged_files))
}
fn get_deleted_files(&self) -> Result<Vec<PathBuf>> {
let deleted_files = self
.repository
.diff_index_to_workdir(None, None)?
.deltas()
.filter(|delta| delta.status() == Delta::Deleted)
.filter_map(|delta| delta.old_file().path())
.map(Path::to_path_buf)
.collect_vec();
Ok(deleted_files)
}
fn save_snapshot_stash(&mut self) -> Result<Option<Stash>> {
if self.repository.is_empty()? {
return Ok(None);
}
fn create_signature<'a>() -> Result<Signature<'a>> {
// Because this time is only used to create a dummy signature to
// make the stash_save method happy, we don't need to use a real
// time, which skips some calls to the kernel.
//
let time = Time::new(0, 0);
Signature::new("Dummy", "[email protected]", &time)
.with_context(|| "Encountered an error when creating dummy authorship information.")
}
// Save state when in the middle of a merge prior to stashing changes in
// the working directory so that we can restore it afterward.
//
let merge_status = self.save_merge_status()?;
let signature = create_signature()?;
let stash_result = self
.repository
.stash_create(&signature, None, None);
if let Ok(stash_id) = stash_result {
self.repository.stash_store(&stash_id, Some("offstage backup"))?;
}
match stash_result {
Ok(stash_id) => Ok(Some(Stash {
stash_id,
merge_status,
})),
Err(error) if error.code() == ErrorCode::NotFound => Ok(None),
Err(error) => Err(anyhow!(error)
.context("Encountered an error when stashing a backup of the working directory.")),
}
}
fn save_merge_status(&self) -> Result<MergeStatus> {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
let merge_head = Self::read_file_to_string(merge_head_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_head_path.display()
)
})?;
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
let merge_mode = Self::read_file_to_string(merge_mode_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_mode_path.display()
)
})?;
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
let merge_msg = Self::read_file_to_string(merge_msg_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_msg_path.display()
)
})?;
Ok(MergeStatus {
merge_head,
merge_mode,
merge_msg,
})
}
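    // (Background: git marks an in-progress merge with these files under .git/.
    // MERGE_HEAD holds the commit id(s) being merged in, MERGE_MODE records
    // flags such as --no-ff, and MERGE_MSG holds the draft commit message.)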
fn restore_merge_status(&self, merge_status: &MergeStatus) -> Result<()> {
// Tries to restore all files before returning the first error if one exists.
let restore_merge_head_result =
merge_status
.merge_head
.as_ref()
.map_or(Ok(()), |merge_head| {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
fs::write(merge_head_path, merge_head).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_head_path.display()
)
})
});
let restore_merge_mode_result =
merge_status
.merge_mode
.as_ref()
.map_or(Ok(()), |merge_mode| {
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
fs::write(merge_mode_path, merge_mode).with_context(|| {
format!(
"Encountered an error when restoring {}.",
&merge_mode_path.display()
)
})
});
let restore_merge_msg_result =
merge_status.merge_msg.as_ref().map_or(Ok(()), |merge_msg| {
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
fs::write(merge_msg_path, merge_msg).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_msg_path.display()
)
})
});
restore_merge_head_result?;
restore_merge_mode_result?;
restore_merge_msg_result?;
Ok(())
}
fn read_file_to_string<P: AsRef<Path>>(file: P) -> Result<Option<String>> {
match fs::read_to_string(file) {
Ok(contents) => Ok(Some(contents)),
Err(error) if error.kind() == NotFound => Ok(None),
Err(error) => Err(anyhow!(error)),
}
}
fn delete_files<P: AsRef<Path>>(files: &Vec<P>) -> Result<()> {
for file in files.iter() {
fs::remove_file(file).with_context(|| {
format!( |
Ok(())
}
}
#[derive(Debug)]
pub struct Snapshot {
pub staged_files: Vec<PathBuf>,
backup_stash: Option<Stash>,
unstaged_diff: Option<Vec<u8>>,
}
#[derive(Debug)]
struct Stash {
stash_id: Oid,
merge_status: MergeStatus,
}
#[derive(Debug)]
struct MergeStatus {
merge_head: Option<String>,
merge_mode: Option<String>,
merge_msg: Option<String>,
} | "Encountered error when deleting {}.",
file.as_ref().display()
)
})?;
} | random_line_split |
client.rs | //! Common client functionalities.
use crate::rdsys;
use crate::rdsys::types::*;
use std::ffi::{CStr, CString};
use std::mem;
use std::os::raw::c_char;
use std::os::raw::c_void;
use std::ptr;
use std::slice;
use std::string::ToString;
use std::time::Duration;
use serde_json;
use crate::config::{ClientConfig, NativeClientConfig, RDKafkaLogLevel};
use crate::error::{IsError, KafkaError, KafkaResult};
use crate::groups::GroupList;
use crate::metadata::Metadata;
use crate::statistics::Statistics;
use crate::util::{timeout_to_ms, ErrBuf};
/// Client-level context
///
/// Each client (consumers and producers included) has a context object that can be used to
/// customize its behavior. Implementing `ClientContext` enables the customization of
/// methods common to all clients, while `ProducerContext` and `ConsumerContext` are specific to
/// producers and consumers. Refer to the list of methods to see which callbacks can currently
/// be overridden. Implementations of `ClientContext` must be thread safe, as they might be owned by
/// multiple threads.
pub trait ClientContext: Send + Sync {
/// Receives log lines from librdkafka.
fn log(&self, level: RDKafkaLogLevel, fac: &str, log_message: &str) {
match level {
RDKafkaLogLevel::Emerg
| RDKafkaLogLevel::Alert
| RDKafkaLogLevel::Critical
| RDKafkaLogLevel::Error => error!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
RDKafkaLogLevel::Warning => warn!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
RDKafkaLogLevel::Notice => info!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
RDKafkaLogLevel::Info => info!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
RDKafkaLogLevel::Debug => debug!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
}
}
/// Receives the statistics of the librdkafka client. To enable, the
/// "statistics.interval.ms" configuration parameter must be specified.
fn stats(&self, statistics: Statistics) {
info!("Client stats: {:?}", statistics);
}
/// Receives global errors from the librdkafka client.
fn error(&self, error: KafkaError, reason: &str) {
error!("librdkafka: {}: {}", error, reason);
}
// NOTE: when adding a new method, remember to add it to the FutureProducerContext as well.
// https://github.com/rust-lang/rfcs/pull/1406 will maybe help in the future.
}
/// An empty `ClientContext` that can be used when no context is needed. Default
/// callback implementations will be used.
#[derive(Clone, Default)]
pub struct DefaultClientContext;
impl ClientContext for DefaultClientContext {}
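/// Example context (illustrative only; this type is hypothetical and not used
/// anywhere in the crate): it counts statistics callbacks and relies on the
/// default implementations of the remaining `ClientContext` methods.
#[allow(dead_code)]
struct CountingContext {
    stats_seen: std::sync::atomic::AtomicU64,
}

#[allow(dead_code)]
impl ClientContext for CountingContext {
    fn stats(&self, statistics: Statistics) {
        let seen = self
            .stats_seen
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed)
            + 1;
        info!("Client stats update #{}: {:?}", seen, statistics);
    }
}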
//
// ********** CLIENT **********
//
/// A native rdkafka-sys client. This struct shouldn't be used directly. Use higher level `Client`
/// or producers and consumers.
pub struct NativeClient {
ptr: *mut RDKafka,
}
// The library is completely thread safe, according to the documentation.
unsafe impl Sync for NativeClient {}
unsafe impl Send for NativeClient {}
impl NativeClient {
/// Wraps a pointer to an RDKafka object and returns a new NativeClient.
pub(crate) unsafe fn from_ptr(ptr: *mut RDKafka) -> NativeClient {
NativeClient { ptr }
}
/// Returns the wrapped pointer to RDKafka.
pub fn ptr(&self) -> *mut RDKafka {
self.ptr
}
}
impl Drop for NativeClient {
fn drop(&mut self) {
trace!("Destroying client: {:p}", self.ptr);
unsafe {
rdsys::rd_kafka_destroy(self.ptr);
}
trace!("Client destroyed: {:?}", self.ptr);
}
}
/// A low-level rdkafka client. This client shouldn't be used directly; the producer and consumer
/// modules provide producer and consumer implementations built on top of `Client` that should be
/// used instead.
pub struct Client<C: ClientContext = DefaultClientContext> {
native: NativeClient,
context: Box<C>,
}
impl<C: ClientContext> Client<C> {
/// Creates a new `Client` given a configuration, a client type and a context.
pub fn new(
config: &ClientConfig,
native_config: NativeClientConfig,
rd_kafka_type: RDKafkaType,
context: C,
) -> KafkaResult<Client<C>> {
let mut err_buf = ErrBuf::new();
let mut boxed_context = Box::new(context);
unsafe {
rdsys::rd_kafka_conf_set_opaque(
native_config.ptr(),
(&mut *boxed_context) as *mut C as *mut c_void,
)
};
unsafe { rdsys::rd_kafka_conf_set_log_cb(native_config.ptr(), Some(native_log_cb::<C>)) };
unsafe {
rdsys::rd_kafka_conf_set_stats_cb(native_config.ptr(), Some(native_stats_cb::<C>))
};
unsafe {
rdsys::rd_kafka_conf_set_error_cb(native_config.ptr(), Some(native_error_cb::<C>))
};
let client_ptr = unsafe {
rdsys::rd_kafka_new(
rd_kafka_type,
native_config.ptr_move(),
err_buf.as_mut_ptr(),
err_buf.len(),
)
};
trace!("Create new librdkafka client {:p}", client_ptr);
if client_ptr.is_null() {
return Err(KafkaError::ClientCreation(err_buf.to_string()));
}
unsafe { rdsys::rd_kafka_set_log_level(client_ptr, config.log_level as i32) };
Ok(Client {
native: unsafe { NativeClient::from_ptr(client_ptr) },
context: boxed_context,
})
}
/// Returns a reference to the native rdkafka-sys client.
pub fn native_client(&self) -> &NativeClient {
&self.native
}
/// Returns a pointer to the native rdkafka-sys client.
pub fn native_ptr(&self) -> *mut RDKafka {
self.native.ptr
}
/// Returns a reference to the context.
pub fn context(&self) -> &C {
self.context.as_ref()
}
/// Returns the metadata information for the specified topic, or for all topics in the cluster
/// if no topic is specified.
pub fn fetch_metadata<T: Into<Option<Duration>>>(
&self,
topic: Option<&str>,
timeout: T,
) -> KafkaResult<Metadata> {
let mut metadata_ptr: *const RDKafkaMetadata = ptr::null_mut();
let (flag, native_topic) = if let Some(topic_name) = topic {
(0, Some(self.native_topic(topic_name)?))
} else {
(1, None)
};
trace!("Starting metadata fetch");
let ret = unsafe {
rdsys::rd_kafka_metadata(
self.native_ptr(),
flag,
native_topic
.map(|t| t.ptr())
.unwrap_or_else(NativeTopic::null),
&mut metadata_ptr as *mut *const RDKafkaMetadata,
timeout_to_ms(timeout),
)
};
trace!("Metadata fetch completed");
if ret.is_error() {
return Err(KafkaError::MetadataFetch(ret.into()));
}
Ok(unsafe { Metadata::from_ptr(metadata_ptr) })
}
/// Returns high and low watermark for the specified topic and partition.
pub fn fetch_watermarks<T: Into<Option<Duration>>>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)> {
let mut low = -1;
let mut high = -1;
let topic_c = CString::new(topic.to_string())?;
let ret = unsafe {
rdsys::rd_kafka_query_watermark_offsets(
self.native_ptr(),
topic_c.as_ptr(),
partition,
&mut low as *mut i64,
&mut high as *mut i64,
timeout_to_ms(timeout),
)
};
if ret.is_error() {
return Err(KafkaError::MetadataFetch(ret.into()));
}
Ok((low, high))
}
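    // Illustrative usage (topic name hypothetical):
    //
    //     let (low, high) = client.fetch_watermarks("my-topic", 0, Duration::from_secs(1))?;
    //     println!("partition 0 spans offsets {}..{}", low, high);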
/// Returns the group membership information for the given group. If no group is
/// specified, all groups will be returned.
pub fn fetch_group_list<T: Into<Option<Duration>>>(
&self,
group: Option<&str>,
timeout: T,
) -> KafkaResult<GroupList> {
        // Careful: group_c must stay alive until after the FFI call below,
        // since we pass librdkafka a raw pointer into it.
let group_c = CString::new(group.map_or("".to_string(), ToString::to_string))?;
let group_c_ptr = if group.is_some() {
group_c.as_ptr()
} else {
ptr::null_mut()
}; | let mut group_list_ptr: *const RDKafkaGroupList = ptr::null_mut();
trace!("Starting group list fetch");
let ret = unsafe {
rdsys::rd_kafka_list_groups(
self.native_ptr(),
group_c_ptr,
&mut group_list_ptr as *mut *const RDKafkaGroupList,
timeout_to_ms(timeout),
)
};
trace!("Group list fetch completed");
if ret.is_error() {
return Err(KafkaError::GroupListFetch(ret.into()));
}
Ok(unsafe { GroupList::from_ptr(group_list_ptr) })
}
/// Returns a NativeTopic from the current client. The NativeTopic shouldn't outlive the client
/// it was generated from.
fn native_topic(&self, topic: &str) -> KafkaResult<NativeTopic> {
let topic_c = CString::new(topic.to_string())?;
Ok(unsafe {
NativeTopic::from_ptr(rdsys::rd_kafka_topic_new(
self.native_ptr(),
topic_c.as_ptr(),
ptr::null_mut(),
))
})
}
/// Returns a NativeQueue from the current client. The NativeQueue shouldn't
/// outlive the client it was generated from.
pub(crate) fn new_native_queue(&self) -> NativeQueue {
unsafe { NativeQueue::from_ptr(rdsys::rd_kafka_queue_new(self.native_ptr())) }
}
}
struct NativeTopic {
ptr: *mut RDKafkaTopic,
}
unsafe impl Send for NativeTopic {}
unsafe impl Sync for NativeTopic {}
impl NativeTopic {
/// Wraps a pointer to an `RDKafkaTopic` object and returns a new `NativeTopic`.
unsafe fn from_ptr(ptr: *mut RDKafkaTopic) -> NativeTopic {
NativeTopic { ptr }
}
/// Returns the pointer to the librdkafka RDKafkaTopic structure.
fn ptr(&self) -> *mut RDKafkaTopic {
self.ptr
}
/// Returns a null pointer.
fn null() -> *mut RDKafkaTopic {
        ptr::null_mut()
}
}
impl Drop for NativeTopic {
fn drop(&mut self) {
trace!("Destroying NativeTopic: {:?}", self.ptr);
unsafe {
rdsys::rd_kafka_topic_destroy(self.ptr);
}
trace!("NativeTopic destroyed: {:?}", self.ptr);
}
}
pub(crate) struct NativeQueue {
ptr: *mut RDKafkaQueue,
}
// The library is completely thread safe, according to the documentation.
unsafe impl Sync for NativeQueue {}
unsafe impl Send for NativeQueue {}
impl NativeQueue {
/// Wraps a pointer to an `RDKafkaQueue` object and returns a new
/// `NativeQueue`.
unsafe fn from_ptr(ptr: *mut RDKafkaQueue) -> NativeQueue {
NativeQueue { ptr }
}
/// Returns the pointer to the librdkafka RDKafkaQueue structure.
pub fn ptr(&self) -> *mut RDKafkaQueue {
self.ptr
}
pub fn poll<T: Into<Option<Duration>>>(&self, t: T) -> *mut RDKafkaEvent {
unsafe { rdsys::rd_kafka_queue_poll(self.ptr, timeout_to_ms(t)) }
}
}
impl Drop for NativeQueue {
fn drop(&mut self) {
trace!("Destroying queue: {:?}", self.ptr);
unsafe {
rdsys::rd_kafka_queue_destroy(self.ptr);
}
trace!("Queue destroyed: {:?}", self.ptr);
}
}
pub(crate) unsafe extern "C" fn native_log_cb<C: ClientContext>(
client: *const RDKafka,
level: i32,
fac: *const c_char,
buf: *const c_char,
) {
let fac = CStr::from_ptr(fac).to_string_lossy();
let log_message = CStr::from_ptr(buf).to_string_lossy();
let context = Box::from_raw(rdsys::rd_kafka_opaque(client) as *mut C);
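    // Reconstructing the box lets us call methods on the context without
    // taking ownership; `mem::forget` below hands the pointer back to
    // librdkafka so the context is not dropped here.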
(*context).log(
RDKafkaLogLevel::from_int(level),
fac.trim(),
log_message.trim(),
);
mem::forget(context); // Do not free the context
}
pub(crate) unsafe extern "C" fn native_stats_cb<C: ClientContext>(
_conf: *mut RDKafka,
json: *mut c_char,
json_len: usize,
opaque: *mut c_void,
) -> i32 {
let context = Box::from_raw(opaque as *mut C);
let mut bytes_vec = Vec::new();
bytes_vec.extend_from_slice(slice::from_raw_parts(json as *mut u8, json_len));
let json_string = CString::from_vec_unchecked(bytes_vec).into_string();
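    // `from_vec_unchecked` skips only the interior-NUL check; UTF-8 validity
    // is still enforced by `into_string`, whose error case is handled below.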
match json_string {
Ok(json) => match serde_json::from_str(&json) {
Ok(stats) => (*context).stats(stats),
Err(e) => error!("Could not parse statistics JSON: {}", e),
},
Err(e) => error!("Statistics JSON string is not UTF-8: {:?}", e),
}
mem::forget(context); // Do not free the context
0 // librdkafka will free the json buffer
}
pub(crate) unsafe extern "C" fn native_error_cb<C: ClientContext>(
_client: *mut RDKafka,
err: i32,
reason: *const c_char,
opaque: *mut c_void,
) {
let err = rdsys::primitive_to_rd_kafka_resp_err_t(err)
.expect("global error not an rd_kafka_resp_err_t");
let error = KafkaError::Global(err.into());
let reason = CStr::from_ptr(reason).to_string_lossy();
let context = Box::from_raw(opaque as *mut C);
(*context).error(error, reason.trim());
mem::forget(context); // Do not free the context
}
#[cfg(test)]
mod tests {
    // Just call everything to check that nothing panics by default; behavior
    // is tested in the integration tests.
use super::*;
use crate::config::ClientConfig;
#[test]
fn test_client() {
let config = ClientConfig::new();
let native_config = config.create_native_config().unwrap();
let client = Client::new(
&config,
native_config,
RDKafkaType::RD_KAFKA_PRODUCER,
DefaultClientContext,
)
.unwrap();
assert!(!client.native_ptr().is_null());
}
}

// pageserver.rs
backend::AuthType};
use anyhow::{bail, ensure, Context, Result};
use clap::{App, Arg, ArgMatches};
use daemonize::Daemonize;
use pageserver::{
branches,
defaults::{DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_PG_LISTEN_ADDR},
http, page_service, tenant_mgr, PageServerConf, RelishStorageConfig, S3Config, LOG_FILE_NAME,
};
use zenith_utils::http::endpoint;
use const_format::formatcp;
/// String arguments that can be declared via CLI or config file
#[derive(Serialize, Deserialize)]
struct CfgFileParams {
listen_pg_addr: Option<String>,
listen_http_addr: Option<String>,
checkpoint_distance: Option<String>,
checkpoint_period: Option<String>,
gc_horizon: Option<String>,
gc_period: Option<String>,
pg_distrib_dir: Option<String>,
auth_validation_public_key_path: Option<String>,
auth_type: Option<String>,
// see https://github.com/alexcrichton/toml-rs/blob/6c162e6562c3e432bf04c82a3d1d789d80761a86/examples/enum_external.rs for enum deserialisation examples
relish_storage: Option<RelishStorage>,
}
#[derive(Serialize, Deserialize, Clone)]
enum RelishStorage {
Local {
local_path: String,
},
AwsS3 {
bucket_name: String,
bucket_region: String,
#[serde(skip_serializing)]
access_key_id: Option<String>,
#[serde(skip_serializing)]
secret_access_key: Option<String>,
},
}
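
// An illustrative `pageserver.toml` that the structs above can deserialize
// (all values are made up; the `[relish_storage.Local]` table shows the
// external enum tagging):
//
//   listen_pg_addr = "127.0.0.1:6400"
//   checkpoint_distance = "268435456"
//   gc_period = "100 s"
//
//   [relish_storage.Local]
//   local_path = "/some/relish/dir"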
impl CfgFileParams {
/// Extract string arguments from CLI
fn from_args(arg_matches: &ArgMatches) -> Self {
let get_arg = |arg_name: &str| -> Option<String> {
arg_matches.value_of(arg_name).map(str::to_owned)
};
let relish_storage = if let Some(local_path) = get_arg("relish-storage-local-path") {
Some(RelishStorage::Local { local_path })
} else if let Some((bucket_name, bucket_region)) =
get_arg("relish-storage-s3-bucket").zip(get_arg("relish-storage-region"))
{
Some(RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id: get_arg("relish-storage-access-key"),
secret_access_key: get_arg("relish-storage-secret-access-key"),
})
} else {
None
};
Self {
listen_pg_addr: get_arg("listen-pg"),
listen_http_addr: get_arg("listen-http"),
checkpoint_distance: get_arg("checkpoint_distance"),
checkpoint_period: get_arg("checkpoint_period"),
gc_horizon: get_arg("gc_horizon"),
gc_period: get_arg("gc_period"),
pg_distrib_dir: get_arg("postgres-distrib"),
auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
auth_type: get_arg("auth-type"),
relish_storage,
}
}
/// Fill missing values in `self` with `other`
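    /// (CLI values win: merging a CLI `gc_period` of `"30s"` over a config
    /// file value of `"100 s"` keeps `"30s"`; the values are illustrative.)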
fn or(self, other: CfgFileParams) -> Self {
// TODO cleaner way to do this
Self {
listen_pg_addr: self.listen_pg_addr.or(other.listen_pg_addr),
listen_http_addr: self.listen_http_addr.or(other.listen_http_addr),
checkpoint_distance: self.checkpoint_distance.or(other.checkpoint_distance),
checkpoint_period: self.checkpoint_period.or(other.checkpoint_period),
gc_horizon: self.gc_horizon.or(other.gc_horizon),
gc_period: self.gc_period.or(other.gc_period),
pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
auth_validation_public_key_path: self
.auth_validation_public_key_path
.or(other.auth_validation_public_key_path),
auth_type: self.auth_type.or(other.auth_type),
relish_storage: self.relish_storage.or(other.relish_storage),
}
}
/// Create a PageServerConf from these string parameters
fn try_into_config(&self) -> Result<PageServerConf> {
let workdir = PathBuf::from(".");
let listen_pg_addr = match self.listen_pg_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_PG_LISTEN_ADDR.to_owned(),
};
let listen_http_addr = match self.listen_http_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_HTTP_LISTEN_ADDR.to_owned(),
};
let checkpoint_distance: u64 = match self.checkpoint_distance.as_ref() {
Some(checkpoint_distance_str) => checkpoint_distance_str.parse()?,
None => DEFAULT_CHECKPOINT_DISTANCE,
};
let checkpoint_period = match self.checkpoint_period.as_ref() {
Some(checkpoint_period_str) => humantime::parse_duration(checkpoint_period_str)?,
None => DEFAULT_CHECKPOINT_PERIOD,
};
let gc_horizon: u64 = match self.gc_horizon.as_ref() {
Some(horizon_str) => horizon_str.parse()?,
None => DEFAULT_GC_HORIZON,
};
let gc_period = match self.gc_period.as_ref() {
Some(period_str) => humantime::parse_duration(period_str)?,
None => DEFAULT_GC_PERIOD,
};
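        // The two period fields above go through humantime, which accepts
        // strings such as "10s", "5m" or "1h 30m" (illustrative examples).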
let pg_distrib_dir = match self.pg_distrib_dir.as_ref() {
Some(pg_distrib_dir_str) => PathBuf::from(pg_distrib_dir_str),
None => env::current_dir()?.join("tmp_install"),
};
let auth_validation_public_key_path = self
.auth_validation_public_key_path
.as_ref()
.map(PathBuf::from);
let auth_type = self
.auth_type
.as_ref()
.map_or(Ok(AuthType::Trust), |auth_type| {
AuthType::from_str(auth_type)
})?;
        if !pg_distrib_dir.join("bin/postgres").exists() {
bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
}
if auth_type == AuthType::ZenithJWT {
ensure!(
auth_validation_public_key_path.is_some(),
"Missing auth_validation_public_key_path when auth_type is ZenithJWT"
);
let path_ref = auth_validation_public_key_path.as_ref().unwrap();
ensure!(
path_ref.exists(),
format!("Can't find auth_validation_public_key at {:?}", path_ref)
);
}
let relish_storage_config =
self.relish_storage
.as_ref()
.map(|storage_params| match storage_params.clone() {
RelishStorage::Local { local_path } => {
RelishStorageConfig::LocalFs(PathBuf::from(local_path))
}
RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
} => RelishStorageConfig::AwsS3(S3Config {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
}),
});
Ok(PageServerConf {
daemonize: false,
listen_pg_addr,
listen_http_addr,
checkpoint_distance,
checkpoint_period,
gc_horizon,
gc_period,
superuser: String::from(DEFAULT_SUPERUSER),
workdir,
pg_distrib_dir,
auth_validation_public_key_path,
auth_type,
relish_storage_config,
})
}
}
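
// Illustrative invocations (paths and values are made up):
//
//   pageserver --init -D .zenith --postgres-distrib ./tmp_install
//   pageserver -D .zenith --daemonize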
fn main() -> Result<()> {
let arg_matches = App::new("Zenith page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.arg(
Arg::with_name("listen-pg")
.short("l")
.long("listen-pg")
.alias("listen") // keep some compatibility
.takes_value(true)
.help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("listen-http")
.long("listen-http")
.alias("http_endpoint") // keep some compatibility
.takes_value(true)
.help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("daemonize")
.short("d")
.long("daemonize")
.takes_value(false)
.help("Run in the background"),
)
.arg(
Arg::with_name("init")
.long("init")
.takes_value(false)
.help("Initialize pageserver repo"),
)
.arg(
Arg::with_name("checkpoint_distance")
.long("checkpoint_distance")
.takes_value(true)
.help("Distance from current LSN to perform checkpoint of in-memory layers"),
)
.arg(
Arg::with_name("checkpoint_period")
.long("checkpoint_period")
.takes_value(true)
.help("Interval between checkpoint iterations"),
)
.arg(
Arg::with_name("gc_horizon")
.long("gc_horizon")
.takes_value(true)
.help("Distance from current LSN to perform all wal records cleanup"),
)
.arg(
Arg::with_name("gc_period")
.long("gc_period")
.takes_value(true)
.help("Interval between garbage collector iterations"),
)
.arg(
Arg::with_name("workdir")
.short("D")
.long("workdir")
.takes_value(true)
.help("Working directory for the pageserver"),
)
.arg(
Arg::with_name("postgres-distrib")
.long("postgres-distrib")
.takes_value(true)
.help("Postgres distribution directory"),
)
.arg(
Arg::with_name("create-tenant")
.long("create-tenant")
.takes_value(true)
.help("Create tenant during init")
.requires("init"),
)
.arg(
Arg::with_name("auth-validation-public-key-path")
.long("auth-validation-public-key-path")
.takes_value(true)
.help("Path to public key used to validate jwt signature"),
)
.arg(
Arg::with_name("auth-type")
.long("auth-type")
.takes_value(true)
.help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
)
.arg(
Arg::with_name("relish-storage-local-path")
.long("relish-storage-local-path")
.takes_value(true)
.help("Path to the local directory, to be used as an external relish storage")
.conflicts_with_all(&[
"relish-storage-s3-bucket",
"relish-storage-region",
"relish-storage-access-key",
"relish-storage-secret-access-key",
]),
)
.arg(
Arg::with_name("relish-storage-s3-bucket")
.long("relish-storage-s3-bucket")
.takes_value(true)
.help("Name of the AWS S3 bucket to use an external relish storage")
.requires("relish-storage-region"),
)
.arg(
Arg::with_name("relish-storage-region")
.long("relish-storage-region")
.takes_value(true)
.help("Region of the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-access-key")
.long("relish-storage-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-secret-access-key")
.long("relish-storage-secret-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.get_matches();
let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith"));
let cfg_file_path = workdir
.canonicalize()
.with_context(|| format!("Error opening workdir '{}'", workdir.display()))?
.join("pageserver.toml");
let args_params = CfgFileParams::from_args(&arg_matches);
let init = arg_matches.is_present("init");
let create_tenant = arg_matches.value_of("create-tenant");
let params = if init {
// We're initializing the repo, so there's no config file yet
args_params
} else {
// Supplement the CLI arguments with the config file
let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)
.with_context(|| format!("No pageserver config at '{}'", cfg_file_path.display()))?;
let file_params: CfgFileParams = toml::from_str(&cfg_file_contents).with_context(|| {
format!(
"Failed to read '{}' as pageserver config",
cfg_file_path.display()
)
})?;
args_params.or(file_params)
};
// Set CWD to workdir for non-daemon modes
env::set_current_dir(&workdir).with_context(|| {
format!(
"Failed to set application's current dir to '{}'",
workdir.display()
)
})?;
// Ensure the config is valid, even if just init-ing
let mut conf = params.try_into_config().with_context(|| {
format!(
"Pageserver config at '{}' is not valid",
cfg_file_path.display()
)
})?;
conf.daemonize = arg_matches.is_present("daemonize");
if init && conf.daemonize {
bail!("--daemonize cannot be used with --init")
}
    // The configuration is all set up now. Turn it into a 'static
// that can be freely stored in structs and passed across threads
// as a ref.
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
// Create repo and exit if init was requested
if init {
branches::init_pageserver(conf, create_tenant).context("Failed to init pageserver")?;
// write the config file
let cfg_file_contents = toml::to_string_pretty(¶ms)
.context("Failed to create pageserver config contents for initialisation")?;
// TODO support enable-auth flag
std::fs::write(&cfg_file_path, cfg_file_contents).with_context(|| {
format!(
"Failed to initialize pageserver config at '{}'",
cfg_file_path.display()
)
})?;
Ok(())
} else {
start_pageserver(conf).context("Failed to start pageserver")
}
}
fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
// Initialize logger
let (_scope_guard, log_file) = logging::init(LOG_FILE_NAME, conf.daemonize)?;
// TODO: Check that it looks like a valid repository before going further
// bind sockets before daemonizing so we report errors early and do not return until we are listening
info!(
"Starting pageserver http handler on {}",
conf.listen_http_addr
);
let http_listener = TcpListener::bind(conf.listen_http_addr.clone())?;
info!(
"Starting pageserver pg protocol handler on {}",
conf.listen_pg_addr
);
let pageserver_listener = TcpListener::bind(conf.listen_pg_addr.clone())?;
if conf.daemonize {
info!("daemonizing...");
        // There shouldn't be any logging to stdout/stderr. Redirect both to the
        // main log so that we will see any accidental manual fprintf's or backtraces.
let stdout = log_file.try_clone().unwrap();
let stderr = log_file;
let daemonize = Daemonize::new()
.pid_file("pageserver.pid")
.working_directory(".")
.stdout(stdout)
.stderr(stderr);
match daemonize.start() {
Ok(_) => info!("Success, daemonized"),
Err(e) => error!("Error, {}", e),
}
}
// Initialize tenant manager.
tenant_mgr::init(conf);
// keep join handles for spawned threads
let mut join_handles = vec![];
// initialize authentication for incoming connections
let auth = match &conf.auth_type {
AuthType::Trust | AuthType::MD5 => None,
AuthType::ZenithJWT => {
// unwrap is ok because check is performed when creating config, so path is set and file exists
let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
Some(JwtAuth::from_key_path(key_path)?.into())
}
};
info!("Using auth: {:#?}", conf.auth_type);
    // Spawn a new thread for the http endpoint.
    // The listener was bound above, before daemonizing, so bind errors are
    // reported early instead of surfacing after startup.
let cloned = auth.clone();
let http_endpoint_thread = thread::Builder::new()
.name("http_endpoint_thread".into())
.spawn(move || {
let router = http::make_router(conf, cloned);
endpoint::serve_thread_main(router, http_listener)
})?;
join_handles.push(http_endpoint_thread);
    // Spawn a thread

// pageserver.rs
_backend::AuthType};
use anyhow::{bail, ensure, Context, Result};
use clap::{App, Arg, ArgMatches};
use daemonize::Daemonize;
use pageserver::{
branches,
defaults::{DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_PG_LISTEN_ADDR},
http, page_service, tenant_mgr, PageServerConf, RelishStorageConfig, S3Config, LOG_FILE_NAME,
};
use zenith_utils::http::endpoint;
use const_format::formatcp;
/// String arguments that can be declared via CLI or config file
#[derive(Serialize, Deserialize)]
struct CfgFileParams {
listen_pg_addr: Option<String>,
listen_http_addr: Option<String>,
checkpoint_distance: Option<String>,
checkpoint_period: Option<String>,
gc_horizon: Option<String>,
gc_period: Option<String>,
pg_distrib_dir: Option<String>,
auth_validation_public_key_path: Option<String>,
auth_type: Option<String>,
// see https://github.com/alexcrichton/toml-rs/blob/6c162e6562c3e432bf04c82a3d1d789d80761a86/examples/enum_external.rs for enum deserialisation examples
relish_storage: Option<RelishStorage>,
}
#[derive(Serialize, Deserialize, Clone)]
enum RelishStorage {
Local {
local_path: String,
},
AwsS3 {
bucket_name: String,
bucket_region: String,
#[serde(skip_serializing)]
access_key_id: Option<String>,
#[serde(skip_serializing)]
secret_access_key: Option<String>,
},
}
impl CfgFileParams {
/// Extract string arguments from CLI
fn from_args(arg_matches: &ArgMatches) -> Self {
let get_arg = |arg_name: &str| -> Option<String> {
arg_matches.value_of(arg_name).map(str::to_owned)
};
let relish_storage = if let Some(local_path) = get_arg("relish-storage-local-path") {
Some(RelishStorage::Local { local_path })
} else if let Some((bucket_name, bucket_region)) =
get_arg("relish-storage-s3-bucket").zip(get_arg("relish-storage-region"))
{
Some(RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id: get_arg("relish-storage-access-key"),
secret_access_key: get_arg("relish-storage-secret-access-key"),
})
} else {
None
};
Self {
listen_pg_addr: get_arg("listen-pg"),
listen_http_addr: get_arg("listen-http"),
checkpoint_distance: get_arg("checkpoint_distance"),
checkpoint_period: get_arg("checkpoint_period"),
gc_horizon: get_arg("gc_horizon"),
gc_period: get_arg("gc_period"),
pg_distrib_dir: get_arg("postgres-distrib"),
auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
auth_type: get_arg("auth-type"),
relish_storage,
}
}
/// Fill missing values in `self` with `other`
fn or(self, other: CfgFileParams) -> Self {
// TODO cleaner way to do this
Self {
listen_pg_addr: self.listen_pg_addr.or(other.listen_pg_addr),
listen_http_addr: self.listen_http_addr.or(other.listen_http_addr),
checkpoint_distance: self.checkpoint_distance.or(other.checkpoint_distance),
checkpoint_period: self.checkpoint_period.or(other.checkpoint_period),
gc_horizon: self.gc_horizon.or(other.gc_horizon),
gc_period: self.gc_period.or(other.gc_period),
pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
auth_validation_public_key_path: self
.auth_validation_public_key_path
.or(other.auth_validation_public_key_path),
auth_type: self.auth_type.or(other.auth_type),
relish_storage: self.relish_storage.or(other.relish_storage),
}
}
/// Create a PageServerConf from these string parameters
fn try_into_config(&self) -> Result<PageServerConf> {
let workdir = PathBuf::from(".");
let listen_pg_addr = match self.listen_pg_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_PG_LISTEN_ADDR.to_owned(),
};
let listen_http_addr = match self.listen_http_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_HTTP_LISTEN_ADDR.to_owned(),
};
let checkpoint_distance: u64 = match self.checkpoint_distance.as_ref() {
Some(checkpoint_distance_str) => checkpoint_distance_str.parse()?,
None => DEFAULT_CHECKPOINT_DISTANCE,
};
let checkpoint_period = match self.checkpoint_period.as_ref() {
Some(checkpoint_period_str) => humantime::parse_duration(checkpoint_period_str)?,
None => DEFAULT_CHECKPOINT_PERIOD,
};
let gc_horizon: u64 = match self.gc_horizon.as_ref() {
Some(horizon_str) => horizon_str.parse()?,
None => DEFAULT_GC_HORIZON,
};
let gc_period = match self.gc_period.as_ref() {
Some(period_str) => humantime::parse_duration(period_str)?,
None => DEFAULT_GC_PERIOD,
};
let pg_distrib_dir = match self.pg_distrib_dir.as_ref() {
Some(pg_distrib_dir_str) => PathBuf::from(pg_distrib_dir_str),
None => env::current_dir()?.join("tmp_install"),
};
let auth_validation_public_key_path = self
.auth_validation_public_key_path
.as_ref()
.map(PathBuf::from);
let auth_type = self
.auth_type
.as_ref()
.map_or(Ok(AuthType::Trust), |auth_type| {
AuthType::from_str(auth_type)
})?;
if !pg_distrib_dir.join("bin/postgres").exists() {
bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
}
if auth_type == AuthType::ZenithJWT {
ensure!(
auth_validation_public_key_path.is_some(),
"Missing auth_validation_public_key_path when auth_type is ZenithJWT"
);
let path_ref = auth_validation_public_key_path.as_ref().unwrap();
ensure!(
path_ref.exists(),
format!("Can't find auth_validation_public_key at {:?}", path_ref)
);
}
let relish_storage_config =
self.relish_storage
.as_ref()
.map(|storage_params| match storage_params.clone() {
RelishStorage::Local { local_path } => {
RelishStorageConfig::LocalFs(PathBuf::from(local_path))
}
RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
} => RelishStorageConfig::AwsS3(S3Config {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
}),
});
Ok(PageServerConf {
daemonize: false,
listen_pg_addr,
listen_http_addr,
checkpoint_distance,
checkpoint_period,
gc_horizon,
gc_period,
superuser: String::from(DEFAULT_SUPERUSER),
workdir,
pg_distrib_dir, |
auth_validation_public_key_path,
auth_type,
relish_storage_config,
})
}
}
fn main() -> Result<()> {
let arg_matches = App::new("Zenith page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.arg(
Arg::with_name("listen-pg")
.short("l")
.long("listen-pg")
.alias("listen") // keep some compatibility
.takes_value(true)
.help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("listen-http")
.long("listen-http")
.alias("http_endpoint") // keep some compatibility
.takes_value(true)
.help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("daemonize")
.short("d")
.long("daemonize")
.takes_value(false)
.help("Run in the background"),
)
.arg(
Arg::with_name("init")
.long("init")
.takes_value(false)
.help("Initialize pageserver repo"),
)
.arg(
Arg::with_name("checkpoint_distance")
.long("checkpoint_distance")
.takes_value(true)
.help("Distance from current LSN to perform checkpoint of in-memory layers"),
)
.arg(
Arg::with_name("checkpoint_period")
.long("checkpoint_period")
.takes_value(true)
.help("Interval between checkpoint iterations"),
)
.arg(
Arg::with_name("gc_horizon")
.long("gc_horizon")
.takes_value(true)
.help("Distance from current LSN to perform all wal records cleanup"),
)
.arg(
Arg::with_name("gc_period")
.long("gc_period")
.takes_value(true)
.help("Interval between garbage collector iterations"),
)
.arg(
Arg::with_name("workdir")
.short("D")
.long("workdir")
.takes_value(true)
.help("Working directory for the pageserver"),
)
.arg(
Arg::with_name("postgres-distrib")
.long("postgres-distrib")
.takes_value(true)
.help("Postgres distribution directory"),
)
.arg(
Arg::with_name("create-tenant")
.long("create-tenant")
.takes_value(true)
.help("Create tenant during init")
.requires("init"),
)
.arg(
Arg::with_name("auth-validation-public-key-path")
.long("auth-validation-public-key-path")
.takes_value(true)
.help("Path to public key used to validate jwt signature"),
)
.arg(
Arg::with_name("auth-type")
.long("auth-type")
.takes_value(true)
.help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
)
.arg(
Arg::with_name("relish-storage-local-path")
.long("relish-storage-local-path")
.takes_value(true)
.help("Path to the local directory, to be used as an external relish storage")
.conflicts_with_all(&[
"relish-storage-s3-bucket",
"relish-storage-region",
"relish-storage-access-key",
"relish-storage-secret-access-key",
]),
)
.arg(
Arg::with_name("relish-storage-s3-bucket")
.long("relish-storage-s3-bucket")
.takes_value(true)
.help("Name of the AWS S3 bucket to use an external relish storage")
.requires("relish-storage-region"),
)
.arg(
Arg::with_name("relish-storage-region")
.long("relish-storage-region")
.takes_value(true)
.help("Region of the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-access-key")
.long("relish-storage-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-secret-access-key")
.long("relish-storage-secret-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.get_matches();
let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith"));
let cfg_file_path = workdir
.canonicalize()
.with_context(|| format!("Error opening workdir '{}'", workdir.display()))?
.join("pageserver.toml");
let args_params = CfgFileParams::from_args(&arg_matches);
let init = arg_matches.is_present("init");
let create_tenant = arg_matches.value_of("create-tenant");
let params = if init {
// We're initializing the repo, so there's no config file yet
args_params
} else {
// Supplement the CLI arguments with the config file
let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)
.with_context(|| format!("No pageserver config at '{}'", cfg_file_path.display()))?;
let file_params: CfgFileParams = toml::from_str(&cfg_file_contents).with_context(|| {
format!(
"Failed to read '{}' as pageserver config",
cfg_file_path.display()
)
})?;
args_params.or(file_params)
};
// Set CWD to workdir for non-daemon modes
env::set_current_dir(&workdir).with_context(|| {
format!(
"Failed to set application's current dir to '{}'",
workdir.display()
)
})?;
// Ensure the config is valid, even if just init-ing
let mut conf = params.try_into_config().with_context(|| {
format!(
"Pageserver config at '{}' is not valid",
cfg_file_path.display()
)
})?;
conf.daemonize = arg_matches.is_present("daemonize");
if init && conf.daemonize {
bail!("--daemonize cannot be used with --init")
}
// The configuration is all set up now. Turn it into a 'static
// that can be freely stored in structs and passed across threads
// as a ref.
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
// Create repo and exit if init was requested
if init {
branches::init_pageserver(conf, create_tenant).context("Failed to init pageserver")?;
// write the config file
let cfg_file_contents = toml::to_string_pretty(¶ms)
.context("Failed to create pageserver config contents for initialisation")?;
// TODO support enable-auth flag
std::fs::write(&cfg_file_path, cfg_file_contents).with_context(|| {
format!(
"Failed to initialize pageserver config at '{}'",
cfg_file_path.display()
)
})?;
Ok(())
} else {
start_pageserver(conf).context("Failed to start pageserver")
}
}
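// Hypothetical invocations (binary name, paths and addresses are
// illustrative only; all flags are defined in `main` above):
//
//   pageserver --init --workdir .zenith --postgres-distrib /usr/local/pgsql
//   pageserver -d --listen-pg 127.0.0.1:6400 --listen-http 127.0.0.1:9898
//
// The first writes `pageserver.toml` into the workdir and exits; the second
// starts the daemonized server with explicit listen addresses.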
fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
// Initialize logger
let (_scope_guard, log_file) = logging::init(LOG_FILE_NAME, conf.daemonize)?;
// TODO: Check that it looks like a valid repository before going further
// bind sockets before daemonizing so we report errors early and do not return until we are listening
info!(
"Starting pageserver http handler on {}",
conf.listen_http_addr
);
let http_listener = TcpListener::bind(conf.listen_http_addr.clone())?;
info!(
"Starting pageserver pg protocol handler on {}",
conf.listen_pg_addr
);
let pageserver_listener = TcpListener::bind(conf.listen_pg_addr.clone())?;
if conf.daemonize {
info!("daemonizing...");
// There shouldn't be any logging to stdout/stderr after this point. Redirect
// both to the main log so that we will see any accidental manual fprintf's or backtraces.
let stdout = log_file.try_clone().unwrap();
let stderr = log_file;
let daemonize = Daemonize::new()
.pid_file("pageserver.pid")
.working_directory(".")
.stdout(stdout)
.stderr(stderr);
match daemonize.start() {
Ok(_) => info!("Success, daemonized"),
Err(e) => error!("Error, {}", e),
}
}
// Initialize tenant manager.
tenant_mgr::init(conf);
// keep join handles for spawned threads
let mut join_handles = vec![];
// initialize authentication for incoming connections
let auth = match &conf.auth_type {
AuthType::Trust | AuthType::MD5 => None,
AuthType::ZenithJWT => {
// unwrap is ok because check is performed when creating config, so path is set and file exists
let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
Some(JwtAuth::from_key_path(key_path)?.into())
}
};
info!("Using auth: {:#?}", conf.auth_type);
// Spawn a new thread for the http endpoint
// the listener was bound above, before launching the separate thread, so that any bind error is reported before startup completes
let cloned = auth.clone();
let http_endpoint_thread = thread::Builder::new()
.name("http_endpoint_thread".into())
.spawn(move || {
let router = http::make_router(conf, cloned);
endpoint::serve_thread_main(router, http_listener)
})?;
join_handles.push(http_endpoint_thread);
// Spawn a thread to | random_line_split |
|
executor.rs | //! Functions for setting configuration and executing the generator.
use cpp_to_rust_generator::common::errors::Result;
use cpp_to_rust_generator::common::{log, toml};
use cpp_to_rust_generator::common::file_utils::{PathBufWithAdded, repo_crate_local_path};
use cpp_to_rust_generator::config::{Config, CacheUsage, DebugLoggingConfig, exec};
use cpp_to_rust_generator::cpp_data::CppVisibility;
use cpp_to_rust_generator::common::cpp_build_config::{CppBuildConfigData, CppLibraryType};
use cpp_to_rust_generator::common::target;
use qt_generator_common::{get_installation_data, lib_folder_name, lib_dependencies};
use std::path::PathBuf;
use versions;
use doc_parser::DocParser;
use fix_header_names::fix_header_names;
use cpp_to_rust_generator::cpp_method::CppMethod;
use cpp_to_rust_generator::cpp_data::CppTypeKind;
use cpp_to_rust_generator::config::{CrateProperties, is_completed};
use doc_decoder::DocData;
use lib_configs;
/// Options passed to `exec_all`,
/// as in `cpp_to_rust_generator::config::Config`.
pub struct ExecConfig {
pub write_dependencies_local_paths: bool,
pub cache_usage: CacheUsage,
pub write_cache: bool,
pub debug_logging_config: DebugLoggingConfig,
pub quiet_mode: bool,
}
/// Executes generator for `libs` with given configuration.
pub fn exec_all(libs: Vec<String>,
cache_dir: PathBuf,
output_dir: PathBuf,
config: ExecConfig)
-> Result<()> {
if config.quiet_mode {
let mut logger = log::default_logger();
logger.set_category_settings(log::Status,
log::LoggerSettings {
file_path: None,
write_to_stderr: false,
});
}
let crate_templates_path =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).with_added("crate_templates");
let final_libs = if libs.iter().any(|x| x == "all") {
vec!["core".to_string(),
"gui".to_string(),
"widgets".to_string(),
"ui_tools".to_string(),
"3d_core".to_string(),
"3d_render".to_string(),
"3d_input".to_string(),
"3d_logic".to_string(),
"3d_extras".to_string()]
} else {
libs
};
let mut configs: Vec<Config> = Vec::new();
for sublib_name in final_libs {
let lib_cache_dir = cache_dir.with_added(format!("qt_{}", sublib_name));
let lib_crate_templates_path = crate_templates_path.with_added(&sublib_name);
let lib_output_dir = output_dir.with_added(format!("qt_{}", sublib_name));
let mut dependency_paths = Vec::new();
for dep in lib_dependencies(&sublib_name)? {
let path = cache_dir.with_added(format!("qt_{}", dep));
if !configs.iter().any(|c| c.cache_dir_path() == &path) && !is_completed(&path) {
return Err(format!("\"{}\" depends on \"{}\" but processing \
in \"{}\" directory is not completed.",
sublib_name,
dep,
path.display())
.into());
}
dependency_paths.push(path);
}
if is_completed(&lib_cache_dir) && config.cache_usage.can_skip_all() {
log::status("No processing! cpp_to_rust uses previous results.");
log::status("Run with -C0 to force full processing.");
continue;
}
configs.push(make_config(&sublib_name,
lib_cache_dir,
lib_output_dir,
lib_crate_templates_path,
dependency_paths,
&config)?);
}
exec(configs.into_iter())?;
Ok(())
}
/// Executes the generator for a single Qt module with given configuration.
fn make_config(sublib_name: &str,
cache_dir: PathBuf,
output_dir: PathBuf,
crate_templates_path: PathBuf,
dependency_paths: Vec<PathBuf>,
exec_config: &ExecConfig)
-> Result<Config> {
log::status(format!("Preparing generator config for library: {}", sublib_name));
let crate_name = format!("qt_{}", sublib_name);
let mut crate_properties = CrateProperties::new(crate_name.clone(),
versions::QT_OUTPUT_CRATES_VERSION);
let mut custom_fields = toml::value::Table::new();
let mut package_data = toml::value::Table::new();
package_data.insert("authors".to_string(),
toml::Value::Array(vec![toml::Value::String("Pavel Strakhov <[email protected]>"
.to_string())]));
let description = format!("Bindings for {} C++ library (generated automatically with cpp_to_rust project)",
lib_folder_name(sublib_name));
package_data.insert("description".to_string(), toml::Value::String(description));
let doc_url = format!("https://rust-qt.github.io/rustdoc/qt/{}", &crate_name);
package_data.insert("documentation".to_string(), toml::Value::String(doc_url));
package_data.insert("repository".to_string(),
toml::Value::String("https://github.com/rust-qt/cpp_to_rust".to_string()));
package_data.insert("license".to_string(),
toml::Value::String("MIT".to_string()));
custom_fields.insert("package".to_string(), toml::Value::Table(package_data));
crate_properties.set_custom_fields(custom_fields);
crate_properties.remove_default_build_dependencies();
let qt_build_tools_path = if exec_config.write_dependencies_local_paths {
Some(repo_crate_local_path("qt_generator/qt_build_tools")?)
} else {
None
};
crate_properties.add_build_dependency("qt_build_tools",
versions::QT_BUILD_TOOLS_VERSION,
qt_build_tools_path);
let mut config = Config::new(&output_dir, &cache_dir, crate_properties);
let installation_data = get_installation_data(sublib_name)?;
config.add_include_path(&installation_data.root_include_path);
config.add_include_path(&installation_data.lib_include_path);
for dep in lib_dependencies(&sublib_name)? {
let dep_data = get_installation_data(dep)?;
config.add_include_path(&dep_data.lib_include_path);
}
config.add_target_include_path(&installation_data.lib_include_path);
config.set_cache_usage(exec_config.cache_usage.clone());
config.set_write_dependencies_local_paths(exec_config.write_dependencies_local_paths);
config.set_write_cache(exec_config.write_cache);
config.set_quiet_mode(exec_config.quiet_mode);
config.set_debug_logging_config(exec_config.debug_logging_config.clone());
config.set_cpp_lib_version(installation_data.qt_version.as_str());
if exec_config.write_dependencies_local_paths {
log::status("Output Cargo.toml file will contain local paths of used dependencies \
(use --no-local-paths to disable).");
} else {
log::status("Local paths will not be written to the output crate. Make sure all dependencies \
are published before trying to compile the crate.");
}
// TODO: does parsing work on MacOS without adding "-F"?
config.add_include_directive(&lib_folder_name(sublib_name));
let lib_include_path = installation_data.lib_include_path.clone();
config.add_cpp_data_filter(move |cpp_data| fix_header_names(cpp_data, &lib_include_path));
config.add_cpp_parser_arguments(vec!["-fPIC", "-fcxx-exceptions"]);
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-std=gnu++11");
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-fPIC");
// msvc and mingw don't need this
config
.cpp_build_config_mut()
.add(target::Condition::OS(target::OS::Windows).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.set_library_type(CppLibraryType::Shared);
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc), data);
}
if target::current_env() == target::Env::Msvc {
config.add_cpp_parser_argument("-std=c++14");
} else {
config.add_cpp_parser_argument("-std=gnu++11");
}
config.add_cpp_parser_blocked_name("qt_check_for_QGADGET_macro");
let sublib_name_clone = sublib_name.to_string();
let docs_path = installation_data.docs_path.clone();
config.add_cpp_data_filter(move |cpp_data| {
match DocData::new(&sublib_name_clone, &docs_path) {
Ok(doc_data) => {
let mut parser = DocParser::new(doc_data);
find_methods_docs(&mut cpp_data.methods, &mut parser)?; | type1.doc = Some(doc.0);
if let CppTypeKind::Enum { ref mut values } = type1.kind {
let enum_namespace = if let Some(index) = type1.name.rfind("::") {
type1.name[0..index + 2].to_string()
} else {
String::new()
};
for value in values {
if let Some(r) = doc.1.iter().find(|x| x.name == value.name) {
value.doc = Some(r.html.clone());
// let full_name = format!("{}::{}", enum_namespace, &value.name);
// println!("full name: {}", full_name);
parser.mark_enum_variant_used(&format!("{}{}", enum_namespace, &value.name));
} else {
let type_name = &type1.name;
log::llog(log::DebugQtDoc, || {
format!("Not found doc for enum variant: {}::{}",
type_name,
&value.name)
});
}
}
}
}
Err(err) => {
log::llog(log::DebugQtDoc,
|| format!("Not found doc for type: {}: {}", type1.name, err));
}
}
}
parser.report_unused_anchors();
}
Err(err) => {
log::error(format!("Failed to get Qt documentation: {}", err));
err.discard_expected();
}
}
Ok(())
});
config.set_crate_template_path(crate_templates_path);
match sublib_name {
"core" => lib_configs::core(&mut config)?,
"gui" => lib_configs::gui(&mut config)?,
"widgets" => lib_configs::widgets(&mut config)?,
"3d_core" => lib_configs::core_3d(&mut config)?,
"3d_render" => lib_configs::render_3d(&mut config)?,
"3d_input" => lib_configs::input_3d(&mut config)?,
"3d_logic" => lib_configs::logic_3d(&mut config)?,
"3d_extras" => lib_configs::extras_3d(&mut config)?,
"ui_tools" => {}
_ => return Err(format!("Unknown lib name: {}", sublib_name).into()),
}
config.set_dependency_cache_paths(dependency_paths);
Ok(config)
}
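// `add_cpp_data_filter` callbacks run in registration order; a minimal extra
// filter would look like this (sketch only, mirroring the two registered in
// `make_config` above):
//
// config.add_cpp_data_filter(|cpp_data| {
//     log::status(format!("parsed {} C++ methods", cpp_data.methods.len()));
//     Ok(())
// });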
/// Adds documentation from `data` to `cpp_methods`.
fn find_methods_docs(cpp_methods: &mut [CppMethod], data: &mut DocParser) -> Result<()> {
for cpp_method in cpp_methods {
if let Some(ref info) = cpp_method.class_membership {
if info.visibility == CppVisibility::Private {
continue;
}
}
if let Some(ref declaration_code) = cpp_method.declaration_code {
match data.doc_for_method(&cpp_method.doc_id(),
declaration_code,
&cpp_method.short_text()) {
Ok(doc) => cpp_method.doc = Some(doc),
Err(msg) => {
if cpp_method.class_membership.is_some() &&
(&cpp_method.name == "tr" || &cpp_method.name == "trUtf8" ||
&cpp_method.name == "metaObject") {
// no error message
} else {
log::llog(log::DebugQtDoc, || {
format!("Failed to get documentation for method: {}: {}",
&cpp_method.short_text(),
msg)
});
}
}
}
}
}
Ok(())
} | for type1 in &mut cpp_data.types {
match parser.doc_for_type(&type1.name) {
Ok(doc) => {
// log::debug(format!("Found doc for type: {}", type1.name)); | random_line_split |
executor.rs | //! Functions for setting configuration and executing the generator.
use cpp_to_rust_generator::common::errors::Result;
use cpp_to_rust_generator::common::{log, toml};
use cpp_to_rust_generator::common::file_utils::{PathBufWithAdded, repo_crate_local_path};
use cpp_to_rust_generator::config::{Config, CacheUsage, DebugLoggingConfig, exec};
use cpp_to_rust_generator::cpp_data::CppVisibility;
use cpp_to_rust_generator::common::cpp_build_config::{CppBuildConfigData, CppLibraryType};
use cpp_to_rust_generator::common::target;
use qt_generator_common::{get_installation_data, lib_folder_name, lib_dependencies};
use std::path::PathBuf;
use versions;
use doc_parser::DocParser;
use fix_header_names::fix_header_names;
use cpp_to_rust_generator::cpp_method::CppMethod;
use cpp_to_rust_generator::cpp_data::CppTypeKind;
use cpp_to_rust_generator::config::{CrateProperties, is_completed};
use doc_decoder::DocData;
use lib_configs;
/// Options passed to `exec_all`,
/// as in `cpp_to_rust_generator::config::Config`.
pub struct ExecConfig {
pub write_dependencies_local_paths: bool,
pub cache_usage: CacheUsage,
pub write_cache: bool,
pub debug_logging_config: DebugLoggingConfig,
pub quiet_mode: bool,
}
/// Executes generator for `libs` with given configuration.
pub fn exec_all(libs: Vec<String>,
cache_dir: PathBuf,
output_dir: PathBuf,
config: ExecConfig)
-> Result<()> {
if config.quiet_mode {
let mut logger = log::default_logger();
logger.set_category_settings(log::Status,
log::LoggerSettings {
file_path: None,
write_to_stderr: false,
});
}
let crate_templates_path =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).with_added("crate_templates");
let final_libs = if libs.iter().any(|x| x == "all") {
vec!["core".to_string(),
"gui".to_string(),
"widgets".to_string(),
"ui_tools".to_string(),
"3d_core".to_string(),
"3d_render".to_string(),
"3d_input".to_string(),
"3d_logic".to_string(),
"3d_extras".to_string()]
} else {
libs
};
let mut configs: Vec<Config> = Vec::new();
for sublib_name in final_libs {
let lib_cache_dir = cache_dir.with_added(format!("qt_{}", sublib_name));
let lib_crate_templates_path = crate_templates_path.with_added(&sublib_name);
let lib_output_dir = output_dir.with_added(format!("qt_{}", sublib_name));
let mut dependency_paths = Vec::new();
for dep in lib_dependencies(&sublib_name)? {
let path = cache_dir.with_added(format!("qt_{}", dep));
if !configs.iter().any(|c| c.cache_dir_path() == &path) && !is_completed(&path) {
return Err(format!("\"{}\" depends on \"{}\" but processing \
in \"{}\" directory is not completed.",
sublib_name,
dep,
path.display())
.into());
}
dependency_paths.push(path);
}
if is_completed(&lib_cache_dir) && config.cache_usage.can_skip_all() {
log::status("No processing! cpp_to_rust uses previous results.");
log::status("Run with -C0 to force full processing.");
continue;
}
configs.push(make_config(&sublib_name,
lib_cache_dir,
lib_output_dir,
lib_crate_templates_path,
dependency_paths,
&config)?);
}
exec(configs.into_iter())?;
Ok(())
}
/// Executes the generator for a single Qt module with given configuration.
fn make_config(sublib_name: &str,
cache_dir: PathBuf,
output_dir: PathBuf,
crate_templates_path: PathBuf,
dependency_paths: Vec<PathBuf>,
exec_config: &ExecConfig)
-> Result<Config> {
log::status(format!("Preparing generator config for library: {}", sublib_name));
let crate_name = format!("qt_{}", sublib_name);
let mut crate_properties = CrateProperties::new(crate_name.clone(),
versions::QT_OUTPUT_CRATES_VERSION);
let mut custom_fields = toml::value::Table::new();
let mut package_data = toml::value::Table::new();
package_data.insert("authors".to_string(),
toml::Value::Array(vec![toml::Value::String("Pavel Strakhov <[email protected]>"
.to_string())]));
let description = format!("Bindings for {} C++ library (generated automatically with cpp_to_rust project)",
lib_folder_name(sublib_name));
package_data.insert("description".to_string(), toml::Value::String(description));
let doc_url = format!("https://rust-qt.github.io/rustdoc/qt/{}", &crate_name);
package_data.insert("documentation".to_string(), toml::Value::String(doc_url));
package_data.insert("repository".to_string(),
toml::Value::String("https://github.com/rust-qt/cpp_to_rust".to_string()));
package_data.insert("license".to_string(),
toml::Value::String("MIT".to_string()));
custom_fields.insert("package".to_string(), toml::Value::Table(package_data));
crate_properties.set_custom_fields(custom_fields);
crate_properties.remove_default_build_dependencies();
let qt_build_tools_path = if exec_config.write_dependencies_local_paths {
Some(repo_crate_local_path("qt_generator/qt_build_tools")?)
} else {
None
};
crate_properties.add_build_dependency("qt_build_tools",
versions::QT_BUILD_TOOLS_VERSION,
qt_build_tools_path);
let mut config = Config::new(&output_dir, &cache_dir, crate_properties);
let installation_data = get_installation_data(sublib_name)?;
config.add_include_path(&installation_data.root_include_path);
config.add_include_path(&installation_data.lib_include_path);
for dep in lib_dependencies(&sublib_name)? {
let dep_data = get_installation_data(dep)?;
config.add_include_path(&dep_data.lib_include_path);
}
config.add_target_include_path(&installation_data.lib_include_path);
config.set_cache_usage(exec_config.cache_usage.clone());
config.set_write_dependencies_local_paths(exec_config.write_dependencies_local_paths);
config.set_write_cache(exec_config.write_cache);
config.set_quiet_mode(exec_config.quiet_mode);
config.set_debug_logging_config(exec_config.debug_logging_config.clone());
config.set_cpp_lib_version(installation_data.qt_version.as_str());
if exec_config.write_dependencies_local_paths {
log::status("Output Cargo.toml file will contain local paths of used dependencies \
(use --no-local-paths to disable).");
} else {
log::status("Local paths will not be written to the output crate. Make sure all dependencies \
are published before trying to compile the crate.");
}
// TODO: does parsing work on MacOS without adding "-F"?
config.add_include_directive(&lib_folder_name(sublib_name));
let lib_include_path = installation_data.lib_include_path.clone();
config.add_cpp_data_filter(move |cpp_data| fix_header_names(cpp_data, &lib_include_path));
config.add_cpp_parser_arguments(vec!["-fPIC", "-fcxx-exceptions"]);
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-std=gnu++11");
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-fPIC");
// msvc and mingw don't need this
config
.cpp_build_config_mut()
.add(target::Condition::OS(target::OS::Windows).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.set_library_type(CppLibraryType::Shared);
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc), data);
}
if target::current_env() == target::Env::Msvc {
config.add_cpp_parser_argument("-std=c++14");
} else {
config.add_cpp_parser_argument("-std=gnu++11");
}
config.add_cpp_parser_blocked_name("qt_check_for_QGADGET_macro");
let sublib_name_clone = sublib_name.to_string();
let docs_path = installation_data.docs_path.clone();
config.add_cpp_data_filter(move |cpp_data| {
match DocData::new(&sublib_name_clone, &docs_path) {
Ok(doc_data) => {
let mut parser = DocParser::new(doc_data);
find_methods_docs(&mut cpp_data.methods, &mut parser)?;
for type1 in &mut cpp_data.types {
match parser.doc_for_type(&type1.name) {
Ok(doc) => {
// log::debug(format!("Found doc for type: {}", type1.name));
type1.doc = Some(doc.0);
if let CppTypeKind::Enum { ref mut values } = type1.kind {
let enum_namespace = if let Some(index) = type1.name.rfind("::") {
type1.name[0..index + 2].to_string()
} else {
String::new()
};
for value in values {
if let Some(r) = doc.1.iter().find(|x| x.name == value.name) {
value.doc = Some(r.html.clone());
// let full_name = format!("{}::{}", enum_namespace, &value.name);
// println!("full name: {}", full_name);
parser.mark_enum_variant_used(&format!("{}{}", enum_namespace, &value.name));
} else {
let type_name = &type1.name;
log::llog(log::DebugQtDoc, || {
format!("Not found doc for enum variant: {}::{}",
type_name,
&value.name)
});
}
}
}
}
Err(err) => {
log::llog(log::DebugQtDoc,
|| format!("Not found doc for type: {}: {}", type1.name, err));
}
}
}
parser.report_unused_anchors();
}
Err(err) => {
log::error(format!("Failed to get Qt documentation: {}", err));
err.discard_expected();
}
}
Ok(())
});
config.set_crate_template_path(crate_templates_path);
match sublib_name {
"core" => lib_configs::core(&mut config)?,
"gui" => lib_configs::gui(&mut config)?,
"widgets" => lib_configs::widgets(&mut config)?,
"3d_core" => lib_configs::core_3d(&mut config)?,
"3d_render" => lib_configs::render_3d(&mut config)?,
"3d_input" => lib_configs::input_3d(&mut config)?,
"3d_logic" => lib_configs::logic_3d(&mut config)?,
"3d_extras" => lib_configs::extras_3d(&mut config)?,
"ui_tools" => {}
_ => return Err(format!("Unknown lib name: {}", sublib_name).into()),
}
config.set_dependency_cache_paths(dependency_paths);
Ok(config)
}
/// Adds documentation from `data` to `cpp_methods`.
fn | (cpp_methods: &mut [CppMethod], data: &mut DocParser) -> Result<()> {
for cpp_method in cpp_methods {
if let Some(ref info) = cpp_method.class_membership {
if info.visibility == CppVisibility::Private {
continue;
}
}
if let Some(ref declaration_code) = cpp_method.declaration_code {
match data.doc_for_method(&cpp_method.doc_id(),
declaration_code,
&cpp_method.short_text()) {
Ok(doc) => cpp_method.doc = Some(doc),
Err(msg) => {
if cpp_method.class_membership.is_some() &&
(&cpp_method.name == "tr" || &cpp_method.name == "trUtf8" ||
&cpp_method.name == "metaObject") {
// no error message
} else {
log::llog(log::DebugQtDoc, || {
format!("Failed to get documentation for method: {}: {}",
&cpp_method.short_text(),
msg)
});
}
}
}
}
}
Ok(())
}
| find_methods_docs | identifier_name |
set3.rs | use std::collections::VecDeque;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{u8, u16};
use rand::{self, Rng};
use rand::distributions::{IndependentSample, Range};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use errors::*;
use prelude::*;
use set1::{decrypt_single_byte_xor_cipher, break_repeating_key_xor};
lazy_static! {
static ref CBC_PADDING_ORACLE_KEY: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_ORACLE_IV: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_STRINGS: Vec<Vec<u8>> = {
let mut result = Vec::new();
result.push(from_base64_string("MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=").unwrap());
result.push(from_base64_string("MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=").unwrap());
result.push(from_base64_string("MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==").unwrap());
result.push(from_base64_string("MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==").unwrap());
result.push(from_base64_string("MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl").unwrap());
result.push(from_base64_string("MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==").unwrap());
result.push(from_base64_string("MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==").unwrap());
result.push(from_base64_string("MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=").unwrap());
result.push(from_base64_string("MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=").unwrap());
result.push(from_base64_string("MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93").unwrap());
result
};
}
pub fn random_ciphertext() -> Result<(Vec<u8>, Vec<u8>)> {
let mut rng = rand::thread_rng();
let plaintext = rng.choose(&CBC_PADDING_STRINGS).unwrap();
let padded_plaintext = pad_pkcs7(plaintext, 16);
aes_128_cbc_encrypt_no_padding(&CBC_PADDING_ORACLE_KEY,
&CBC_PADDING_ORACLE_IV,
&padded_plaintext)
.map(|ciphertext| (ciphertext, CBC_PADDING_ORACLE_IV.to_vec()))
}
pub fn padding_oracle(ciphertext: &[u8]) -> bool {
aes_128_cbc_decrypt_no_padding(&CBC_PADDING_ORACLE_KEY, &CBC_PADDING_ORACLE_IV, ciphertext)
.map(|plaintext| is_pkcs7_padded(&plaintext))
.unwrap_or(false)
}
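// PKCS#7 recap for the oracle above: a plaintext is "padded" when it ends in
// n bytes each equal to n, 1 <= n <= 16. E.g. a 5-byte message padded to one
// 16-byte block ends with eleven 0x0B bytes; an exact multiple of 16 gains a
// whole extra block of 0x10 bytes.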
pub fn decrypt_ciphertext(ciphertext: &[u8], iv: &[u8]) -> Result<Vec<u8>> {
// the key idea is that plaintext xored with the previous ciphertext block
// creates an intermediate state.
// however, if a server leaks information about the padding of a block
// (by returning 500 when a block is not padded for example)
// then we can calculate this intermediate state and xor the previous
// real ciphertext block with the intermediate state to get the plaintext
// instantly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
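// worked example with made-up bytes: if z = 0x5e at the last position
// yields valid padding p' = 0x01, then i = 0x5e ^ 0x01 = 0x5f, and with
// the real previous-ciphertext byte c1[15] = 0x2c the recovered plaintext
// byte is p = 0x2c ^ 0x5f = 0x73 (ascii 's')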
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
for z in 0..u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
}
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
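// Minimal round-trip sketch for the attack above (test-style; relies on the
// lazy_static oracle key/IV and on every plaintext starting with its index):
#[cfg(test)]
mod cbc_padding_oracle_tests {
    use super::*;

    #[test]
    fn recovers_plaintext_without_the_key() {
        let (ciphertext, iv) = random_ciphertext().unwrap();
        let plaintext = decrypt_ciphertext(&ciphertext, &iv).unwrap();
        // all CBC_PADDING_STRINGS begin with a six-digit index like "000000"
        assert!(plaintext.starts_with(b"0000"));
    }
}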
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// every message was encrypted with the same keystream,
// i.e. a single repeated "fixed xor" key;
// that means we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it column by column, one keystream byte position at a time
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
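// Why the transposition works: with keystream K reused, c1 ^ c2 == p1 ^ p2,
// so each byte column is an independent single-byte-XOR cipher. A tiny
// sanity check of that identity (made-up data; assumes `fixed_xor` XORs
// equal-length slices, as it is used above):
#[cfg(test)]
mod same_nonce_tests {
    use super::*;

    #[test]
    fn keystream_reuse_cancels_out() {
        let keystream = b"\x11\x22\x33";
        let (c1, c2) = (fixed_xor(b"abc", keystream), fixed_xor(b"xyz", keystream));
        assert_eq!(fixed_xor(&c1, &c2), fixed_xor(b"abc", b"xyz"));
    }
}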
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]);
}
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
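// Usage sketch for the timestamp attack (assumes the seed was taken at most
// 10000 seconds before `unix_timestamp`, matching the search window above):
#[cfg(test)]
mod seed_crack_tests {
    use super::*;
    use std::time::{SystemTime, UNIX_EPOCH};

    #[test]
    fn recovers_a_recent_time_seed() {
        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as u32;
        let seed = now - 42; // pretend the generator was seeded 42 seconds ago
        assert_eq!(crack_mt19937_seed(mersenne_rng(seed), now), seed);
    }
}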
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1,...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
// state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
// since more than half of the bits are the same, its very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
// since more than half of the bits are the same, its very easy to recover again
y ^= (y << 15) & 0xEFC60000;
// (2) y = y ^ ((y << 7) & 0x9D2C5680
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
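// The untempering above can be sanity-checked against the forward MT19937
// tempering transform (same shift/mask constants as in the comments):
#[cfg(test)]
mod untemper_tests {
    use super::*;

    fn temper(mut y: u32) -> u32 {
        y ^= y >> 11;
        y ^= (y << 7) & 0x9D2C5680;
        y ^= (y << 15) & 0xEFC60000;
        y ^= y >> 18;
        y
    }

    #[test]
    fn untemper_inverts_temper() {
        let state = [0u32, 1, 0xDEAD_BEEF, 0xFFFF_FFFF];
        let outputs: Vec<u32> = state.iter().map(|&s| temper(s)).collect();
        assert_eq!(crack_mt19937_state(&outputs), state.to_vec());
    }
}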
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> |
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
(0..u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
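// Only 2^16 = 65536 candidate seeds exist, so the parallel brute force above
// is effectively instant; the known 14-byte b"AAAAAAAAAAAAAA" suffix is what
// identifies the correct decryption.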
pub fn generate_password_reset_token() -> Result<Vec<u8>> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"user_id=123456&expires=1000");
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
let key: Vec<_> =
MersenneTwister::new(unix_timestamp).keystream().take(plaintext.len()).collect();
Ok(fixed_xor(&plaintext, &key))
}
pub fn is_password_token_using_mt19937(token: &[u8]) -> Result<bool> {
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
Ok((0u32..10000u32)
.into_par_iter()
.map(|i| {
let key: Vec<_> =
MersenneTwister::new(unix_timestamp - i).keystream().take(token.len()).collect();
fixed_xor(token, &key)
})
.find_any(|plaintext| {
plaintext.windows(b"user_id=".len()).position(|window| window == b"user_id=").is_some()
})
.is_some())
}
| {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
} | identifier_body |
set3.rs | use std::collections::VecDeque;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{u8, u16};
use rand::{self, Rng};
use rand::distributions::{IndependentSample, Range};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use errors::*;
use prelude::*;
use set1::{decrypt_single_byte_xor_cipher, break_repeating_key_xor};
lazy_static! {
static ref CBC_PADDING_ORACLE_KEY: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_ORACLE_IV: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_STRINGS: Vec<Vec<u8>> = {
let mut result = Vec::new();
result.push(from_base64_string("MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=").unwrap());
result.push(from_base64_string("MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=").unwrap());
result.push(from_base64_string("MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==").unwrap());
result.push(from_base64_string("MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==").unwrap());
result.push(from_base64_string("MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl").unwrap());
result.push(from_base64_string("MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==").unwrap());
result.push(from_base64_string("MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==").unwrap());
result.push(from_base64_string("MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=").unwrap());
result.push(from_base64_string("MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=").unwrap());
result.push(from_base64_string("MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93").unwrap());
result
};
}
pub fn random_ciphertext() -> Result<(Vec<u8>, Vec<u8>)> {
let mut rng = rand::thread_rng();
let plaintext = rng.choose(&CBC_PADDING_STRINGS).unwrap();
let padded_plaintext = pad_pkcs7(plaintext, 16);
aes_128_cbc_encrypt_no_padding(&CBC_PADDING_ORACLE_KEY,
&CBC_PADDING_ORACLE_IV,
&padded_plaintext)
.map(|ciphertext| (ciphertext, CBC_PADDING_ORACLE_IV.to_vec()))
}
pub fn padding_oracle(ciphertext: &[u8]) -> bool {
aes_128_cbc_decrypt_no_padding(&CBC_PADDING_ORACLE_KEY, &CBC_PADDING_ORACLE_IV, ciphertext)
.map(|plaintext| is_pkcs7_padded(&plaintext))
.unwrap_or(false)
}
pub fn decrypt_ciphertext(ciphertext: &[u8], iv: &[u8]) -> Result<Vec<u8>> {
// the key idea is that plaintext xored with the previous ciphertext block
// creates an intermediate state.
// however, if a server leaks information about the padding of a block
// (by returning 500 when a block is not padded for example)
// then we can calculate this intermediate state and xor the previous
// real ciphertext block with the intermediate state to get the plaintext
// instantly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
for z in 0..u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
}
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// every message was encrypted with the same keystream,
// i.e. a single repeated "fixed xor" key;
// that means we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it column by column, one keystream byte position at a time
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new(); | }
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1,...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
// state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
// since more than half of the bits are the same, its very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
// since more than half of the bits are the same, its very easy to recover again
y ^= (y << 15) & 0xEFC60000;
// (2) y = y ^ ((y << 7) & 0x9D2C5680
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
}
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
(0..u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
pub fn generate_password_reset_token() -> Result<Vec<u8>> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"user_id=123456&expires=1000");
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
let key: Vec<_> =
MersenneTwister::new(unix_timestamp).keystream().take(plaintext.len()).collect();
Ok(fixed_xor(&plaintext, &key))
}
pub fn is_password_token_using_mt19937(token: &[u8]) -> Result<bool> {
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
Ok((0u32..10000u32)
.into_par_iter()
.map(|i| {
let key: Vec<_> =
MersenneTwister::new(unix_timestamp - i).keystream().take(token.len()).collect();
fixed_xor(token, &key)
})
.find_any(|plaintext| {
plaintext.windows(b"user_id=".len()).position(|window| window == b"user_id=").is_some()
})
.is_some())
} | for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]); | random_line_split |
set3.rs | use std::collections::VecDeque;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{u8, u16};
use rand::{self, Rng};
use rand::distributions::{IndependentSample, Range};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use errors::*;
use prelude::*;
use set1::{decrypt_single_byte_xor_cipher, break_repeating_key_xor};
lazy_static! {
static ref CBC_PADDING_ORACLE_KEY: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_ORACLE_IV: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_STRINGS: Vec<Vec<u8>> = {
let mut result = Vec::new();
result.push(from_base64_string("MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=").unwrap());
result.push(from_base64_string("MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=").unwrap());
result.push(from_base64_string("MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==").unwrap());
result.push(from_base64_string("MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==").unwrap());
result.push(from_base64_string("MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl").unwrap());
result.push(from_base64_string("MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==").unwrap());
result.push(from_base64_string("MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==").unwrap());
result.push(from_base64_string("MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=").unwrap());
result.push(from_base64_string("MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=").unwrap());
result.push(from_base64_string("MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93").unwrap());
result
};
}
pub fn random_ciphertext() -> Result<(Vec<u8>, Vec<u8>)> {
let mut rng = rand::thread_rng();
let plaintext = rng.choose(&CBC_PADDING_STRINGS).unwrap();
let padded_plaintext = pad_pkcs7(plaintext, 16);
aes_128_cbc_encrypt_no_padding(&CBC_PADDING_ORACLE_KEY,
&CBC_PADDING_ORACLE_IV,
&padded_plaintext)
.map(|ciphertext| (ciphertext, CBC_PADDING_ORACLE_IV.to_vec()))
}
pub fn padding_oracle(ciphertext: &[u8]) -> bool {
aes_128_cbc_decrypt_no_padding(&CBC_PADDING_ORACLE_KEY, &CBC_PADDING_ORACLE_IV, ciphertext)
.map(|plaintext| is_pkcs7_padded(&plaintext))
.unwrap_or(false)
}
pub fn decrypt_ciphertext(ciphertext: &[u8], iv: &[u8]) -> Result<Vec<u8>> {
// the key idea is that plaintext xored with the previous ciphertext block
// creates an intermediate state.
// however, if a server leaks information about the padding of a block
// (by returning 500 when a block is not padded for example)
// then we can calculate this intermediate state and xor the previous
// real ciphertext block with the intermediate state to get the plaintext
// instantly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
            for z in 0..=u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
}
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
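// A minimal, self-contained sketch of the byte-recovery relation used above,
// with made-up example values (not taken from the real oracle): if forging
// byte z at position i of c1' makes the oracle report valid padding `pad`,
// the block cipher's intermediate byte is z ^ pad, and the real plaintext
// byte is previous_block[i] ^ intermediate.
#[cfg(test)]
mod padding_oracle_relation_sketch {
    #[test]
    fn recovers_plaintext_byte() {
        let real_prev_ct: u8 = 0x3a; // hypothetical c1[15]
        let intermediate: u8 = 0x9c; // hypothetical D(c2)[15]
        let plaintext = real_prev_ct ^ intermediate; // what CBC decryption yields
        // The attacker finds z such that z ^ intermediate == 0x01 (valid padding)
        let pad: u8 = 0x01;
        let z = intermediate ^ pad;
        // Recover exactly as decrypt_ciphertext does: p = c1[i] ^ z ^ pad
        assert_eq!(real_prev_ct ^ z ^ pad, plaintext);
    }
}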
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
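// Why the transposition works — a small self-contained illustration (all
// values invented for the example): reusing one CTR keystream makes every
// ciphertext column i a single-byte-XOR cipher under keystream byte K_i,
// and XORing two ciphertexts cancels the keystream entirely.
#[cfg(test)]
mod nonce_reuse_sketch {
    #[test]
    fn xor_of_ciphertexts_cancels_keystream() {
        let keystream = [0x11u8, 0x22, 0x33, 0x44];
        let p1 = b"abcd";
        let p2 = b"wxyz";
        let c1: Vec<u8> = p1.iter().zip(&keystream).map(|(p, k)| p ^ k).collect();
        let c2: Vec<u8> = p2.iter().zip(&keystream).map(|(p, k)| p ^ k).collect();
        for i in 0..4 {
            assert_eq!(c1[i] ^ c2[i], p1[i] ^ p2[i]);
        }
    }
}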
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]);
}
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1,...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
            // state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
            // since more than half of the bits are the same, it's very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
            // since more than half of the bits are the same, it's very easy to recover again
y ^= (y << 15) & 0xEFC60000;
            // (2) y = y ^ ((y << 7) & 0x9D2C5680)
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
}
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
    (0..=u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
pub fn | () -> Result<Vec<u8>> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"user_id=123456&expires=1000");
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
let key: Vec<_> =
MersenneTwister::new(unix_timestamp).keystream().take(plaintext.len()).collect();
Ok(fixed_xor(&plaintext, &key))
}
pub fn is_password_token_using_mt19937(token: &[u8]) -> Result<bool> {
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
Ok((0u32..10000u32)
.into_par_iter()
.map(|i| {
let key: Vec<_> =
MersenneTwister::new(unix_timestamp - i).keystream().take(token.len()).collect();
fixed_xor(token, &key)
})
.find_any(|plaintext| {
            plaintext.windows(b"user_id=".len()).any(|window| window == b"user_id=")
})
.is_some())
}
| generate_password_reset_token | identifier_name |
set3.rs | use std::collections::VecDeque;
use std::time::{SystemTime, UNIX_EPOCH};
use std::{u8, u16};
use rand::{self, Rng};
use rand::distributions::{IndependentSample, Range};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use errors::*;
use prelude::*;
use set1::{decrypt_single_byte_xor_cipher, break_repeating_key_xor};
lazy_static! {
static ref CBC_PADDING_ORACLE_KEY: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_ORACLE_IV: Vec<u8> = random_bytes(16).unwrap();
static ref CBC_PADDING_STRINGS: Vec<Vec<u8>> = {
let mut result = Vec::new();
result.push(from_base64_string("MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=").unwrap());
result.push(from_base64_string("MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=").unwrap());
result.push(from_base64_string("MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==").unwrap());
result.push(from_base64_string("MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==").unwrap());
result.push(from_base64_string("MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl").unwrap());
result.push(from_base64_string("MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==").unwrap());
result.push(from_base64_string("MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==").unwrap());
result.push(from_base64_string("MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=").unwrap());
result.push(from_base64_string("MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=").unwrap());
result.push(from_base64_string("MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93").unwrap());
result
};
}
pub fn random_ciphertext() -> Result<(Vec<u8>, Vec<u8>)> {
let mut rng = rand::thread_rng();
let plaintext = rng.choose(&CBC_PADDING_STRINGS).unwrap();
let padded_plaintext = pad_pkcs7(plaintext, 16);
aes_128_cbc_encrypt_no_padding(&CBC_PADDING_ORACLE_KEY,
&CBC_PADDING_ORACLE_IV,
&padded_plaintext)
.map(|ciphertext| (ciphertext, CBC_PADDING_ORACLE_IV.to_vec()))
}
pub fn padding_oracle(ciphertext: &[u8]) -> bool {
aes_128_cbc_decrypt_no_padding(&CBC_PADDING_ORACLE_KEY, &CBC_PADDING_ORACLE_IV, ciphertext)
.map(|plaintext| is_pkcs7_padded(&plaintext))
.unwrap_or(false)
}
pub fn decrypt_ciphertext(ciphertext: &[u8], iv: &[u8]) -> Result<Vec<u8>> {
    // the key idea is that the plaintext XORed with the previous ciphertext
    // block equals an intermediate state (the block cipher's decryption of
    // the current block).
    // if a server leaks whether a block decrypted to valid padding
    // (for example by returning a 500 when it did not), we can recover this
    // intermediate state, then XOR the real previous ciphertext block with
    // it to obtain the plaintext directly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
            for z in 0..=u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) |
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]);
}
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
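// Self-contained illustration (toy sizes, invented keystream) of the trick
// above: truncating every ciphertext to min_length and concatenating them
// turns the shared keystream into a repeating-XOR key of period min_length.
#[cfg(test)]
mod repeating_key_view_sketch {
    #[test]
    fn concatenation_repeats_the_keystream() {
        let keystream = [0xAAu8, 0xBB, 0xCC];
        let plaintexts: [&[u8]; 2] = [b"hello", b"bye"];
        let min_length = 3;
        let mut concat = Vec::new();
        for p in &plaintexts {
            let c: Vec<u8> =
                p.iter().zip(&keystream).map(|(p, k)| p ^ k).collect();
            concat.extend_from_slice(&c[..min_length]);
        }
        // Every byte of the concatenation is keystream[i % min_length] ^ the
        // plaintext byte, i.e. exactly repeating-key XOR with a 3-byte key
        for (i, &c) in concat.iter().enumerate() {
            let p = if i < 3 { b"hello"[i] } else { b"bye"[i - 3] };
            assert_eq!(c, p ^ keystream[i % min_length]);
        }
    }
}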
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1,...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
            // state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
            // since more than half of the bits are the same, it's very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
            // since more than half of the bits are the same, it's very easy to recover again
y ^= (y << 15) & 0xEFC60000;
            // (2) y = y ^ ((y << 7) & 0x9D2C5680)
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
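// Sketch of the harder `y ^= (y << 7) & B` inversion performed above, using
// an invented example word: the low 7 bits of the output pass through
// untouched, and each higher bit depends only on already-recovered lower
// bits, so the original word can be rebuilt one bit at a time from the right.
#[cfg(test)]
mod untemper_lshift_sketch {
    const B: u32 = 0x9D2C_5680;

    fn untemper_lshift7(out: u32) -> u32 {
        let mut y = out & 0x0000_007F; // low 7 bits are unchanged by the temper
        for i in 7..32 {
            let bit = 1u32 << i;
            // out_i = y_i ^ (y_{i-7} & B_i); y_{i-7} is already recovered
            let contrib = (y << 7) & B & bit;
            y |= (out ^ contrib) & bit;
        }
        y
    }

    #[test]
    fn lshift_temper_roundtrip() {
        let y: u32 = 0x1234_5678; // arbitrary example word
        let tempered = y ^ ((y << 7) & B);
        assert_eq!(untemper_lshift7(tempered), y);
    }
}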
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
}
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
    (0..=u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
pub fn generate_password_reset_token() -> Result<Vec<u8>> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"user_id=123456&expires=1000");
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
let key: Vec<_> =
MersenneTwister::new(unix_timestamp).keystream().take(plaintext.len()).collect();
Ok(fixed_xor(&plaintext, &key))
}
pub fn is_password_token_using_mt19937(token: &[u8]) -> Result<bool> {
let unix_duration = SystemTime::now().duration_since(UNIX_EPOCH)?;
let unix_timestamp = unix_duration.as_secs() as u32;
Ok((0u32..10000u32)
.into_par_iter()
.map(|i| {
let key: Vec<_> =
MersenneTwister::new(unix_timestamp - i).keystream().take(token.len()).collect();
fixed_xor(token, &key)
})
.find_any(|plaintext| {
            plaintext.windows(b"user_id=".len()).any(|window| window == b"user_id=")
})
.is_some())
}
| {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
} | conditional_block |
mm.rs | collections::BTreeMap;
use crate::acpi::MAX_CORES;
use rangeset::RangeSet;
use boot_args::{KERNEL_PHYS_WINDOW_BASE, KERNEL_PHYS_WINDOW_SIZE};
use boot_args::KERNEL_VMEM_BASE;
use page_table::{PhysMem, PhysAddr, PageType, VirtAddr};
/// Table which is indexed by an APIC identifier to map to a physical range
/// which is local to its NUMA node
static APIC_TO_MEMORY_RANGE: AtomicPtr<[Option<RangeSet>; MAX_CORES]> =
AtomicPtr::new(core::ptr::null_mut());
/// Get the preferred memory range for the currently running APIC. Returns
/// `None` if we have no valid APIC ID yet, or we do not have NUMA knowledge
/// of the current APIC ID
pub fn memory_range<'a>() -> Option<&'a RangeSet> {
// Check to see if the `APIC_TO_MEMORY_RANGE` has been initialized
let atmr = APIC_TO_MEMORY_RANGE.load(Ordering::SeqCst);
if atmr.is_null() {
return None;
}
// Cast the memory range structure to something we can access
let atmr = unsafe { &*atmr };
// Based on our current APIC ID look up the memory range
core!().apic_id().and_then(|x| atmr[x as usize].as_ref())
}
/// Establish the `APIC_TO_MEMORY_RANGE` global with the APIC IDs to their
/// corresponding NUMA-local memory regions
pub unsafe fn register_numa_nodes(apic_to_domain: BTreeMap<u32, u32>,
domain_to_mem: BTreeMap<u32, RangeSet>) {
// Create a heap-based database
let mut apic_mappings: Box<MaybeUninit<[Option<RangeSet>; MAX_CORES]>> =
Box::new_uninit();
// Initialize the heap based memory
for core in 0..MAX_CORES {
        let ptr = apic_mappings.as_mut_ptr() as *mut Option<RangeSet>;
        core::ptr::write(ptr.offset(core as isize), None);
}
// APIC mappings are now initialized
let mut apic_mappings = apic_mappings.assume_init();
// Go through each APIC to domain mapping
for (&apic, domain) in apic_to_domain.iter() {
apic_mappings[apic as usize] = domain_to_mem.get(domain)
            .copied();
}
// Store the apic mapping database into the global!
APIC_TO_MEMORY_RANGE.store(Box::into_raw(apic_mappings), Ordering::SeqCst);
}
/// Find a free region of virtual memory that can hold `size` bytes and return
/// the virtual address
///
/// This is only valid for virtual requests for 4 KiB mappings
pub fn alloc_virt_addr_4k(size: u64) -> VirtAddr {
/// Base address for virtual allocations
static NEXT_FREE_VADDR: AtomicU64 = AtomicU64::new(KERNEL_VMEM_BASE);
/// Gap between virtual allocations
const GUARD_PAGE_SIZE: u64 = 32 * 1024;
assert!(size > 0 && (size & 0xfff) == 0,
"Invalid size for virtual region allocation");
// Compute the amount of virtual memory to reserve, including the guard
// size.
let reserve_size = GUARD_PAGE_SIZE.checked_add(size as u64)
.expect("Integer overflow on virtual region size");
// Get a new virtual region that is free
let ret = VirtAddr(
NEXT_FREE_VADDR.fetch_add(reserve_size, Ordering::SeqCst)
);
    // If we cannot add the reserve size to the return value, then the
    // virtual address space wrapped the 64-bit boundary
ret.0.checked_add(reserve_size)
.expect("Integer overflow on virtual address range");
ret
}
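// Standalone sketch of the bump-allocation scheme above (toy base address):
// consecutive allocations are separated by the requested size plus a guard
// gap, so an overflow off the end of one region faults instead of silently
// touching the next one.
#[cfg(test)]
mod bump_alloc_sketch {
    use core::sync::atomic::{AtomicU64, Ordering};

    const GUARD: u64 = 32 * 1024;
    static NEXT: AtomicU64 = AtomicU64::new(0x1_0000_0000); // example base

    fn alloc_4k_region(size: u64) -> u64 {
        assert!(size > 0 && (size & 0xfff) == 0);
        NEXT.fetch_add(GUARD + size, Ordering::SeqCst)
    }

    #[test]
    fn regions_never_touch() {
        let a = alloc_4k_region(0x1000);
        let b = alloc_4k_region(0x1000);
        // The second region starts past the first one plus its guard gap
        assert!(b >= a + 0x1000 + GUARD);
    }
}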
/// Gets access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys<'a>(paddr: PhysAddr, size: u64) -> &'a [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
    // Return a slice to this physical memory
core::slice::from_raw_parts(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *const u8,
size as usize)
}
/// Gets mutable access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys_mut<'a>(paddr: PhysAddr, size: u64) -> &'a mut [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
    // Return a mutable slice to this physical memory
core::slice::from_raw_parts_mut(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut u8,
size as usize)
}
/// Read a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::read_volatile`.
#[allow(dead_code)]
pub unsafe fn read_phys<T>(paddr: PhysAddr) -> T {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::read_volatile((KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T)
}
/// Write to a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::write_volatile`.
pub unsafe fn write_phys<T>(paddr: PhysAddr, val: T) {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on write_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::write_volatile(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T, val);
}
/// Metadata on a freed allocation
#[repr(C)]
struct | {
/// Virtual address of the next `FreeListNode`
next: usize,
    /// Number of free slots in `free_addrs`
free_slots: usize,
/// Virtual addresses of free allocations
free_addrs: [*mut u8; 0],
}
/// A free list which holds free entries of `size` bytes, chained as a linked
/// list of nodes that each carry a small stack of free addresses.
pub struct FreeList {
/// Pointer to the first entry in the free list
head: usize,
/// Size of allocations (in bytes) for this free list
size: usize,
}
impl FreeList {
/// Create a new, empty free list containing addresses to `size` byte
/// allocations
pub fn new(size: usize) -> Self {
// Ensure some properties of the free list size
assert!(size.count_ones() == 1,
"Free list size must be a power of two");
assert!(size >= size_of::<usize>(),
"Free list size must be at least pointer width");
FreeList { head: 0, size }
}
    /// Get an address from the free list
pub unsafe fn pop(&mut self) -> *mut u8 {
// If the free list is empty
if self.head == 0 {
if self.size <= 4096 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else {
// Check if there is room for this allocation in the free stack,
// or if we need to create a new stack
if self.head == 0 ||
(*(self.head as *const FreeListNode)).free_slots == 0 {
// No free slots, create a new stack out of the freed vaddr
let vaddr = &mut *(vaddr as *mut FreeListNode);
// Set the number of free slots to the maximum size, as all
// entries are free in the stack
// This is the size of the allocation, minus the 2 `usize`
// header (in entries)
vaddr.free_slots =
(self.size / core::mem::size_of::<usize>()) - 2;
// Update the next to point to the old head
vaddr.next = self.head;
// Establish this as the new free list head
self.head = vaddr as *mut FreeListNode as usize;
} else {
// There's room in the current stack, just throw us in there
let fl = &mut *(self.head as *mut FreeListNode);
// Decrement the number of free slots
fl.free_slots -= 1;
// Store our newly freed virtual address into this slot
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize) =
vaddr;
}
}
}
}
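// Sketch of the capacity arithmetic used by push/pop above: a freed chunk of
// `size` bytes is reused as a node holding `next`, `free_slots`, and then
// (size / word size) - 2 pointer slots of stack space.
#[cfg(test)]
mod free_list_capacity_sketch {
    use core::mem::size_of;

    fn slots_per_node(alloc_size: usize) -> usize {
        (alloc_size / size_of::<usize>()) - 2 // minus the two-word header
    }

    #[test]
    fn capacity_matches_layout() {
        // On a 64-bit target a 4096-byte chunk stores a 16-byte header plus
        // 510 free addresses
        if size_of::<usize>() == 8 {
            assert_eq!(slots_per_node(4096), 510);
        }
    }
}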
/// A wrapper on a range set to allow implementing the `PhysMem` trait
pub struct PhysicalMemory;
impl PhysMem for PhysicalMemory {
unsafe fn translate(&mut self, paddr: PhysAddr, size: usize)
-> Option<*const u8> {
self.translate_mut(paddr, size).map(|x| x as *const u8)
}
unsafe fn translate_mut(&mut self, paddr: PhysAddr, size: usize)
-> Option<*mut u8> {
// Compute the ending physical address
let end = (size as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
})?;
// Make sure this physical address fits inside our window
if end >= KERNEL_PHYS_WINDOW_SIZE {
return None;
}
// Convert the physical address into linear mapping view address
Some((paddr.0 + KERNEL_PHYS_WINDOW_BASE) as *mut u8)
}
fn alloc_phys(&mut self, layout: Layout) -> Option<PhysAddr> {
if layout.size() <= 4096 && layout.align() <= layout.size() {
// Special case, just allocate directly from our free lists. Our
// free lists for allocations <= 4096 bytes directly map to the
// physical memory map, and are naturally aligned
unsafe {
let ptr = core!().free_list(layout).lock().pop();
Some(PhysAddr(ptr as u64 - KERNEL_PHYS_WINDOW_BASE))
}
} else {
// Get access to physical memory
let mut phys_mem = unsafe {
core!().boot_args.free_memory_ref().lock()
};
let phys_mem = phys_mem.as_mut()?;
// Could not satisfy allocation from free list, allocate
// directly from the physical memory pool
let alc = phys_mem.allocate_prefer(layout.size() as u64,
layout.align() as u64,
memory_range())?;
// Update stats
GLOBAL_ALLOCATOR.free_physical
.store(phys_mem.sum().unwrap(), Ordering::Relaxed);
Some(PhysAddr(alc as u64))
}
}
}
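// Sketch of the small-allocation fast path above, with an invented window
// base: because free-list entries live inside the linear physical mapping,
// converting the popped virtual address back to a physical one is a single
// subtraction of the window base.
#[cfg(test)]
mod phys_window_roundtrip_sketch {
    const WINDOW_BASE: u64 = 0xFFFF_CAFE_0000_0000; // hypothetical constant

    #[test]
    fn vaddr_paddr_roundtrip() {
        let paddr: u64 = 0x1234_5000; // pretend this page came from the pool
        let vaddr = WINDOW_BASE + paddr; // what slice_phys_mut would hand out
        assert_eq!(vaddr - WINDOW_BASE, paddr);
    }
}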
/// The global allocator for the bootloader, this just uses physical memory as
/// a backing and does not handle any fancy things like fragmentation. Use this
/// carefully.
#[global_allocator]
pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator {
num_allocs: AtomicU64::new(0),
num_frees: AtomicU64::new(0),
free_physical: AtomicU64::new(0),
free_list: AtomicU64::new(0),
};
/// Empty structure that we can implement `GlobalAlloc` for such that we can
/// use the `#[global_allocator]`
#[derive(Debug)]
pub struct GlobalAllocator {
/// Number of allocations performed
pub num_allocs: AtomicU64,
/// Number of frees performed
pub num_frees: AtomicU64,
/// Current number of free bytes in the physical memory pool, this only
/// ever decreases since we do not free back to physical memory
pub free_physical: AtomicU64,
/// Number of bytes sitting in free lists
pub free_list: AtomicU64,
}
/// Print the allocation statistics to the screen
pub fn print_alloc_stats() {
// Get total amount of physical memory
let total_phys = core!().boot_args
.total_physical_memory.load(Ordering::Relaxed);
// Get physical memory in use
let phys_inuse =
total_phys - GLOBAL_ALLOCATOR.free_physical.load(Ordering::Relaxed);
print!("Allocs {:8} | Frees {:8} | Physical {:10.2} MiB / {:10.2} MiB | \
Free List {:10.2} MiB\n",
GLOBAL_ALLOCATOR.num_allocs.load(Ordering::Relaxed),
GLOBAL_ALLOCATOR.num_frees.load(Ordering::Relaxed),
phys_inuse as f64 / 1024. / 1024.,
total_phys as f64 / 1024. / 1024.,
GLOBAL_ALLOCATOR.free_list
.load(Ordering::Relaxed) as f64 / 1024. / 1024.);
}
unsafe impl GlobalAlloc for GlobalAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// Allocate memory from our free lists
let ptr = core!().free_list(layout).lock().pop();
// Update stats
self.num_allocs.fetch_add(1, Ordering::Relaxed);
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// Free the memory
core!().free_list(layout).lock().push(ptr);
// Update stats
self.num_frees.fetch_add(1, Ordering::Relaxed);
}
}
/// Allocation containing a physically contiguous allocation
pub struct PhysContig<T> {
/// Virtual address of the allocation
vaddr: VirtAddr,
/// Physical address of the allocation
paddr: PhysAddr,
/// Mark that this "holds" a `T`
_phantom: PhantomData<T>,
}
impl<T> PhysContig<T> {
/// Allocate physically contiguous memory large enough to hold `val` and
/// move `val` into it
pub fn new(val: T) -> PhysContig<T> {
assert!(size_of::<T>() > 0, "Cannot use ZST for PhysContig");
assert!(size_of::<T>() <= 4096, "Size too large for PhysContig");
unsafe {
// Allocate a 4 KiB page
let alloc = GLOBAL_ALLOCATOR.alloc(
Layout::from_size_align(4096, 4096).unwrap());
// Compute the physical address of this allocation
let paddr = PhysAddr(alloc as u64 - KERNEL_PHYS_WINDOW_BASE);
// Initialize the memory to `val`
core::ptr::write(alloc as *mut T, val);
// Create the `PhysContig` structure
PhysContig {
vaddr: VirtAddr(alloc as u64),
                paddr,
_phantom: PhantomData,
}
}
}
/// Get the physical address of the allocation
pub fn phys_addr(&self) -> PhysAddr {
self.paddr
}
}
impl<T> Drop for PhysContig<T> {
fn drop(&mut self) {
unsafe {
GLOBAL_ALLOCATOR.dealloc(self.vaddr.0 as *mut u8,
Layout::from_size_align(4096, 4096).unwrap());
}
}
}
impl<T> Deref for PhysContig | FreeListNode | identifier_name |
mm.rs | alloc::collections::BTreeMap;
use crate::acpi::MAX_CORES;
use rangeset::RangeSet;
use boot_args::{KERNEL_PHYS_WINDOW_BASE, KERNEL_PHYS_WINDOW_SIZE};
use boot_args::KERNEL_VMEM_BASE;
use page_table::{PhysMem, PhysAddr, PageType, VirtAddr};
/// Table which is indexed by an APIC identifier to map to a physical range
/// which is local to its NUMA node
static APIC_TO_MEMORY_RANGE: AtomicPtr<[Option<RangeSet>; MAX_CORES]> =
AtomicPtr::new(core::ptr::null_mut());
/// Get the preferred memory range for the currently running APIC. Returns
/// `None` if we have no valid APIC ID yet, or we do not have NUMA knowledge
/// of the current APIC ID
pub fn memory_range<'a>() -> Option<&'a RangeSet> {
// Check to see if the `APIC_TO_MEMORY_RANGE` has been initialized
let atmr = APIC_TO_MEMORY_RANGE.load(Ordering::SeqCst);
if atmr.is_null() {
return None;
}
// Cast the memory range structure to something we can access
let atmr = unsafe { &*atmr };
// Based on our current APIC ID look up the memory range
core!().apic_id().and_then(|x| atmr[x as usize].as_ref())
}
/// Establish the `APIC_TO_MEMORY_RANGE` global with the APIC IDs to their
/// corresponding NUMA-local memory regions
pub unsafe fn register_numa_nodes(apic_to_domain: BTreeMap<u32, u32>,
domain_to_mem: BTreeMap<u32, RangeSet>) {
// Create a heap-based database
let mut apic_mappings: Box<MaybeUninit<[Option<RangeSet>; MAX_CORES]>> =
Box::new_uninit();
// Initialize the heap based memory
for core in 0..MAX_CORES {
        let ptr = apic_mappings.as_mut_ptr() as *mut Option<RangeSet>;
        core::ptr::write(ptr.offset(core as isize), None);
}
// APIC mappings are now initialized
let mut apic_mappings = apic_mappings.assume_init();
// Go through each APIC to domain mapping
for (&apic, domain) in apic_to_domain.iter() {
apic_mappings[apic as usize] = domain_to_mem.get(domain)
            .copied();
}
// Store the apic mapping database into the global!
APIC_TO_MEMORY_RANGE.store(Box::into_raw(apic_mappings), Ordering::SeqCst);
}
/// Find a free region of virtual memory that can hold `size` bytes and return
/// the virtual address
///
/// This is only valid for virtual requests for 4 KiB mappings
pub fn alloc_virt_addr_4k(size: u64) -> VirtAddr {
/// Base address for virtual allocations
static NEXT_FREE_VADDR: AtomicU64 = AtomicU64::new(KERNEL_VMEM_BASE);
/// Gap between virtual allocations
const GUARD_PAGE_SIZE: u64 = 32 * 1024;
assert!(size > 0 && (size & 0xfff) == 0,
"Invalid size for virtual region allocation");
// Compute the amount of virtual memory to reserve, including the guard
// size.
let reserve_size = GUARD_PAGE_SIZE.checked_add(size as u64)
.expect("Integer overflow on virtual region size");
// Get a new virtual region that is free
let ret = VirtAddr(
NEXT_FREE_VADDR.fetch_add(reserve_size, Ordering::SeqCst)
);
    // If we cannot add the reserve size to the return value, then the
    // virtual address space wrapped the 64-bit boundary
ret.0.checked_add(reserve_size)
.expect("Integer overflow on virtual address range");
ret
}
/// Gets access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys<'a>(paddr: PhysAddr, size: u64) -> &'a [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
    // Return a slice to this physical memory
core::slice::from_raw_parts(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *const u8,
size as usize)
}
/// Gets mutable access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys_mut<'a>(paddr: PhysAddr, size: u64) -> &'a mut [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
    // Return a mutable slice to this physical memory
core::slice::from_raw_parts_mut(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut u8,
size as usize)
}
/// Read a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::read_volatile`.
#[allow(dead_code)]
pub unsafe fn read_phys<T>(paddr: PhysAddr) -> T {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::read_volatile((KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T)
}
/// Write to a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::write_volatile`.
pub unsafe fn write_phys<T>(paddr: PhysAddr, val: T) {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on write_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::write_volatile(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T, val);
}
/// Metadata on a freed allocation
#[repr(C)]
struct FreeListNode {
/// Virtual address of the next `FreeListNode`
next: usize,
    /// Number of free slots in `free_addrs`
free_slots: usize,
/// Virtual addresses of free allocations
free_addrs: [*mut u8; 0],
}
/// A free list which holds free entries of `size` bytes, chained as a linked
/// list of nodes that each carry a small stack of free addresses.
pub struct FreeList {
/// Pointer to the first entry in the free list
head: usize,
/// Size of allocations (in bytes) for this free list
size: usize,
}
impl FreeList {
/// Create a new, empty free list containing addresses to `size` byte
/// allocations
pub fn new(size: usize) -> Self {
// Ensure some properties of the free list size
assert!(size.count_ones() == 1,
"Free list size must be a power of two");
assert!(size >= size_of::<usize>(),
"Free list size must be at least pointer width");
FreeList { head: 0, size }
}
    /// Get an address from the free list
pub unsafe fn pop(&mut self) -> *mut u8 {
// If the free list is empty
if self.head == 0 {
if self.size <= 4096 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else {
// Check if there is room for this allocation in the free stack,
// or if we need to create a new stack
if self.head == 0 ||
(*(self.head as *const FreeListNode)).free_slots == 0 {
// No free slots, create a new stack out of the freed vaddr
let vaddr = &mut *(vaddr as *mut FreeListNode);
// Set the number of free slots to the maximum size, as all
// entries are free in the stack
// This is the size of the allocation, minus the 2 `usize`
// header (in entries)
vaddr.free_slots =
(self.size / core::mem::size_of::<usize>()) - 2;
// Update the next to point to the old head
vaddr.next = self.head;
// Establish this as the new free list head
self.head = vaddr as *mut FreeListNode as usize;
} else {
// There's room in the current stack, just throw us in there
let fl = &mut *(self.head as *mut FreeListNode);
// Decrement the number of free slots
fl.free_slots -= 1;
// Store our newly freed virtual address into this slot
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize) =
vaddr;
}
}
}
}
/// A wrapper on a range set to allow implementing the `PhysMem` trait
pub struct PhysicalMemory;
impl PhysMem for PhysicalMemory {
unsafe fn translate(&mut self, paddr: PhysAddr, size: usize)
-> Option<*const u8> {
self.translate_mut(paddr, size).map(|x| x as *const u8)
}
unsafe fn translate_mut(&mut self, paddr: PhysAddr, size: usize)
-> Option<*mut u8> {
// Compute the ending physical address
let end = (size as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
})?;
// Make sure this physical address fits inside our window
if end >= KERNEL_PHYS_WINDOW_SIZE {
return None;
}
// Convert the physical address into linear mapping view address
Some((paddr.0 + KERNEL_PHYS_WINDOW_BASE) as *mut u8)
}
fn alloc_phys(&mut self, layout: Layout) -> Option<PhysAddr> {
if layout.size() <= 4096 && layout.align() <= layout.size() {
// Special case, just allocate directly from our free lists. Our
// free lists for allocations <= 4096 bytes directly map to the
// physical memory map, and are naturally aligned
unsafe {
let ptr = core!().free_list(layout).lock().pop();
Some(PhysAddr(ptr as u64 - KERNEL_PHYS_WINDOW_BASE))
}
} else {
// Get access to physical memory
let mut phys_mem = unsafe {
core!().boot_args.free_memory_ref().lock()
};
let phys_mem = phys_mem.as_mut()?;
// Could not satisfy allocation from free list, allocate
// directly from the physical memory pool
let alc = phys_mem.allocate_prefer(layout.size() as u64,
layout.align() as u64,
memory_range())?;
// Update stats
GLOBAL_ALLOCATOR.free_physical
.store(phys_mem.sum().unwrap(), Ordering::Relaxed);
Some(PhysAddr(alc as u64))
}
}
}
/// The global allocator for the bootloader, this just uses physical memory as
/// a backing and does not handle any fancy things like fragmentation. Use this
/// carefully.
#[global_allocator]
pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator {
num_allocs: AtomicU64::new(0),
num_frees: AtomicU64::new(0),
free_physical: AtomicU64::new(0),
free_list: AtomicU64::new(0),
};
/// Empty structure that we can implement `GlobalAlloc` for such that we can
/// use the `#[global_allocator]`
#[derive(Debug)]
pub struct GlobalAllocator {
/// Number of allocations performed
pub num_allocs: AtomicU64,
/// Number of frees performed
pub num_frees: AtomicU64,
/// Current number of free bytes in the physical memory pool, this only
/// ever decreases since we do not free back to physical memory
pub free_physical: AtomicU64,
/// Number of bytes sitting in free lists
pub free_list: AtomicU64,
}
| let total_phys = core!().boot_args
.total_physical_memory.load(Ordering::Relaxed);
// Get physical memory in use
let phys_inuse =
total_phys - GLOBAL_ALLOCATOR.free_physical.load(Ordering::Relaxed);
print!("Allocs {:8} | Frees {:8} | Physical {:10.2} MiB / {:10.2} MiB | \
Free List {:10.2} MiB\n",
GLOBAL_ALLOCATOR.num_allocs.load(Ordering::Relaxed),
GLOBAL_ALLOCATOR.num_frees.load(Ordering::Relaxed),
phys_inuse as f64 / 1024. / 1024.,
total_phys as f64 / 1024. / 1024.,
GLOBAL_ALLOCATOR.free_list
.load(Ordering::Relaxed) as f64 / 1024. / 1024.);
}
unsafe impl GlobalAlloc for GlobalAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// Allocate memory from our free lists
let ptr = core!().free_list(layout).lock().pop();
// Update stats
self.num_allocs.fetch_add(1, Ordering::Relaxed);
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// Free the memory
core!().free_list(layout).lock().push(ptr);
// Update stats
self.num_frees.fetch_add(1, Ordering::Relaxed);
}
}
/// Allocation containing a physically contiguous allocation
pub struct PhysContig<T> {
/// Virtual address of the allocation
vaddr: VirtAddr,
/// Physical address of the allocation
paddr: PhysAddr,
/// Mark that this "holds" a `T`
_phantom: PhantomData<T>,
}
impl<T> PhysContig<T> {
/// Allocate physically contiguous memory large enough to hold `val` and
/// move `val` into it
pub fn new(val: T) -> PhysContig<T> {
assert!(size_of::<T>() > 0, "Cannot use ZST for PhysContig");
assert!(size_of::<T>() <= 4096, "Size too large for PhysContig");
unsafe {
// Allocate a 4 KiB page
let alloc = GLOBAL_ALLOCATOR.alloc(
Layout::from_size_align(4096, 4096).unwrap());
// Compute the physical address of this allocation
let paddr = PhysAddr(alloc as u64 - KERNEL_PHYS_WINDOW_BASE);
// Initialize the memory to `val`
core::ptr::write(alloc as *mut T, val);
// Create the `PhysContig` structure
PhysContig {
vaddr: VirtAddr(alloc as u64),
                paddr,
_phantom: PhantomData,
}
}
}
/// Get the physical address of the allocation
pub fn phys_addr(&self) -> PhysAddr {
self.paddr
}
}
impl<T> Drop for PhysContig<T> {
fn drop(&mut self) {
unsafe {
GLOBAL_ALLOCATOR.dealloc(self.vaddr.0 as *mut u8,
Layout::from_size_align(4096, 4096).unwrap());
}
}
}
impl<T> Deref for PhysContig< | /// Print the allocation statistics to the screen
pub fn print_alloc_stats() {
// Get total amount of physical memory | random_line_split |
mm.rs | collections::BTreeMap;
use crate::acpi::MAX_CORES;
use rangeset::RangeSet;
use boot_args::{KERNEL_PHYS_WINDOW_BASE, KERNEL_PHYS_WINDOW_SIZE};
use boot_args::KERNEL_VMEM_BASE;
use page_table::{PhysMem, PhysAddr, PageType, VirtAddr};
/// Table which is indexed by an APIC identifier to map to a physical range
/// which is local to its NUMA node
static APIC_TO_MEMORY_RANGE: AtomicPtr<[Option<RangeSet>; MAX_CORES]> =
AtomicPtr::new(core::ptr::null_mut());
/// Get the preferred memory range for the currently running APIC. Returns
/// `None` if we have no valid APIC ID yet, or we do not have NUMA knowledge
/// of the current APIC ID
pub fn memory_range<'a>() -> Option<&'a RangeSet> {
// Check to see if the `APIC_TO_MEMORY_RANGE` has been initialized
let atmr = APIC_TO_MEMORY_RANGE.load(Ordering::SeqCst);
if atmr.is_null() {
return None;
}
// Cast the memory range structure to something we can access
let atmr = unsafe { &*atmr };
// Based on our current APIC ID look up the memory range
core!().apic_id().and_then(|x| atmr[x as usize].as_ref())
}
/// Establish the `APIC_TO_MEMORY_RANGE` global with the APIC IDs to their
/// corresponding NUMA-local memory regions
pub unsafe fn register_numa_nodes(apic_to_domain: BTreeMap<u32, u32>,
domain_to_mem: BTreeMap<u32, RangeSet>) {
// Create a heap-based database
let mut apic_mappings: Box<MaybeUninit<[Option<RangeSet>; MAX_CORES]>> =
Box::new_uninit();
// Initialize the heap based memory
for core in 0..MAX_CORES {
        let ptr = apic_mappings.as_mut_ptr() as *mut Option<RangeSet>;
        core::ptr::write(ptr.offset(core as isize), None);
}
// APIC mappings are now initialized
let mut apic_mappings = apic_mappings.assume_init();
// Go through each APIC to domain mapping
for (&apic, domain) in apic_to_domain.iter() {
apic_mappings[apic as usize] = domain_to_mem.get(domain)
            .copied();
}
// Store the apic mapping database into the global!
APIC_TO_MEMORY_RANGE.store(Box::into_raw(apic_mappings), Ordering::SeqCst);
}
/// Find a free region of virtual memory that can hold `size` bytes and return
/// the virtual address
///
/// This is only valid for virtual requests for 4 KiB mappings
pub fn alloc_virt_addr_4k(size: u64) -> VirtAddr {
/// Base address for virtual allocations
static NEXT_FREE_VADDR: AtomicU64 = AtomicU64::new(KERNEL_VMEM_BASE);
/// Gap between virtual allocations
const GUARD_PAGE_SIZE: u64 = 32 * 1024;
assert!(size > 0 && (size & 0xfff) == 0,
"Invalid size for virtual region allocation");
// Compute the amount of virtual memory to reserve, including the guard
// size.
let reserve_size = GUARD_PAGE_SIZE.checked_add(size as u64)
.expect("Integer overflow on virtual region size");
// Get a new virtual region that is free
let ret = VirtAddr(
NEXT_FREE_VADDR.fetch_add(reserve_size, Ordering::SeqCst)
);
    // If we cannot add the reserve size to the return value, then the
    // virtual address space wrapped the 64-bit boundary
ret.0.checked_add(reserve_size)
.expect("Integer overflow on virtual address range");
ret
}
/// Gets access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys<'a>(paddr: PhysAddr, size: u64) -> &'a [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
    // Return a slice to this physical memory
core::slice::from_raw_parts(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *const u8,
size as usize)
}
/// Gets mutable access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys_mut<'a>(paddr: PhysAddr, size: u64) -> &'a mut [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on slice_phys_mut");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts_mut(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut u8,
size as usize)
}
/// Read a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::read_volatile`.
#[allow(dead_code)]
pub unsafe fn read_phys<T>(paddr: PhysAddr) -> T {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::read_volatile((KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T)
}
/// Write to a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::write_volatile`.
pub unsafe fn write_phys<T>(paddr: PhysAddr, val: T) {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on write_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::write_volatile(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T, val);
}
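// Illustrative round-trip through the physical window (the address is
// made up and must be RAM-backed and inside the window):
//
// unsafe {
//     write_phys::<u32>(PhysAddr(0x10_0000), 0xdead_beef);
//     assert_eq!(read_phys::<u32>(PhysAddr(0x10_0000)), 0xdead_beef);
// }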
/// Metadata on a freed allocation
#[repr(C)]
struct FreeListNode {
/// Virtual address of the next `FreeListNode`
next: usize,
/// Number of free slots in `free_addrs`
free_slots: usize,
/// Virtual addresses of free allocations
free_addrs: [*mut u8; 0],
}
/// A free list which holds free entries of `size` bytes in a linked list
/// of nodes, where each node also serves as a stack of free addresses
pub struct FreeList {
/// Pointer to the first entry in the free list
head: usize,
/// Size of allocations (in bytes) for this free list
size: usize,
}
impl FreeList {
/// Create a new, empty free list containing addresses to `size` byte
/// allocations
pub fn new(size: usize) -> Self {
// Ensure some properties of the free list size
assert!(size.count_ones() == 1,
"Free list size must be a power of two");
assert!(size >= size_of::<usize>(),
"Free list size must be at least pointer width");
FreeList { head: 0, size }
}
/// Get an address from the free list
pub unsafe fn pop(&mut self) -> *mut u8 {
// If the free list is empty
if self.head == 0 {
if self.size <= 4096 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else {
// Check if there is room for this allocation in the free stack,
// or if we need to create a new stack
if self.head == 0 ||
(*(self.head as *const FreeListNode)).free_slots == 0 {
// No free slots, create a new stack out of the freed vaddr
let vaddr = &mut *(vaddr as *mut FreeListNode);
// Set the number of free slots to the maximum size, as all
// entries are free in the stack
// This is the size of the allocation, minus the 2 `usize`
// header (in entries)
vaddr.free_slots =
(self.size / core::mem::size_of::<usize>()) - 2;
// Update the next to point to the old head
vaddr.next = self.head;
// Establish this as the new free list head
self.head = vaddr as *mut FreeListNode as usize;
} else {
// There's room in the current stack, just throw us in there
let fl = &mut *(self.head as *mut FreeListNode);
// Decrement the number of free slots
fl.free_slots -= 1;
// Store our newly freed virtual address into this slot
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize) =
vaddr;
}
}
}
}
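// Minimal usage sketch (assumes the physical window and per-core state
// are already up): freed addresses are handed back in LIFO order, so a
// pop right after a push returns the same pointer.
//
// let mut fl = FreeList::new(64);
// let a = unsafe { fl.pop() };
// unsafe { fl.push(a); }
// assert_eq!(unsafe { fl.pop() }, a);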
/// A wrapper on a range set to allow implementing the `PhysMem` trait
pub struct PhysicalMemory;
impl PhysMem for PhysicalMemory {
unsafe fn translate(&mut self, paddr: PhysAddr, size: usize)
-> Option<*const u8> {
self.translate_mut(paddr, size).map(|x| x as *const u8)
}
unsafe fn translate_mut(&mut self, paddr: PhysAddr, size: usize)
-> Option<*mut u8> {
// Compute the ending physical address
let end = (size as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
})?;
// Make sure this physical address fits inside our window
if end >= KERNEL_PHYS_WINDOW_SIZE {
return None;
}
// Convert the physical address into linear mapping view address
Some((paddr.0 + KERNEL_PHYS_WINDOW_BASE) as *mut u8)
}
fn alloc_phys(&mut self, layout: Layout) -> Option<PhysAddr> {
if layout.size() <= 4096 && layout.align() <= layout.size() {
// Special case, just allocate directly from our free lists. Our
// free lists for allocations <= 4096 bytes directly map to the
// physical memory map, and are naturally aligned
unsafe {
let ptr = core!().free_list(layout).lock().pop();
Some(PhysAddr(ptr as u64 - KERNEL_PHYS_WINDOW_BASE))
}
} else {
// Get access to physical memory
let mut phys_mem = unsafe {
core!().boot_args.free_memory_ref().lock()
};
let phys_mem = phys_mem.as_mut()?;
// Could not satisfy allocation from free list, allocate
// directly from the physical memory pool
let alc = phys_mem.allocate_prefer(layout.size() as u64,
layout.align() as u64,
memory_range())?;
// Update stats
GLOBAL_ALLOCATOR.free_physical
.store(phys_mem.sum().unwrap(), Ordering::Relaxed);
Some(PhysAddr(alc as u64))
}
}
}
/// The global allocator for the bootloader, this just uses physical memory as
/// a backing and does not handle any fancy things like fragmentation. Use this
/// carefully.
#[global_allocator]
pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator {
num_allocs: AtomicU64::new(0),
num_frees: AtomicU64::new(0),
free_physical: AtomicU64::new(0),
free_list: AtomicU64::new(0),
};
/// Empty structure that we can implement `GlobalAlloc` for such that we can
/// use the `#[global_allocator]`
#[derive(Debug)]
pub struct GlobalAllocator {
/// Number of allocations performed
pub num_allocs: AtomicU64,
/// Number of frees performed
pub num_frees: AtomicU64,
/// Current number of free bytes in the physical memory pool, this only
/// ever decreases since we do not free back to physical memory
pub free_physical: AtomicU64,
/// Number of bytes sitting in free lists
pub free_list: AtomicU64,
}
/// Print the allocation statistics to the screen
pub fn print_alloc_stats() {
// Get total amount of physical memory
let total_phys = core!().boot_args
.total_physical_memory.load(Ordering::Relaxed);
// Get physical memory in use
let phys_inuse =
total_phys - GLOBAL_ALLOCATOR.free_physical.load(Ordering::Relaxed);
print!("Allocs {:8} | Frees {:8} | Physical {:10.2} MiB / {:10.2} MiB | \
Free List {:10.2} MiB\n",
GLOBAL_ALLOCATOR.num_allocs.load(Ordering::Relaxed),
GLOBAL_ALLOCATOR.num_frees.load(Ordering::Relaxed),
phys_inuse as f64 / 1024. / 1024.,
total_phys as f64 / 1024. / 1024.,
GLOBAL_ALLOCATOR.free_list
.load(Ordering::Relaxed) as f64 / 1024. / 1024.);
}
unsafe impl GlobalAlloc for GlobalAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// Allocate memory from our free lists
let ptr = core!().free_list(layout).lock().pop();
// Update stats
self.num_allocs.fetch_add(1, Ordering::Relaxed);
ptr
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// Free the memory
core!().free_list(layout).lock().push(ptr);
// Update stats
self.num_frees.fetch_add(1, Ordering::Relaxed);
}
}
/// Allocation containing a physically contiguous allocation
pub struct PhysContig<T> {
/// Virtual address of the allocation
vaddr: VirtAddr,
/// Physical address of the allocation
paddr: PhysAddr,
/// Mark that this "holds" a `T`
_phantom: PhantomData<T>,
}
impl<T> PhysContig<T> {
/// Allocate physically contiguous memory large enough to hold `val` and
/// move `val` into it
pub fn new(val: T) -> PhysContig<T> {
assert!(size_of::<T>() > 0, "Cannot use ZST for PhysContig");
assert!(size_of::<T>() <= 4096, "Size too large for PhysContig");
unsafe {
// Allocate a 4 KiB page
let alloc = GLOBAL_ALLOCATOR.alloc(
Layout::from_size_align(4096, 4096).unwrap());
// Compute the physical address of this allocation
let paddr = PhysAddr(alloc as u64 - KERNEL_PHYS_WINDOW_BASE);
// Initialize the memory to `val`
core::ptr::write(alloc as *mut T, val);
// Create the `PhysContig` structure
PhysContig {
vaddr: VirtAddr(alloc as u64),
paddr: paddr,
_phantom: PhantomData,
}
}
}
/// Get the physical address of the allocation
pub fn phys_addr(&self) -> PhysAddr {
self.paddr
}
}
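// Typical use (sketch): hardware that does DMA needs a stable physical
// address while the kernel keeps using the normal virtual mapping;
// `program_dma` is a hypothetical device routine.
//
// let buf = PhysContig::new([0u8; 512]);
// device.program_dma(buf.phys_addr().0);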
impl<T> Drop for PhysContig<T> {
fn drop(&mut self) {
unsafe {
GLOBAL_ALLOCATOR.dealloc(self.vaddr.0 as *mut u8,
Layout::from_size_align(4096, 4096).unwrap());
}
}
}
impl<T> Deref for PhysContig<T> {
// NOTE: the body of this impl is truncated in the source; a standard
// Deref through the backing virtual address (assumed) would be:
type Target = T;
fn deref(&self) -> &T {
unsafe { &*(self.vaddr.0 as *const T) }
}
}
main.rs | ]
}
}
}
const VERTICES: &[Vertex] = &[
// Changed
Vertex { position: [-0.0868241, 0.49240386, 0.0], tex_coords: [0.4131759, 0.00759614], }, // A
Vertex { position: [-0.49513406, 0.06958647, 0.0], tex_coords: [0.0048659444, 0.43041354], }, // B
Vertex { position: [-0.21918549, -0.44939706, 0.0], tex_coords: [0.28081453, 0.949397057], }, // C
Vertex { position: [0.35966998, -0.3473291, 0.0], tex_coords: [0.85967, 0.84732911], }, // D
Vertex { position: [0.44147372, 0.2347359, 0.0], tex_coords: [0.9414737, 0.2652641], }, // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 = 18 bytes
// We add one more u16 of padding to round this up to a multiple of 4
/* padding */ 0,
];
const SECOND_INDICES: &[u16] = &[
0, 1, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 6 u16 numbers which is 6 x 2 = 12 bytes, already a
// multiple of 4; the extra u16 below just mirrors INDICES and is ignored
// at draw time since it does not complete a triangle
/* padding */ 0,
];
bitflags! {
struct Levers: u32 {
const LEVER1 = 0b00000001;
const LEVER2 = 0b00000010;
}
}
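// `Levers` is a plain bitmask; a sketch of the operations used in
// `input` below:
//
// let mut levers = Levers::empty();
// levers |= Levers::LEVER1;              // set
// assert!(levers.contains(Levers::LEVER1));
// levers &= !Levers::LEVER1;             // clear
// assert!(levers.is_empty());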
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
mouse_pos: cgmath::Point2<f64>,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
second_index_buffer: wgpu::Buffer,
second_num_indices: u32,
levers: Levers,
diffuse_bind_group: wgpu::BindGroup,
}
impl State {
async fn new(window: &Window) -> Result<Self, Box<dyn std::error::Error>> {
let size = window.inner_size();
// instance holds the handle to the GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU (they are all ORed)
// TODO: Try BackendBit::VULKAN
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// This is unsafe because on some Linux systems lifetime of the window might not be as long
// as the lifetime of the program. See: https://github.com/gfx-rs/wgpu/issues/1463
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}
).await.expect("Can't initialize adapter with the surface.");
let format = adapter.get_swap_chain_preferred_format(&surface).expect(
"Can't get surface prefered texture format."
);
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
// Features are the capabilities of the API and the GPU
// They are not universal.
// See all features here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::empty(),
// Limits are resource limits that can be imposed.
// They are device dependent
// See all limits here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits::default(),
label: None, // Debug label for the device
},
None, // Trace path used for tracing API calls if `trace` features is enabled.
).await?;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // Frame rate will be capped at the `VSync` frequency
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes)?;
let diffuse_rgba = diffuse_image.as_rgba8().expect("Image is not in RGBA8 format");
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
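// Note on the layout above: `bytes_per_row` is 4 * width because the
// data is RGBA8 (4 bytes per pixel) and `rows_per_image` is the image
// height, so exactly width * height * 4 bytes are consumed.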
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let num_indices = INDICES.len() as u32;
let second_index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Second Index Buffer"),
contents: bytemuck::cast_slice(SECOND_INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let second_num_indices = SECOND_INDICES.len() as u32;
let levers = Levers::empty();
Ok(
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
mouse_pos: cgmath::Point2 {x: 0.0, y: 0.0},
render_pipeline,
vertex_buffer,
index_buffer,
second_index_buffer,
num_indices,
second_num_indices,
levers,
diffuse_bind_group,
}
)
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::CursorMoved { position, .. } => {
self.mouse_pos.x = position.x;
self.mouse_pos.y = position.y;
// debug!("Mouse moved to point: {:?}", self.mouse_pos);
true
},
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
} => match state {
ElementState::Pressed => {
self.levers = self.levers | Levers::LEVER1;
true
},
ElementState::Released => {
self.levers = self.levers & !Levers::LEVER1;
true
},
},
_ => false
},
_ => false
}
}
fn update(&mut self) {
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
let frame = self.swap_chain
.get_current_frame()?
.output;
let mut encoder = self.device.create_command_encoder(
&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
}
);
{
let mut render_pass = encoder.begin_render_pass(
&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[
// This is what [[location(0)]] in the fragment shader targets
wgpu::RenderPassColorAttachment {
view: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
}
],
depth_stencil_attachment: None,
}
);
let data = {
if self.levers.contains(Levers::LEVER1) {
(&self.second_index_buffer, self.second_num_indices)
} else {
(&self.index_buffer, self.num_indices)
}
};
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(data.0.slice(..), wgpu::IndexFormat::Uint16);
render_pass.draw_indexed(
0..data.1,
0,
0..1
);
}
self.queue.submit(std::iter::once(encoder.finish()));
Ok(())
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::init();
// env_logger::Builder::new()
// .filter_module(
// "learn_wgpu_book", log::LevelFilter::Debug
// )
// .init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)?;
let mut state = futures::executor::block_on(State::new(&window))?;
event_loop.run(move |event, _, control_flow|
match event {
Event::WindowEvent {
ref event,
window_id
upload.rs | ,
/// Do not interactively prompt for username/password if the required credentials are missing.
///
/// Can also be set via MATURIN_NON_INTERACTIVE environment variable.
#[arg(long, env = "MATURIN_NON_INTERACTIVE")]
non_interactive: bool,
}
impl PublishOpt {
const DEFAULT_REPOSITORY_URL: &'static str = "https://upload.pypi.org/legacy/";
const TEST_REPOSITORY_URL: &'static str = "https://test.pypi.org/legacy/";
/// Switch to non-interactive mode when running on CI
pub fn non_interactive_on_ci(&mut self) {
if !self.non_interactive && env::var("CI").map(|v| v == "true").unwrap_or_default() {
eprintln!("🎛️ Running in non-interactive mode on CI");
self.non_interactive = true;
}
}
}
/// Error type for the different failures that can happen when uploading a
/// wheel.
///
/// The most interesting type is AuthenticationError because it allows asking
/// the user to reenter the password
#[derive(Error, Debug)]
#[error("Uploading to the registry failed")]
pub enum UploadError {
/// Any ureq error
#[error("Http error")]
UreqError(#[source] Box<ureq::Error>),
/// The registry returned a "403 Forbidden"
#[error("Username or password are incorrect")]
AuthenticationError(String),
/// Reading the wheel failed
#[error("IO Error")]
IoError(#[source] io::Error),
    /// The registry returned a status other than 200
#[error("Failed to upload the wheel with status {0}: {1}")]
StatusCodeError(String, String),
/// File already exists
#[error("File already exists: {0}")]
FileExistsError(String),
/// Read package metadata error
#[error("Could not read the metadata from the package at {0}")]
PkgInfoError(PathBuf, #[source] python_pkginfo::Error),
/// TLS error
#[cfg(feature = "native-tls")]
#[error("TLS Error")]
TlsError(#[source] native_tls::Error),
}
impl From<io::Error> for UploadError {
fn from(error: io::Error) -> Self {
UploadError::IoError(error)
}
}
impl From<ureq::Error> for UploadError {
fn from(error: ureq::Error) -> Self {
UploadError::UreqError(Box::new(error))
}
}
#[cfg(feature = "native-tls")]
impl From<native_tls::Error> for UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
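// These `From` impls are what let the `?` operator convert errors implicitly;
// a minimal sketch of the pattern (the helper below is illustrative, not part
// of maturin):
//
// fn read_wheel_bytes(path: &std::path::Path) -> Result<Vec<u8>, UploadError> {
//     // std::io::Error is lifted into UploadError::IoError via From
//     Ok(std::fs::read(path)?)
// }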
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
Registry {
username,
password,
url,
}
}
}
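// Example: a registry pointing at TestPyPI with token-style credentials
// (the token value is a placeholder):
//
// let registry = Registry::new(
//     "__token__".to_string(),
//     "pypi-XXXXXXXX".to_string(),
//     "https://test.pypi.org/legacy/".to_string(),
// );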
/// Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
            // dialoguer needs a real terminal; fall back to plain stdin (e.g. for PyCharm on Windows)
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
config
}
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
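// A `.pypirc` section that the lookup above would match (values illustrative):
//
// [pypi]
// username = __token__
// password = pypi-XXXXXXXX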
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceResponse {
audience: String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
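// The exchange above boils down to three HTTP calls (URLs shown for PyPI as an
// illustration; the audience and tokens are placeholders):
//
//   1. GET  {registry}/_/oidc/audience                   -> { "audience": "pypi" }
//   2. GET  $ACTIONS_ID_TOKEN_REQUEST_URL?audience=pypi  -> { "value": "<oidc jwt>" }
//   3. POST {registry}/_/oidc/github/mint-token          -> { "token": "<api token>" }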
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in.pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
    let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, &registry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
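// An illustrative test of the normalization above: runs of `-`, `_`, and `.`
// collapse to a single `-`, and the result is lowercased.
#[cfg(test)]
mod canonicalize_name_tests {
    use super::canonicalize_name;

    #[test]
    fn collapses_separators_and_lowercases() {
        assert_eq!(canonicalize_name("My_Package.Name"), "my-package-name");
        assert_eq!(canonicalize_name("pip--tools"), "pip-tools");
    }
}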
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUESTS_CA_BUNDLE"))
.or_else(|| env::var_os("CURL_CA_BUNDLE"))
}
// Prefer rustls if both native-tls and rustls features are enabled
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
let mut tls_builder = native_tls::TlsConnector::builder();
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
for cert in rustls_pemfile::certs(&mut reader)? {
tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
}
}
builder = builder.tls_connector(Arc::new(tls_builder.build()?));
Ok(builder.build())
}
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
let certs = rustls_pemfile::certs(&mut reader)?;
let mut root_certs = rustls::RootCertStore::empty();
root_certs.add_parsable_certificates(&certs);
let client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
Ok(builder.tls_config(Arc::new(client_config)).build())
} else {
Ok(builder.build())
}
}
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
Ok(builder.build())
}
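// All three `http_agent` variants honor the same environment configuration;
// an illustrative invocation (proxy host and bundle path are placeholders,
// and the CA bundle is only consulted in TLS-enabled builds):
//
// HTTPS_PROXY=http://proxy.example.com:3128 \
// MATURIN_CA_BUNDLE=/etc/ssl/certs/corp-ca.pem \
// maturin upload target/wheels/*.whl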
/// Uploads a single wheel to the registry
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
let hash_hex = hash_file(wheel_path)?;
let dist = python_pkginfo::Distribution::new(wheel_path)
.map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
let metadata = dist.metadata();
let mut api_metadata = vec![
(":action", "file_upload".to_string()),
("sha256_digest", hash_hex),
("protocol_version", "1".to_string()),
("metadata_version", metadata.metadata_version.clone()),
("name", canonicalize_name(&metadata.name)),
("version", metadata.version.clone()),
("pyversion", dist.python_version().to_string()),
("filetype", dist.r#type().to_string()),
];
let mut add_option = |name, value: &Option<String>| {
if let Some(some) = value.clone() {
api_metadata.push((name, some));
}
};
// https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
add_option("summary", &metadata.summary);
add_option("description", &metadata.description);
add_option(
"description_content_type",
&metadata.description_content_type,
);
add_option("author", &metadata.author);
add_option("author_email", &metadata.author_email);
add_option("maintainer", &metadata.maintainer);
add_option("maintainer_email", &metadata.maintainer_email);
add_option("license", &metadata.license);
add_option("keywords", &metadata.keywords);
add_option("home_page", &metadata.home_page);
add_option("download_url", &metadata.download_url);
add_option("requires_python", &metadata.requires_python);
add_option("summary", &metadata.summary);
if metadata.requires_python.is_none() {
// GitLab PyPI repository API implementation requires this metadata field
// and twine always includes it in the request, even when it's empty.
api_metadata.push(("requires_python", "".to_string()));
}
let mut add_vec = |name, values: &[String]| {
for i in values {
api_metadata.push((name, i.clone()));
}
};
add_vec("classifiers", &metadata.classifiers);
add_vec("platform", &metadata.platforms);
add_vec("requires_dist", &metadata.requires_dist);
add_vec("provides_dist", &metadata.provides_dist);
add_vec("obsoletes_dist", &metadata.obsoletes_dist);
add_vec("requires_external", &metadata.requires_external);
add_vec("project_urls", &metadata.project_urls);
let wheel = File::open(wheel_path)?;
let wheel_name = wheel_path
.file_name()
.expect("Wheel path has a file name")
.to_string_lossy();
let mut form = Multipart::new();
for (key, value) in api_metadata {
form.add_text(key, value);
}
form.add_stream("content", &wheel, Some(wheel_name), None);
let multipart_data = form.prepare().map_err(|e| e.error)?;
let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));
let agent = http_agent()?;
let response = agent
.post(registry.url.as_str())
.set(
"Content-Type",
&format!(
"multipart/form-data; boundary={}",
multipart_data.boundary()
),
)
.set(
"User-Agent",
&format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
)
.set("Authorization", &format!("Basic {encoded}"))
.send(multipart_data);
match response {
Ok(_) => Ok(()),
Err(ureq::Error::Status(status, response)) => {
let err_text = response.into_string().unwrap_or_else(|e| {
format!(
"The registry should return some text, \
even in case of an error, but didn't ({e})"
)
});
debug!("Upload error response: {}", err_text);
// Detect FileExistsError the way twine does
// https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
if status == 403 {
if err_text.contains("overwrite artifact") {
// Artifactory (https://jfrog.com/artifactory/)
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::AuthenticationError(err_text))
}
} else {
let status_string = status.to_string();
if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
// PyPI / TestPyPI
|| (status == 400 && err_text.contains("already exists"))
// Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
|| (status == 400 && err_text.contains("updating asset"))
                    // GitLab Enterprise Edition (https://about.gitlab.com)
|| (status == 400 && err_text.contains("already been taken"))
{
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::StatusCodeError(status_string, err_text))
}
}
}
Err(err) => Err(UploadError::UreqError(err.into())),
}
}
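// Minimal usage sketch of `upload` (the wheel path is illustrative):
//
// let registry = complete_registry(&publish_opt)?;
// upload(&registry, std::path::Path::new("target/wheels/demo-0.1.0-py3-none-any.whl"))?;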
/// Handles authentication/keyring integration and retrying of the publish subcommand
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
let registry = complete_registry(publish)?;
eprintln!("🚀 Uploading {} packages", items.len());
for i in items {
        let upload_result = upload(&registry, i);
match upload_result {
Ok(()) => (),
Err(UploadError::AuthenticationError(msg)) => {
let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
let title = title_re
.captures(&msg)
.and_then(|c| c.get(1))
.map(|m| m.as_str());
match title {
Some(title) => {
eprintln!("⛔ {title}");
}
None => eprintln!("⛔ Username and/or password are wrong"),
}
#[cfg(feature = "keyring")]
{
// Delete the wrong password from the keyring
let old_username = registry.username;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
.and_then(|keyring| keyring.delete_password())
{
Ok(()) => {
eprintln!("🔑 Removed wrong password from keyring")
                        }
                        Err(keyring::Error::NoEntry)
                        | Err(keyring::Error::NoStorageAccess(_)) => {}
|
upload.rs | #[arg(short, long, env = "MATURIN_USERNAME")]
username: Option<String>,
/// Password for pypi or your custom registry.
///
/// Can also be set via MATURIN_PASSWORD environment variable.
#[arg(short, long, env = "MATURIN_PASSWORD", hide_env_values = true)]
password: Option<String>,
/// Continue uploading files if one already exists.
/// (Only valid when uploading to PyPI. Other implementations may not support this.)
#[arg(long)]
skip_existing: bool,
/// Do not interactively prompt for username/password if the required credentials are missing.
///
/// Can also be set via MATURIN_NON_INTERACTIVE environment variable.
#[arg(long, env = "MATURIN_NON_INTERACTIVE")]
non_interactive: bool,
}
impl PublishOpt {
const DEFAULT_REPOSITORY_URL: &'static str = "https://upload.pypi.org/legacy/";
const TEST_REPOSITORY_URL: &'static str = "https://test.pypi.org/legacy/";
/// Set to non interactive mode if we're running on CI
pub fn non_interactive_on_ci(&mut self) {
if!self.non_interactive && env::var("CI").map(|v| v == "true").unwrap_or_default() {
eprintln!("🎛️ Running in non-interactive mode on CI");
self.non_interactive = true;
}
}
}
/// Error type for different types of errors that can happen when uploading a
/// wheel.
///
/// The most interesting type is AuthenticationError because it allows asking
/// the user to reenter the password
#[derive(Error, Debug)]
#[error("Uploading to the registry failed")]
pub enum UploadError {
/// Any ureq error
#[error("Http error")]
UreqError(#[source] Box<ureq::Error>),
/// The registry returned a "403 Forbidden"
#[error("Username or password are incorrect")]
AuthenticationError(String),
/// Reading the wheel failed
#[error("IO Error")]
IoError(#[source] io::Error),
/// The registry returned something else than 200
#[error("Failed to upload the wheel with status {0}: {1}")]
StatusCodeError(String, String),
/// File already exists
#[error("File already exists: {0}")]
FileExistsError(String),
/// Read package metadata error
#[error("Could not read the metadata from the package at {0}")]
PkgInfoError(PathBuf, #[source] python_pkginfo::Error),
/// TLS error
#[cfg(feature = "native-tls")]
#[error("TLS Error")]
TlsError(#[source] native_tls::Error),
}
impl From<io::Error> for UploadError {
fn from(error: io::Error) -> Self {
UploadError::IoError(error)
}
}
impl From<ureq::Error> for UploadError {
fn from(error: ureq::Error) -> Self {
UploadError::UreqError(Box::new(error))
}
}
#[cfg(feature = "native-tls")]
impl From<native_tls::Error> for UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
Registry {
username,
password,
url,
}
}
}
/// Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
// So we need this fallback for pycharm on windows
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
| config
}
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceResponse {
audience: String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in.pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, ®istry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUESTS_CA_BUNDLE"))
.or_else(|| env::var_os("CURL_CA_BUNDLE"))
}
// Prefer rustls if both native-tls and rustls features are enabled
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
let mut tls_builder = native_tls::TlsConnector::builder();
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
for cert in rustls_pemfile::certs(&mut reader)? {
tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
}
}
builder = builder.tls_connector(Arc::new(tls_builder.build()?));
Ok(builder.build())
}
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
let certs = rustls_pemfile::certs(&mut reader)?;
let mut root_certs = rustls::RootCertStore::empty();
root_certs.add_parsable_certificates(&certs);
let client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
Ok(builder.tls_config(Arc::new(client_config)).build())
} else {
Ok(builder.build())
}
}
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
Ok(builder.build())
}
/// Uploads a single wheel to the registry
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
let hash_hex = hash_file(wheel_path)?;
let dist = python_pkginfo::Distribution::new(wheel_path)
.map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
let metadata = dist.metadata();
let mut api_metadata = vec![
(":action", "file_upload".to_string()),
("sha256_digest", hash_hex),
("protocol_version", "1".to_string()),
("metadata_version", metadata.metadata_version.clone()),
("name", canonicalize_name(&metadata.name)),
("version", metadata.version.clone()),
("pyversion", dist.python_version().to_string()),
("filetype", dist.r#type().to_string()),
];
let mut add_option = |name, value: &Option<String>| {
if let Some(some) = value.clone() {
api_metadata.push((name, some));
}
};
// https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
add_option("summary", &metadata.summary);
add_option("description", &metadata.description);
add_option(
"description_content_type",
&metadata.description_content_type,
);
add_option("author", &metadata.author);
add_option("author_email", &metadata.author_email);
add_option("maintainer", &metadata.maintainer);
add_option("maintainer_email", &metadata.maintainer_email);
add_option("license", &metadata.license);
add_option("keywords", &metadata.keywords);
add_option("home_page", &metadata.home_page);
add_option("download_url", &metadata.download_url);
add_option("requires_python", &metadata.requires_python);
add_option("summary", &metadata.summary);
if metadata.requires_python.is_none() {
// GitLab PyPI repository API implementation requires this metadata field
// and twine always includes it in the request, even when it's empty.
api_metadata.push(("requires_python", "".to_string()));
}
let mut add_vec = |name, values: &[String]| {
for i in values {
api_metadata.push((name, i.clone()));
}
};
add_vec("classifiers", &metadata.classifiers);
add_vec("platform", &metadata.platforms);
add_vec("requires_dist", &metadata.requires_dist);
add_vec("provides_dist", &metadata.provides_dist);
add_vec("obsoletes_dist", &metadata.obsoletes_dist);
add_vec("requires_external", &metadata.requires_external);
add_vec("project_urls", &metadata.project_urls);
let wheel = File::open(wheel_path)?;
let wheel_name = wheel_path
.file_name()
.expect("Wheel path has a file name")
.to_string_lossy();
let mut form = Multipart::new();
for (key, value) in api_metadata {
form.add_text(key, value);
}
form.add_stream("content", &wheel, Some(wheel_name), None);
let multipart_data = form.prepare().map_err(|e| e.error)?;
let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));
let agent = http_agent()?;
let response = agent
.post(registry.url.as_str())
.set(
"Content-Type",
&format!(
"multipart/form-data; boundary={}",
multipart_data.boundary()
),
)
.set(
"User-Agent",
&format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
)
.set("Authorization", &format!("Basic {encoded}"))
.send(multipart_data);
match response {
Ok(_) => Ok(()),
Err(ureq::Error::Status(status, response)) => {
let err_text = response.into_string().unwrap_or_else(|e| {
format!(
"The registry should return some text, \
even in case of an error, but didn't ({e})"
)
});
debug!("Upload error response: {}", err_text);
// Detect FileExistsError the way twine does
// https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
if status == 403 {
if err_text.contains("overwrite artifact") {
// Artifactory (https://jfrog.com/artifactory/)
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::AuthenticationError(err_text))
}
} else {
let status_string = status.to_string();
if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
// PyPI / TestPyPI
|| (status == 400 && err_text.contains("already exists"))
// Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
|| (status == 400 && err_text.contains("updating asset"))
// # Gitlab Enterprise Edition (https://about.gitlab.com)
|| (status == 400 && err_text.contains("already been taken"))
{
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::StatusCodeError(status_string, err_text))
}
}
}
Err(err) => Err(UploadError::UreqError(err.into())),
}
}
/// Handles authentication/keyring integration and retrying of the publish subcommand
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
let registry = complete_registry(publish)?;
eprintln!("🚀 Uploading {} packages", items.len());
for i in items {
let upload_result = upload(®istry, i);
match upload_result {
Ok(()) => (),
Err(UploadError::AuthenticationError(msg)) => {
let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
let title = title_re
.captures(&msg)
.and_then(|c| c.get(1))
.map(|m| m.as_str());
match title {
Some(title) => {
eprintln!("⛔ {title}");
}
None => eprintln!("⛔ Username and/or password are wrong"),
}
#[cfg(feature = "keyring")]
{
// Delete the wrong password from the keyring
let old_username = registry.username;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
.and_then(|keyring| keyring.delete_password())
{
| config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
| conditional_block |
upload.rs | #[arg(short, long, env = "MATURIN_USERNAME")]
username: Option<String>,
/// Password for pypi or your custom registry.
///
/// Can also be set via MATURIN_PASSWORD environment variable.
#[arg(short, long, env = "MATURIN_PASSWORD", hide_env_values = true)]
password: Option<String>,
/// Continue uploading files if one already exists.
/// (Only valid when uploading to PyPI. Other implementations may not support this.)
#[arg(long)]
skip_existing: bool,
/// Do not interactively prompt for username/password if the required credentials are missing.
///
/// Can also be set via MATURIN_NON_INTERACTIVE environment variable.
#[arg(long, env = "MATURIN_NON_INTERACTIVE")]
non_interactive: bool,
}
impl PublishOpt {
const DEFAULT_REPOSITORY_URL: &'static str = "https://upload.pypi.org/legacy/";
const TEST_REPOSITORY_URL: &'static str = "https://test.pypi.org/legacy/";
/// Set to non interactive mode if we're running on CI
pub fn non_interactive_on_ci(&mut self) {
if!self.non_interactive && env::var("CI").map(|v| v == "true").unwrap_or_default() {
eprintln!("🎛️ Running in non-interactive mode on CI");
self.non_interactive = true;
}
}
}
/// Error type for different types of errors that can happen when uploading a
/// wheel.
///
/// The most interesting type is AuthenticationError because it allows asking
/// the user to reenter the password
#[derive(Error, Debug)]
#[error("Uploading to the registry failed")]
pub enum UploadError {
/// Any ureq error
#[error("Http error")]
UreqError(#[source] Box<ureq::Error>),
/// The registry returned a "403 Forbidden"
#[error("Username or password are incorrect")]
AuthenticationError(String),
/// Reading the wheel failed
#[error("IO Error")]
IoError(#[source] io::Error),
/// The registry returned something else than 200
#[error("Failed to upload the wheel with status {0}: {1}")]
StatusCodeError(String, String),
/// File already exists
#[error("File already exists: {0}")]
FileExistsError(String),
/// Read package metadata error
#[error("Could not read the metadata from the package at {0}")]
PkgInfoError(PathBuf, #[source] python_pkginfo::Error),
/// TLS error
#[cfg(feature = "native-tls")]
#[error("TLS Error")]
TlsError(#[source] native_tls::Error),
}
impl From<io::Error> for UploadError {
fn from(error: io::Error) -> Self {
UploadError::IoError(error)
}
}
impl From<ureq::Error> for UploadError {
fn from(error: ureq::Error) -> Self {
UploadError::UreqError(Box::new(error))
}
}
#[cfg(feature = "native-tls")]
impl From<native_tls::Error> for UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
| // Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
// So we need this fallback for pycharm on windows
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
config
}
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceResponse {
audience: String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in.pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, ®istry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUESTS_CA_BUNDLE"))
.or_else(|| env::var_os("CURL_CA_BUNDLE"))
}
// Prefer rustls if both native-tls and rustls features are enabled
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
let mut tls_builder = native_tls::TlsConnector::builder();
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
for cert in rustls_pemfile::certs(&mut reader)? {
tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
}
}
builder = builder.tls_connector(Arc::new(tls_builder.build()?));
Ok(builder.build())
}
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
let certs = rustls_pemfile::certs(&mut reader)?;
let mut root_certs = rustls::RootCertStore::empty();
root_certs.add_parsable_certificates(&certs);
let client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
Ok(builder.tls_config(Arc::new(client_config)).build())
} else {
Ok(builder.build())
}
}
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
Ok(builder.build())
}
/// Uploads a single wheel to the registry
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
let hash_hex = hash_file(wheel_path)?;
let dist = python_pkginfo::Distribution::new(wheel_path)
.map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
let metadata = dist.metadata();
let mut api_metadata = vec![
(":action", "file_upload".to_string()),
("sha256_digest", hash_hex),
("protocol_version", "1".to_string()),
("metadata_version", metadata.metadata_version.clone()),
("name", canonicalize_name(&metadata.name)),
("version", metadata.version.clone()),
("pyversion", dist.python_version().to_string()),
("filetype", dist.r#type().to_string()),
];
let mut add_option = |name, value: &Option<String>| {
if let Some(some) = value.clone() {
api_metadata.push((name, some));
}
};
// https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
add_option("summary", &metadata.summary);
add_option("description", &metadata.description);
add_option(
"description_content_type",
&metadata.description_content_type,
);
add_option("author", &metadata.author);
add_option("author_email", &metadata.author_email);
add_option("maintainer", &metadata.maintainer);
add_option("maintainer_email", &metadata.maintainer_email);
add_option("license", &metadata.license);
add_option("keywords", &metadata.keywords);
add_option("home_page", &metadata.home_page);
add_option("download_url", &metadata.download_url);
add_option("requires_python", &metadata.requires_python);
add_option("summary", &metadata.summary);
if metadata.requires_python.is_none() {
// GitLab PyPI repository API implementation requires this metadata field
// and twine always includes it in the request, even when it's empty.
api_metadata.push(("requires_python", "".to_string()));
}
let mut add_vec = |name, values: &[String]| {
for i in values {
api_metadata.push((name, i.clone()));
}
};
add_vec("classifiers", &metadata.classifiers);
add_vec("platform", &metadata.platforms);
add_vec("requires_dist", &metadata.requires_dist);
add_vec("provides_dist", &metadata.provides_dist);
add_vec("obsoletes_dist", &metadata.obsoletes_dist);
add_vec("requires_external", &metadata.requires_external);
add_vec("project_urls", &metadata.project_urls);
let wheel = File::open(wheel_path)?;
let wheel_name = wheel_path
.file_name()
.expect("Wheel path has a file name")
.to_string_lossy();
let mut form = Multipart::new();
for (key, value) in api_metadata {
form.add_text(key, value);
}
form.add_stream("content", &wheel, Some(wheel_name), None);
let multipart_data = form.prepare().map_err(|e| e.error)?;
let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));
let agent = http_agent()?;
let response = agent
.post(registry.url.as_str())
.set(
"Content-Type",
&format!(
"multipart/form-data; boundary={}",
multipart_data.boundary()
),
)
.set(
"User-Agent",
&format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
)
.set("Authorization", &format!("Basic {encoded}"))
.send(multipart_data);
match response {
Ok(_) => Ok(()),
Err(ureq::Error::Status(status, response)) => {
let err_text = response.into_string().unwrap_or_else(|e| {
format!(
"The registry should return some text, \
even in case of an error, but didn't ({e})"
)
});
debug!("Upload error response: {}", err_text);
// Detect FileExistsError the way twine does
// https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
if status == 403 {
if err_text.contains("overwrite artifact") {
// Artifactory (https://jfrog.com/artifactory/)
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::AuthenticationError(err_text))
}
} else {
let status_string = status.to_string();
if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
// PyPI / TestPyPI
|| (status == 400 && err_text.contains("already exists"))
// Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
|| (status == 400 && err_text.contains("updating asset"))
// # Gitlab Enterprise Edition (https://about.gitlab.com)
|| (status == 400 && err_text.contains("already been taken"))
{
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::StatusCodeError(status_string, err_text))
}
}
}
Err(err) => Err(UploadError::UreqError(err.into())),
}
}
/// Handles authentication/keyring integration and retrying of the publish subcommand
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
let registry = complete_registry(publish)?;
eprintln!("🚀 Uploading {} packages", items.len());
for i in items {
let upload_result = upload(®istry, i);
match upload_result {
Ok(()) => (),
Err(UploadError::AuthenticationError(msg)) => {
let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
let title = title_re
.captures(&msg)
.and_then(|c| c.get(1))
.map(|m| m.as_str());
match title {
Some(title) => {
eprintln!("⛔ {title}");
}
None => eprintln!("⛔ Username and/or password are wrong"),
}
#[cfg(feature = "keyring")]
{
// Delete the wrong password from the keyring
let old_username = registry.username;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
.and_then(|keyring| keyring.delete_password())
{
| Registry {
username,
password,
url,
}
}
}
/ | identifier_body |
upload.rs | #[arg(short, long, env = "MATURIN_USERNAME")]
username: Option<String>,
/// Password for pypi or your custom registry.
///
/// Can also be set via MATURIN_PASSWORD environment variable.
#[arg(short, long, env = "MATURIN_PASSWORD", hide_env_values = true)]
password: Option<String>,
/// Continue uploading files if one already exists.
/// (Only valid when uploading to PyPI. Other implementations may not support this.)
#[arg(long)]
skip_existing: bool,
/// Do not interactively prompt for username/password if the required credentials are missing.
///
/// Can also be set via MATURIN_NON_INTERACTIVE environment variable.
#[arg(long, env = "MATURIN_NON_INTERACTIVE")]
non_interactive: bool,
}
impl PublishOpt {
const DEFAULT_REPOSITORY_URL: &'static str = "https://upload.pypi.org/legacy/";
const TEST_REPOSITORY_URL: &'static str = "https://test.pypi.org/legacy/";
/// Set to non interactive mode if we're running on CI
pub fn non_interactive_on_ci(&mut self) {
if!self.non_interactive && env::var("CI").map(|v| v == "true").unwrap_or_default() {
eprintln!("🎛️ Running in non-interactive mode on CI");
self.non_interactive = true;
}
}
}
/// Error type for different types of errors that can happen when uploading a
/// wheel.
///
/// The most interesting type is AuthenticationError because it allows asking
/// the user to reenter the password
#[derive(Error, Debug)]
#[error("Uploading to the registry failed")]
pub enum UploadError {
/// Any ureq error
#[error("Http error")]
UreqError(#[source] Box<ureq::Error>),
/// The registry returned a "403 Forbidden"
#[error("Username or password are incorrect")]
AuthenticationError(String),
/// Reading the wheel failed
#[error("IO Error")]
IoError(#[source] io::Error),
/// The registry returned something else than 200
#[error("Failed to upload the wheel with status {0}: {1}")]
StatusCodeError(String, String),
/// File already exists
#[error("File already exists: {0}")]
FileExistsError(String),
/// Read package metadata error
#[error("Could not read the metadata from the package at {0}")]
PkgInfoError(PathBuf, #[source] python_pkginfo::Error),
/// TLS error
#[cfg(feature = "native-tls")]
#[error("TLS Error")]
TlsError(#[source] native_tls::Error),
}
impl From<io::Error> for UploadError {
fn from(error: io::Error) -> Self {
UploadError::IoError(error)
}
}
impl From<ureq::Error> for UploadError {
fn from(error: ureq::Error) -> Self {
UploadError::UreqError(Box::new(error))
}
}
#[cfg(feature = "native-tls")]
impl From<native_tls::Error> for UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
Registry {
username,
password,
url,
}
}
}
/// Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
// So we need this fallback for pycharm on windows
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
config
}
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceRes | : String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in.pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
    let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, &registry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
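// For example, canonicalize_name("My_Package.Name") == "my-package-name":
// runs of `-`, `_` and `.` collapse into a single `-`, then the result is lowercased.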
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUESTS_CA_BUNDLE"))
.or_else(|| env::var_os("CURL_CA_BUNDLE"))
}
// Prefer rustls if both native-tls and rustls features are enabled
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
let mut tls_builder = native_tls::TlsConnector::builder();
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
for cert in rustls_pemfile::certs(&mut reader)? {
tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
}
}
builder = builder.tls_connector(Arc::new(tls_builder.build()?));
Ok(builder.build())
}
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
let certs = rustls_pemfile::certs(&mut reader)?;
let mut root_certs = rustls::RootCertStore::empty();
root_certs.add_parsable_certificates(&certs);
let client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
Ok(builder.tls_config(Arc::new(client_config)).build())
} else {
Ok(builder.build())
}
}
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
Ok(builder.build())
}
/// Uploads a single wheel to the registry
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
let hash_hex = hash_file(wheel_path)?;
let dist = python_pkginfo::Distribution::new(wheel_path)
.map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
let metadata = dist.metadata();
let mut api_metadata = vec![
(":action", "file_upload".to_string()),
("sha256_digest", hash_hex),
("protocol_version", "1".to_string()),
("metadata_version", metadata.metadata_version.clone()),
("name", canonicalize_name(&metadata.name)),
("version", metadata.version.clone()),
("pyversion", dist.python_version().to_string()),
("filetype", dist.r#type().to_string()),
];
let mut add_option = |name, value: &Option<String>| {
if let Some(some) = value.clone() {
api_metadata.push((name, some));
}
};
// https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
add_option("summary", &metadata.summary);
add_option("description", &metadata.description);
add_option(
"description_content_type",
&metadata.description_content_type,
);
add_option("author", &metadata.author);
add_option("author_email", &metadata.author_email);
add_option("maintainer", &metadata.maintainer);
add_option("maintainer_email", &metadata.maintainer_email);
add_option("license", &metadata.license);
add_option("keywords", &metadata.keywords);
add_option("home_page", &metadata.home_page);
add_option("download_url", &metadata.download_url);
add_option("requires_python", &metadata.requires_python);
add_option("summary", &metadata.summary);
if metadata.requires_python.is_none() {
// GitLab PyPI repository API implementation requires this metadata field
// and twine always includes it in the request, even when it's empty.
api_metadata.push(("requires_python", "".to_string()));
}
let mut add_vec = |name, values: &[String]| {
for i in values {
api_metadata.push((name, i.clone()));
}
};
add_vec("classifiers", &metadata.classifiers);
add_vec("platform", &metadata.platforms);
add_vec("requires_dist", &metadata.requires_dist);
add_vec("provides_dist", &metadata.provides_dist);
add_vec("obsoletes_dist", &metadata.obsoletes_dist);
add_vec("requires_external", &metadata.requires_external);
add_vec("project_urls", &metadata.project_urls);
let wheel = File::open(wheel_path)?;
let wheel_name = wheel_path
.file_name()
.expect("Wheel path has a file name")
.to_string_lossy();
let mut form = Multipart::new();
for (key, value) in api_metadata {
form.add_text(key, value);
}
form.add_stream("content", &wheel, Some(wheel_name), None);
let multipart_data = form.prepare().map_err(|e| e.error)?;
let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));
let agent = http_agent()?;
let response = agent
.post(registry.url.as_str())
.set(
"Content-Type",
&format!(
"multipart/form-data; boundary={}",
multipart_data.boundary()
),
)
.set(
"User-Agent",
&format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
)
.set("Authorization", &format!("Basic {encoded}"))
.send(multipart_data);
match response {
Ok(_) => Ok(()),
Err(ureq::Error::Status(status, response)) => {
let err_text = response.into_string().unwrap_or_else(|e| {
format!(
"The registry should return some text, \
even in case of an error, but didn't ({e})"
)
});
debug!("Upload error response: {}", err_text);
// Detect FileExistsError the way twine does
// https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
if status == 403 {
if err_text.contains("overwrite artifact") {
// Artifactory (https://jfrog.com/artifactory/)
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::AuthenticationError(err_text))
}
} else {
let status_string = status.to_string();
if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
// PyPI / TestPyPI
|| (status == 400 && err_text.contains("already exists"))
// Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
|| (status == 400 && err_text.contains("updating asset"))
            // GitLab Enterprise Edition (https://about.gitlab.com)
|| (status == 400 && err_text.contains("already been taken"))
{
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::StatusCodeError(status_string, err_text))
}
}
}
Err(err) => Err(UploadError::UreqError(err.into())),
}
}
/// Handles authentication/keyring integration and retrying of the publish subcommand
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
let registry = complete_registry(publish)?;
eprintln!("🚀 Uploading {} packages", items.len());
for i in items {
        let upload_result = upload(&registry, i);
match upload_result {
Ok(()) => (),
Err(UploadError::AuthenticationError(msg)) => {
let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
let title = title_re
.captures(&msg)
.and_then(|c| c.get(1))
.map(|m| m.as_str());
match title {
Some(title) => {
eprintln!("⛔ {title}");
}
None => eprintln!("⛔ Username and/or password are wrong"),
}
#[cfg(feature = "keyring")]
{
// Delete the wrong password from the keyring
let old_username = registry.username;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
.and_then(|keyring| keyring.delete_password())
{
                        // NOTE: the source row is truncated here; the remainder of this
                        // function is a plausible reconstruction (assumed), not verbatim
                        // maturin code.
                        Ok(()) => eprintln!("🔑 Removed invalid credentials from the keyring"),
                        Err(err) => {
                            eprintln!("⚠️ Warning: Failed to remove the password from the keyring: {err}")
                        }
                    }
                }
                bail!("Username and/or password are wrong");
            }
            Err(err) => return Err(err.into()),
        }
    }
    eprintln!("✨ Packages uploaded successfully");
    Ok(())
}
lib.rs | use std::ops::{Deref, DerefMut};
use std::any::Any;
use std::fmt::Debug;
use std::fmt;
pub use cod_node_derive::Node;
mod id;
mod context;
mod danger_zone;
#[cfg(test)]
mod test;
pub use id::ID;
use id::new_id;
use context::{CONTEXT, Context, PollReason, Replacement, IDMapUpdate};
use danger_zone::downcast_rc;
/// Can be changed to Arc later. However, the design is not thread-aware
/// when mutating. So appropriate !Send/!Syncs need to be defined before changing.
pub use std::rc::Rc as Rc;
pub use std::rc::Weak as Weak;
pub use im_rc as im;
#[derive(Clone, Debug)]
pub struct Header {
id: ID,
parent_id: Option<ID>,
}
impl Header {
pub fn new() -> Self {
Header {
id: new_id(),
parent_id: None
}
}
}
impl Default for Header {
fn default() -> Self {
Self::new()
}
}
pub trait Node: 'static {
fn header(&self) -> &Header;
fn header_mut(&mut self) -> &mut Header;
/// Optional: You may implement this method for your struct if it does something special.
///
/// For example, you would want to do this if `.clone()` does not actually clone
/// all the `Child` instances in the struct.
///
/// Cod will use this when updating the ancestors of a node that was mutated.
///
    /// The implementation should find the `Child` instance which corresponds to the
/// given ID, and call `.poll_mut()` on it. You should not do anything else
/// with the `Child`s, doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
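// A minimal sketch of the specialized polling hooks described above (illustrative
// only; `DemoLeaf`/`DemoParent` are assumed example types, not part of this crate):

#[derive(Clone, Debug)]
pub struct DemoLeaf {
    pub header: Header,
    pub value: u32,
}

impl Node for DemoLeaf {
    fn header(&self) -> &Header { &self.header }
    fn header_mut(&mut self) -> &mut Header { &mut self.header }
}

#[derive(Clone, Debug)]
pub struct DemoParent {
    pub header: Header,
    pub children: Vec<Child<DemoLeaf>>,
}

impl Node for DemoParent {
    fn header(&self) -> &Header { &self.header }
    fn header_mut(&mut self) -> &mut Header { &mut self.header }

    // Find the `Child` that corresponds to `id` and poll only that one.
    fn poll_child_mut(&mut self, id: ID) {
        if let Some(child) = self.children.iter_mut().find(|c| c.get_id() == id) {
            child.poll_mut();
        }
    }
    fn implements_poll_child(&self) -> bool { true }

    // Poll every `Child` this node owns directly (not recursively).
    fn poll_all(&self) {
        for child in &self.children {
            child.poll();
        }
    }
    fn poll_all_mut(&mut self) {
        for child in &mut self.children {
            child.poll_mut();
        }
    }
    fn implements_poll_all(&self) -> bool { true }
}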
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
    fn dyn_clone(&self) -> Rc<dyn NodeClone> {
        Rc::new(self.clone())
    }
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
pub fn get_ref(&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
    // method that takes a new parent. Similarly, there should be a helper
// for moving Childs to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
}
impl<T: NodeClone> Drop for Child<T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
/// Due to implementation details, this has to clone the root and all its
/// children.
pub fn new(root: &R) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
// this initiates a deep clone because mutation context is active
let root = Rc::new(root.clone());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
pub fn get_mut<'a, T: NodeClone + Clone>(&'a mut self, mut node: Rc<T>) -> MutRef<'a, R, T> {
Rc::make_mut(&mut node);
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
MutRef {
state: self,
node
}
}
pub fn ref_from_id(&self, id: ID) -> Option<Rc<dyn NodeClone>> {
Weak::upgrade(self.id_lookup.get(&id)?)
}
pub fn root(&self) -> &R {
&self.root
}
pub fn root_ref(&self) -> Rc<R> {
Rc::clone(&self.root)
}
fn apply_updates(&mut self, updates: impl Iterator<Item=IDMapUpdate>) {
for update in updates {
match update {
IDMapUpdate::Set(id, new_ref) => {
self.id_lookup.insert(id, new_ref);
},
IDMapUpdate::Erase(id) => {
self.id_lookup.remove(&id);
},
}
}
}
}
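// Usage sketch (illustrative; uses the assumed `Demo*` example types above):
// states are persistent, so an old `State` can be kept while a new one is mutated.
#[allow(dead_code)]
fn demo_state_usage() {
    let state = State::construct(|| {
        let mut root = DemoParent { header: Header::new(), children: Vec::new() };
        let leaf = Child::with_parent(&root, DemoLeaf { header: Header::new(), value: 1 });
        root.children.push(leaf);
        root
    });
    let old = state.clone(); // cheap persistent copy, shares structure
    let mut state = state;
    let leaf_ref = state.root().children[0].get_ref();
    state.get_mut(leaf_ref).value = 2; // the MutRef commits when it is dropped
    // `state` now observes value == 2 while `old` still observes value == 1.
    let _ = old;
}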
pub struct MutRef<'a, R: NodeClone + Clone, T: NodeClone> {
state: &'a mut State<R>,
node: Rc<T>,
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Deref for MutRef<'a, R, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.node
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> DerefMut for MutRef<'a, R, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the node Rc is mutably borrowed and
// made unique upon creation of self.
Rc::get_mut(&mut self.node).unwrap()
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Drop for MutRef<'a, R, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_mutate(c));
});
self.state.id_lookup.insert(self.node.header().id, Rc::downgrade(&self.node) as Weak<dyn NodeClone>);
let mut prev_node = Rc::clone(&self.node) as Rc<dyn NodeClone>;
while let Some(parent_id) = prev_node.header().parent_id {
let parent = Weak::upgrade(self.state.id_lookup.get(&parent_id).unwrap()).unwrap();
CONTEXT.with(|c| {
Context::set_replacement(c,
Replacement { id: prev_node.header().id, replace_with: Rc::clone(&prev_node) as Rc<dyn NodeClone> }
);
});
prev_node = parent.dyn_clone();
CONTEXT.with(|c| {
                if !Context::finish_replacement(c) {
panic!("Cod: Could not find associated `Child` while traversing up")
}
});
}
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_replacement(c));
});
self.state.root = downcast_rc(prev_node).unwrap();
}
}
lib.rs | use std::ops::{Deref, DerefMut};
use std::any::Any;
use std::fmt::Debug;
use std::fmt;
pub use cod_node_derive::Node;
mod id;
mod context;
mod danger_zone;
#[cfg(test)]
mod test;
pub use id::ID;
use id::new_id;
use context::{CONTEXT, Context, PollReason, Replacement, IDMapUpdate};
use danger_zone::downcast_rc;
/// Can be changed to Arc later. However, the design is not thread-aware
/// when mutating. So appropriate !Send/!Syncs need to be defined before changing.
pub use std::rc::Rc as Rc;
pub use std::rc::Weak as Weak;
pub use im_rc as im;
#[derive(Clone, Debug)]
pub struct Header {
id: ID,
parent_id: Option<ID>,
}
impl Header {
pub fn new() -> Self {
Header {
id: new_id(),
parent_id: None
}
}
}
impl Default for Header {
fn default() -> Self {
Self::new()
}
}
pub trait Node: 'static {
fn header(&self) -> &Header;
fn header_mut(&mut self) -> &mut Header;
/// Optional: You may implement this method for your struct if it does something special.
///
/// For example, you would want to do this if `.clone()` does not actually clone
/// all the `Child` instances in the struct.
///
/// Cod will use this when updating the ancestors of a node that was mutated.
///
    /// The implementation should find the `Child` instance which corresponds to the
/// given ID, and call `.poll_mut()` on it. You should not do anything else
/// with the `Child`s, doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
fn dyn_clone(&self) -> Rc<dyn NodeClone> {
Rc::new(self.clone())
}
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
    pub fn get_ref(&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
    // method that takes a new parent. Similarly, there should be a helper
// for moving Childs to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
}
impl<T: NodeClone> Drop for Child<T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
/// Due to implementation details, this has to clone the root and all its
/// children.
pub fn new(root: &R) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
// this initiates a deep clone because mutation context is active
let root = Rc::new(root.clone());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
pub fn get_mut<'a, T: NodeClone + Clone>(&'a mut self, mut node: Rc<T>) -> MutRef<'a, R, T> {
Rc::make_mut(&mut node);
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
MutRef {
state: self,
node
}
}
pub fn ref_from_id(&self, id: ID) -> Option<Rc<dyn NodeClone>> {
Weak::upgrade(self.id_lookup.get(&id)?)
}
pub fn root(&self) -> &R {
&self.root
}
pub fn root_ref(&self) -> Rc<R> {
Rc::clone(&self.root)
}
fn apply_updates(&mut self, updates: impl Iterator<Item=IDMapUpdate>) {
for update in updates {
match update {
IDMapUpdate::Set(id, new_ref) => {
self.id_lookup.insert(id, new_ref);
},
IDMapUpdate::Erase(id) => {
self.id_lookup.remove(&id);
},
}
}
}
}
pub struct MutRef<'a, R: NodeClone + Clone, T: NodeClone> {
state: &'a mut State<R>,
node: Rc<T>,
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Deref for MutRef<'a, R, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.node
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> DerefMut for MutRef<'a, R, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the node Rc is mutably borrowed and
// made unique upon creation of self.
Rc::get_mut(&mut self.node).unwrap()
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Drop for MutRef<'a, R, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_mutate(c));
});
self.state.id_lookup.insert(self.node.header().id, Rc::downgrade(&self.node) as Weak<dyn NodeClone>);
let mut prev_node = Rc::clone(&self.node) as Rc<dyn NodeClone>;
while let Some(parent_id) = prev_node.header().parent_id {
let parent = Weak::upgrade(self.state.id_lookup.get(&parent_id).unwrap()).unwrap();
CONTEXT.with(|c| {
Context::set_replacement(c,
Replacement { id: prev_node.header().id, replace_with: Rc::clone(&prev_node) as Rc<dyn NodeClone> }
);
});
prev_node = parent.dyn_clone();
CONTEXT.with(|c| {
                if !Context::finish_replacement(c) {
panic!("Cod: Could not find associated `Child` while traversing up")
}
});
}
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_replacement(c));
});
self.state.root = downcast_rc(prev_node).unwrap();
}
}
lib.rs | use std::ops::{Deref, DerefMut};
use std::any::Any;
use std::fmt::Debug;
use std::fmt;
pub use cod_node_derive::Node;
mod id;
mod context;
mod danger_zone;
#[cfg(test)]
mod test;
pub use id::ID;
use id::new_id;
use context::{CONTEXT, Context, PollReason, Replacement, IDMapUpdate};
use danger_zone::downcast_rc;
/// Can be changed to Arc later. However, the design is not thread-aware
/// when mutating. So appropriate !Send/!Syncs need to be defined before changing.
pub use std::rc::Rc as Rc;
pub use std::rc::Weak as Weak;
pub use im_rc as im;
#[derive(Clone, Debug)]
pub struct Header {
id: ID,
parent_id: Option<ID>,
}
impl Header {
pub fn new() -> Self {
Header {
id: new_id(),
parent_id: None
}
}
}
impl Default for Header {
fn default() -> Self {
Self::new()
}
}
pub trait Node: 'static {
fn header(&self) -> &Header;
fn header_mut(&mut self) -> &mut Header;
/// Optional: You may implement this method for your struct if it does something special.
///
/// For example, you would want to do this if `.clone()` does not actually clone
/// all the `Child` instances in the struct.
///
/// Cod will use this when updating the ancestors of a node that was mutated.
///
    /// The implementation should find the `Child` instance which corresponds to the
/// given ID, and call `.poll_mut()` on it. You should not do anything else
/// with the `Child`s, doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
fn dyn_clone(&self) -> Rc<dyn NodeClone> {
Rc::new(self.clone())
}
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
pub fn get_ref(&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
    // method that takes a new parent. Similarly, there should be a helper
// for moving Childs to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
    }
}
impl<T: NodeClone> Drop for Child<T> {
    fn drop(&mut self) {
        // XXX: This should only create inconsistencies in the newest version of the data,
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
/// Due to implementation details, this has to clone the root and all its
/// children.
pub fn new(root: &R) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
// this initiates a deep clone because mutation context is active
let root = Rc::new(root.clone());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone>);
state
}
pub fn get_mut<'a, T: NodeClone + Clone>(&'a mut self, mut node: Rc<T>) -> MutRef<'a, R, T> {
Rc::make_mut(&mut node);
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
MutRef {
state: self,
node
}
}
pub fn ref_from_id(&self, id: ID) -> Option<Rc<dyn NodeClone>> {
Weak::upgrade(self.id_lookup.get(&id)?)
}
pub fn root(&self) -> &R {
&self.root
}
pub fn root_ref(&self) -> Rc<R> {
Rc::clone(&self.root)
}
fn apply_updates(&mut self, updates: impl Iterator<Item=IDMapUpdate>) {
for update in updates {
match update {
IDMapUpdate::Set(id, new_ref) => {
self.id_lookup.insert(id, new_ref);
},
IDMapUpdate::Erase(id) => {
self.id_lookup.remove(&id);
},
}
}
}
}
pub struct MutRef<'a, R: NodeClone + Clone, T: NodeClone> {
state: &'a mut State<R>,
node: Rc<T>,
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Deref for MutRef<'a, R, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.node
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> DerefMut for MutRef<'a, R, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the node Rc is mutably borrowed and
// made unique upon creation of self.
Rc::get_mut(&mut self.node).unwrap()
}
}
impl<'a, R: NodeClone + Clone, T: NodeClone> Drop for MutRef<'a, R, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_mutate(c));
});
self.state.id_lookup.insert(self.node.header().id, Rc::downgrade(&self.node) as Weak<dyn NodeClone>);
let mut prev_node = Rc::clone(&self.node) as Rc<dyn NodeClone>;
while let Some(parent_id) = prev_node.header().parent_id {
let parent = Weak::upgrade(self.state.id_lookup.get(&parent_id).unwrap()).unwrap();
CONTEXT.with(|c| {
Context::set_replacement(c,
Replacement { id: prev_node.header().id, replace_with: Rc::clone(&prev_node) as Rc<dyn NodeClone> }
);
});
prev_node = parent.dyn_clone();
CONTEXT.with(|c| {
                if !Context::finish_replacement(c) {
panic!("Cod: Could not find associated `Child` while traversing up")
}
});
}
CONTEXT.with(|c| {
self.state.apply_updates(Context::end_replacement(c));
});
self.state.root = downcast_rc(prev_node).unwrap();
}
}
main.rs | extern crate hyper;
extern crate rustc_serialize;
extern crate url;
mod errors;
mod structs;
use errors::*;
use errors::ResourceError::*;
use structs::*;
use hyper::client::Client;
use std::fs::File;
use hyper::header::*;
use std::io::prelude::*;
use std::path::Path;
use std::thread;
use std::sync::Arc;
use rustc_serialize::json;
use hyper::mime::{Mime};
use std::sync::mpsc;
use hyper::status::StatusCode;
use url::Url;
//use std::fs;
//const TEST_ID : &'static str = "5b11f4ce-a62d-471e-81fc-a69a8278c7da";
const USER_AGENT: &'static str = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36";
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
macro_rules! musicbrainz_url {
($id : expr) => ( format!("http://musicbrainz.org/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
//($id : expr) => ( format!("http://localhost:8000/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
}
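// e.g. musicbrainz_url!("5b11f4ce-a62d-471e-81fc-a69a8278c7da") expands to
// "http://musicbrainz.org/ws/2/artist/5b11f4ce-a62d-471e-81fc-a69a8278c7da?&fmt=json&inc=url-rels+release-groups"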
macro_rules! musicbrainz_file {
($id : expr) => ( format!("mb_{}.json", $id))
}
macro_rules! cover_art_url {
($id : expr) => ( format!("http://coverartarchive.org/release-group/{}", $id) )
}
macro_rules! cover_art_file {
($id : expr) => ( format!("ca_{}.json", $id) )
}
#[allow(dead_code)]
#[allow(unused_must_use)]
fn filter_successful(resource: &str, mut resp : hyper::client::response::Response) -> Result<String, TypedIOError>
{
match resp.status {
StatusCode::Ok => {
let mut s = String::new();
resp.read_to_string(&mut s);
Ok(s)
},
code @ _ => Err( TypedIOError {
resource : resource.to_string(),
            cause: hyper::Error::Io(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Service responded with status code {}", code)))
})
}
}
#[allow(dead_code)]
struct SimpleFs {
directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
    fn store(&self, id : &str, content: &str) {
        std::fs::create_dir_all(Path::new(&self.directory));
        let path = Path::new(&self.directory).join(id);
        if !path.exists() {
            File::create(path)
                .and_then(|mut f| f.write_all(content.as_bytes()));
        };
    }
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
    where F: Send + 'static + Fn(&str) -> Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
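// Note: the default `query_cover_art` above fans out one thread per album and
// collects the results over mpsc channels; a failed cover-art lookup degrades
// into an `AlbumReference` built from the error (via `From<ResourceError>`)
// rather than failing the whole artist query.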
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
        .header(UserAgent(USER_AGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
    where F: Send + 'static + Fn(&str) -> Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
fn image_from_cover_art_response(payload : &str) -> String {
let body : CoverArtResponse = json::decode(&payload).unwrap();
body.images.into_iter().find(|item| item.front).unwrap().image
}
#[test]
fn test_image_from_cover_art_response() {
let payload = "{\"images\":[{\"front\":true,\"image\":\"http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg\"}]}";
let response = image_from_cover_art_response(payload);
assert_eq!("http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg", response);
}
fn process_mb_response(payload: &str) -> ArtistReference {
let a: ArtistReference = json::decode(payload).unwrap();
a
}
enum Provider {
Musicbrainz,
CoverArt
}
impl Provider {
fn fs(&self) -> SimpleFs {
match *self {
Provider::Musicbrainz => SimpleFs { directory : "tmp".to_string() },
Provider::CoverArt => SimpleFs { directory : "tmp".to_string() }
}
}
fn extract_id(&self, url: &str) -> String {
let parsed : Url = Url::parse(url).unwrap();
parsed.path_segments().unwrap().last().unwrap().to_string()
}
fn format_file_name (&self, id : &str) -> String {
match *self {
Provider::Musicbrainz => musicbrainz_file!(id),
Provider::CoverArt => cover_art_file!(id)
}
}
}
#[test]
fn test_extract_id_from_url() {
let mb : String = musicbrainz_url!("1289836171-250");
assert_eq!("1289836171-250", Provider::extract_id(&mb));
}
#[test]
fn test_format_file_name() {
assert_eq!("ca_123", Provider::COVER_ART.format_file_name("123"));
assert_eq!("mb_123", Provider::MUSICBRAINZ.format_file_name("123"));
}
#[cfg(not(feature="meshup_mode_web"))]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
FileMeshup.query(id)
}
#[cfg(feature="meshup_mode_web")]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
WebMeshup.query(id)
}
fn main() {
let args : Vec<String> = std::env::args().into_iter().collect();
let id = &args[1];
let web_response = query(id).unwrap();
print!("{}", json::encode(&web_response).unwrap())
}
main.rs | extern crate hyper;
extern crate rustc_serialize;
extern crate url;
mod errors;
mod structs;
use errors::*;
use errors::ResourceError::*;
use structs::*;
use hyper::client::Client;
use std::fs::File;
use hyper::header::*;
use std::io::prelude::*;
use std::path::Path;
use std::thread;
use std::sync::Arc;
use rustc_serialize::json;
use hyper::mime::{Mime};
use std::sync::mpsc;
use hyper::status::StatusCode;
use url::Url;
//use std::fs;
//const TEST_ID : &'static str = "5b11f4ce-a62d-471e-81fc-a69a8278c7da";
const USER_AGENT: &'static str = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36";
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
macro_rules! musicbrainz_url {
($id : expr) => ( format!("http://musicbrainz.org/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
//($id : expr) => ( format!("http://localhost:8000/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
}
macro_rules! musicbrainz_file {
($id : expr) => ( format!("mb_{}.json", $id))
}
macro_rules! cover_art_url {
($id : expr) => ( format!("http://coverartarchive.org/release-group/{}", $id) )
}
macro_rules! cover_art_file {
($id : expr) => ( format!("ca_{}.json", $id) )
}
#[allow(dead_code)]
#[allow(unused_must_use)]
fn filter_successful(resource: &str, mut resp : hyper::client::response::Response) -> Result<String, TypedIOError>
{
match resp.status {
StatusCode::Ok => {
let mut s = String::new();
resp.read_to_string(&mut s);
Ok(s)
},
code @ _ => Err( TypedIOError {
resource : resource.to_string(),
            cause: hyper::Error::Io(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Service responded with status code {}", code)))
})
}
}
#[allow(dead_code)]
struct SimpleFs {
directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
fn store(&self, id : &str, content: &str) {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
        if !path.exists() {
File::create(path)
.and_then(|mut f| f.write_all(content.as_bytes()));
};
}
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
    where F: Send + 'static + Fn(&str) -> Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
        .header(UserAgent(USER_AGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str) -> Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
fn image_from_cover_art_response(payload : &str) -> String {
let body : CoverArtResponse = json::decode(&payload).unwrap();
body.images.into_iter().find(|item| item.front).unwrap().image
}
#[test]
fn test_image_from_cover_art_response() {
let payload = "{\"images\":[{\"front\":true,\"image\":\"http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg\"}]}";
let response = image_from_cover_art_response(payload);
assert_eq!("http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg", response);
}
fn process_mb_response(payload: &str) -> ArtistReference {
let a: ArtistReference = json::decode(payload).unwrap();
a
}
enum Provider {
Musicbrainz,
CoverArt
}
impl Provider {
fn | (&self) -> SimpleFs {
match *self {
Provider::Musicbrainz => SimpleFs { directory : "tmp".to_string() },
Provider::CoverArt => SimpleFs { directory : "tmp".to_string() }
}
}
fn extract_id(&self, url: &str) -> String {
let parsed : Url = Url::parse(url).unwrap();
parsed.path_segments().unwrap().last().unwrap().to_string()
}
fn format_file_name (&self, id : &str) -> String {
match *self {
Provider::Musicbrainz => musicbrainz_file!(id),
Provider::CoverArt => cover_art_file!(id)
}
}
}
#[test]
fn test_extract_id_from_url() {
let mb : String = musicbrainz_url!("1289836171-250");
assert_eq!("1289836171-250", Provider::extract_id(&mb));
}
#[test]
fn test_format_file_name() {
assert_eq!("ca_123", Provider::COVER_ART.format_file_name("123"));
assert_eq!("mb_123", Provider::MUSICBRAINZ.format_file_name("123"));
}
#[cfg(not(feature="meshup_mode_web"))]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
FileMeshup.query(id)
}
#[cfg(feature="meshup_mode_web")]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
WebMeshup.query(id)
}
fn main() {
let args : Vec<String> = std::env::args().collect();
let id = &args[1];
let web_response = query(id).unwrap();
print!("{}", json::encode(&web_response).unwrap())
}
| fs | identifier_name |
main.rs | extern crate hyper;
extern crate rustc_serialize;
extern crate url;
mod errors;
mod structs;
use errors::*;
use errors::ResourceError::*;
use structs::*;
use hyper::client::Client;
use std::fs::File;
use hyper::header::*;
use std::io::prelude::*;
use std::path::Path;
use std::thread;
use std::sync::Arc;
use rustc_serialize::json;
use hyper::mime::{Mime};
use std::sync::mpsc;
use hyper::status::StatusCode;
use url::Url;
//use std::fs;
//const TEST_ID : &'static str = "5b11f4ce-a62d-471e-81fc-a69a8278c7da";
const USER_ARGENT: &'static str = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36";
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
macro_rules! musicbrainz_url { | macro_rules! musicbrainz_file {
($id : expr) => ( format!("mb_{}.json", $id))
}
macro_rules! cover_art_url {
($id : expr) => ( format!("http://coverartarchive.org/release-group/{}", $id) )
}
macro_rules! cover_art_file {
($id : expr) => ( format!("ca_{}.json", $id) )
}
#[allow(dead_code)]
#[allow(unused_must_use)]
fn filter_successful(resource: &str, mut resp : hyper::client::response::Response) -> Result<String, TypedIOError>
{
match resp.status {
StatusCode::Ok => {
let mut s = String::new();
resp.read_to_string(&mut s);
Ok(s)
},
code @ _ => Err( TypedIOError {
resource : resource.to_string(),
cause: hyper::Error::Io(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Service responded with statuscode {}", code)))
})
}
}
#[allow(dead_code)]
struct SimpleFs {
directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
fn store(&self, id : &str, content: &str) {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
if !path.exists() {
File::create(path)
.and_then(|mut f| f.write_all(content.as_bytes()));
};
}
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str) -> Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
.header(UserAgent(USER_ARGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str) -> Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
fn image_from_cover_art_response(payload : &str) -> String {
let body : CoverArtResponse = json::decode(&payload).unwrap();
body.images.into_iter().find(|item| item.front).unwrap().image
}
#[test]
fn test_image_from_cover_art_response() {
let payload = "{\"images\":[{\"front\":true,\"image\":\"http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg\"}]}";
let response = image_from_cover_art_response(payload);
assert_eq!("http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg", response);
}
fn process_mb_response(payload: &str) -> ArtistReference {
let a: ArtistReference = json::decode(payload).unwrap();
a
}
enum Provider {
Musicbrainz,
CoverArt
}
impl Provider {
fn fs(&self) -> SimpleFs {
match *self {
Provider::Musicbrainz => SimpleFs { directory : "tmp".to_string() },
Provider::CoverArt => SimpleFs { directory : "tmp".to_string() }
}
}
fn extract_id(&self, url: &str) -> String {
let parsed : Url = Url::parse(url).unwrap();
parsed.path_segments().unwrap().last().unwrap().to_string()
}
fn format_file_name (&self, id : &str) -> String {
match *self {
Provider::Musicbrainz => musicbrainz_file!(id),
Provider::CoverArt => cover_art_file!(id)
}
}
}
#[test]
fn test_extract_id_from_url() {
let mb : String = musicbrainz_url!("1289836171-250");
assert_eq!("1289836171-250", Provider::extract_id(&mb));
}
#[test]
fn test_format_file_name() {
assert_eq!("ca_123", Provider::COVER_ART.format_file_name("123"));
assert_eq!("mb_123", Provider::MUSICBRAINZ.format_file_name("123"));
}
#[cfg(not(feature="meshup_mode_web"))]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
FileMeshup.query(id)
}
#[cfg(feature="meshup_mode_web")]
fn query(id: &str) -> Result<ArtistReference, ResourceError> {
WebMeshup.query(id)
}
fn main() {
let args : Vec<String> = std::env::args().collect();
let id = &args[1];
let web_response = query(id).unwrap();
print!("{}", json::encode(&web_response).unwrap())
} | ($id : expr) => ( format!("http://musicbrainz.org/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
//($id : expr) => ( format!("http://localhost:8000/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
}
| random_line_split |
memory.rs | use std::mem::transmute;
use world::World;
use geometry::Point;
use sprite::Sprite;
use util;
// In Hex's Cellar, the player's spells manipulate a u8[40] array of bytes that
// affects the world around her. This file gives a "RAM map" for that array.
// Color (3 bits = 8 bright colors) and character (5 bits, offset added to '!')
pub const PLAYER_APPEARANCE: u8 = 0x00;
// Begins a char[15].
pub const PLAYER_NAME: u8 = 0x01;
// Monster data is a (struct {u8, u8, u8})[5].
// So add (n * 3) to this, where 0 <= n <= 4, to get the address for the n-th monster,
// then add the offset of which byte you want.
pub const MONSTERS: u8 = 0x10;
pub const MONSTER_FLAGS: u8 = 0;
pub const MONSTER_POSITION: u8 = 1;
pub const MONSTER_HP: u8 = 2;
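// Worked example of the addressing scheme described above. This helper is
// illustrative only (not part of the original file): field is one of the
// MONSTER_* offsets, so e.g. monster 2's HP byte sits at 0x10 + 2*3 + 2 = 0x18.
#[allow(dead_code)]
fn monster_field_address(n: u8, field: u8) -> u8 {
debug_assert!(n <= 4 && field <= MONSTER_HP);
MONSTERS + n * 3 + field
}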
// An 8-bit bitmask (there are eight spells).
pub const SPELL_MEMORY: u8 = 0x1f;
// A 32-bit bitmask (there are thirty-two items).
pub const IDENTIFICATION: u8 = 0x20;
// Begins a u8[4]: one byte for each timer (poison, haste, charge, protect).
pub const TIMERS: u8 = 0x24;
// Begins a u8[8].
pub const INVENTORY: u8 = 0x28;
// Same as player appearance.
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
//??? = 0x36;
//??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
pub fn peek(world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO:???
0,
0x37 =>
// TODO:???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0;
monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{}, | world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO:???
{},
0x37 =>
// TODO:???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
}
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
// 0x00 and 0xff are non-printing and rendered as plain spaces here (assumed);
// 0x7f is CP437's house glyph.
const CP437: &'static [char; 256] = &[
' ', '☺', '☻', '♥', '♦', '♣', '♠', '•', '◘', '○', '◙', '♂', '♀', '♪', '♫', '☼',
'►', '◄', '↕', '‼', '¶', '§', '▬', '↨', '↑', '↓', '→', '←', '∟', '↔', '▲', '▼',
' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '⌂',
'Ç', 'ü', 'é', 'â', 'ä', 'à', 'å', 'ç', 'ê', 'ë', 'è', 'ï', 'î', 'ì', 'Ä', 'Å',
'É', 'æ', 'Æ', 'ô', 'ö', 'ò', 'û', 'ù', 'ÿ', 'Ö', 'Ü', '¢', '£', '¥', '₧', 'ƒ',
'á', 'í', 'ó', 'ú', 'ñ', 'Ñ', 'ª', 'º', '¿', '⌐', '¬', '½', '¼', '¡', '«', '»',
'░', '▒', '▓', '│', '┤', '╡', '╢', '╖', '╕', '╣', '║', '╗', '╝', '╜', '╛', '┐',
'└', '┴', '┬', '├', '─', '┼', '╞', '╟', '╚', '╔', '╩', '╦', '╠', '═', '╬', '╧',
'╨', '╤', '╥', '╙', '╘', '╒', '╓', '╫', '╪', '┘', '┌', '█', '▄', '▌', '▐', '▀',
'α', 'ß', 'Γ', 'π', 'Σ', 'σ', 'µ', 'τ', 'Φ', 'Θ', 'Ω', 'δ', '∞', 'φ', 'ε', '∩',
'≡', '±', '≥', '≤', '⌠', '⌡', '÷', '≈', '°', '∙', '·', '√', 'ⁿ', '²', '■', ' ',
];
pub fn player_name(world: &World) -> String {
let mut i: u8 = PLAYER_NAME;
let mut name = String::new();
while i < 0x40 {
let c = peek(world, i);
if c == 0 { break };
name.push(CP437[c as usize]);
i += 1;
}
return name;
} |
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA => | random_line_split |
memory.rs | use std::mem::transmute;
use world::World;
use geometry::Point;
use sprite::Sprite;
use util;
// In Hex's Cellar, the player's spells manipulate a u8[40] array of bytes that
// affects the world around her. This file gives a "RAM map" for that array.
// Color (3 bits = 8 bright colors) and character (5 bits, offset added to '!')
pub const PLAYER_APPEARANCE: u8 = 0x00;
// Begins a char[15].
pub const PLAYER_NAME: u8 = 0x01;
// Monster data is a (struct {u8, u8, u8})[5].
// So add (n * 3) to this, where 0 <= n <= 4, to get the address for the n-th monster,
// then add the offset of which byte you want.
pub const MONSTERS: u8 = 0x10;
pub const MONSTER_FLAGS: u8 = 0;
pub const MONSTER_POSITION: u8 = 1;
pub const MONSTER_HP: u8 = 2;
// An 8-bit bitmask (there are eight spells).
pub const SPELL_MEMORY: u8 = 0x1f;
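// Illustrative helper (assumed, not original code): spell i is known iff bit i
// of the SPELL_MEMORY byte is set, so a value of 0b0000_0101 means spells 0 and 2.
#[allow(dead_code)]
fn spell_known(mask: u8, spell_index: u8) -> bool {
mask & (1 << spell_index) != 0
}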
// A 32-bit bitmask (there are thirty-two items).
pub const IDENTIFICATION: u8 = 0x20;
// Begins a u8[4]: one byte for each timer (poison, haste, charge, protect).
pub const TIMERS: u8 = 0x24;
// Begins a u8[8].
pub const INVENTORY: u8 = 0x28;
// Same as player appearance.
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
//??? = 0x36;
//??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
pub fn | (world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO:???
0,
0x37 =>
// TODO:???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0;
monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{},
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA =>
world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO:???
{},
0x37 =>
// TODO:???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
}
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
// 0x00 and 0xff are non-printing and rendered as plain spaces here (assumed);
// 0x7f is CP437's house glyph.
const CP437: &'static [char; 256] = &[
' ', '☺', '☻', '♥', '♦', '♣', '♠', '•', '◘', '○', '◙', '♂', '♀', '♪', '♫', '☼',
'►', '◄', '↕', '‼', '¶', '§', '▬', '↨', '↑', '↓', '→', '←', '∟', '↔', '▲', '▼',
' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '⌂',
'Ç', 'ü', 'é', 'â', 'ä', 'à', 'å', 'ç', 'ê', 'ë', 'è', 'ï', 'î', 'ì', 'Ä', 'Å',
'É', 'æ', 'Æ', 'ô', 'ö', 'ò', 'û', 'ù', 'ÿ', 'Ö', 'Ü', '¢', '£', '¥', '₧', 'ƒ',
'á', 'í', 'ó', 'ú', 'ñ', 'Ñ', 'ª', 'º', '¿', '⌐', '¬', '½', '¼', '¡', '«', '»',
'░', '▒', '▓', '│', '┤', '╡', '╢', '╖', '╕', '╣', '║', '╗', '╝', '╜', '╛', '┐',
'└', '┴', '┬', '├', '─', '┼', '╞', '╟', '╚', '╔', '╩', '╦', '╠', '═', '╬', '╧',
'╨', '╤', '╥', '╙', '╘', '╒', '╓', '╫', '╪', '┘', '┌', '█', '▄', '▌', '▐', '▀',
'α', 'ß', 'Γ', 'π', 'Σ', 'σ', 'µ', 'τ', 'Φ', 'Θ', 'Ω', 'δ', '∞', 'φ', 'ε', '∩',
'≡', '±', '≥', '≤', '⌠', '⌡', '÷', '≈', '°', '∙', '·', '√', 'ⁿ', '²', '■', ' ',
];
pub fn player_name(world: &World) -> String {
let mut i: u8 = PLAYER_NAME;
let mut name = String::new();
while i < 0x40 {
let c = peek(world, i);
if c == 0 { break };
name.push(CP437[c as usize]);
i += 1;
}
return name;
}
| peek | identifier_name |
memory.rs | use std::mem::transmute;
use world::World;
use geometry::Point;
use sprite::Sprite;
use util;
// In Hex's Cellar, the player's spells manipulate a u8[40] array of bytes that
// affects the world around her. This file gives a "RAM map" for that array.
// Color (3 bits = 8 bright colors) and character (5 bits, offset added to '!')
pub const PLAYER_APPEARANCE: u8 = 0x00;
// Begins a char[15].
pub const PLAYER_NAME: u8 = 0x01;
// Monster data is a (struct {u8, u8, u8})[5].
// So add (n * 3) to this, where 0 <= n <= 4, to get the address for the n-th monster,
// then add the offset of which byte you want.
pub const MONSTERS: u8 = 0x10;
pub const MONSTER_FLAGS: u8 = 0;
pub const MONSTER_POSITION: u8 = 1;
pub const MONSTER_HP: u8 = 2;
// An 8-bit bitmask (there are eight spells).
pub const SPELL_MEMORY: u8 = 0x1f;
// A 32-bit bitmask (there are thirty-two items).
pub const IDENTIFICATION: u8 = 0x20;
// Begins a u8[4]: one byte for each timer (poison, haste, charge, protect).
pub const TIMERS: u8 = 0x24;
// Begins a u8[8].
pub const INVENTORY: u8 = 0x28;
// Same as player appearance.
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
//??? = 0x36;
//??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
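// Illustrative packing of the two-nibble fields above (assumed helper, not
// original code): XL 3 with Def -2 packs to 0x3e, because -2 as a 4-bit
// two's-complement nibble is 0xe. peek/poke below use the same layout.
#[allow(dead_code)]
fn pack_xl_def(xl: u8, def: i8) -> u8 {
(xl << 4) | ((def as u8) & 0x0f)
}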
pub fn peek(world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO:???
0,
0x37 =>
// TODO:???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) | monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{},
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA =>
world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO:???
{},
0x37 =>
// TODO:???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
}
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
// 0x00 and 0xff are non-printing and rendered as plain spaces here (assumed);
// 0x7f is CP437's house glyph.
const CP437: &'static [char; 256] = &[
' ', '☺', '☻', '♥', '♦', '♣', '♠', '•', '◘', '○', '◙', '♂', '♀', '♪', '♫', '☼',
'►', '◄', '↕', '‼', '¶', '§', '▬', '↨', '↑', '↓', '→', '←', '∟', '↔', '▲', '▼',
' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '⌂',
'Ç', 'ü', 'é', 'â', 'ä', 'à', 'å', 'ç', 'ê', 'ë', 'è', 'ï', 'î', 'ì', 'Ä', 'Å',
'É', 'æ', 'Æ', 'ô', 'ö', 'ò', 'û', 'ù', 'ÿ', 'Ö', 'Ü', '¢', '£', '¥', '₧', 'ƒ',
'á', 'í', 'ó', 'ú', 'ñ', 'Ñ', 'ª', 'º', '¿', '⌐', '¬', '½', '¼', '¡', '«', '»',
'░', '▒', '▓', '│', '┤', '╡', '╢', '╖', '╕', '╣', '║', '╗', '╝', '╜', '╛', '┐',
'└', '┴', '┬', '├', '─', '┼', '╞', '╟', '╚', '╔', '╩', '╦', '╠', '═', '╬', '╧',
'╨', '╤', '╥', '╙', '╘', '╒', '╓', '╫', '╪', '┘', '┌', '█', '▄', '▌', '▐', '▀',
'α', 'ß', 'Γ', 'π', 'Σ', 'σ', 'µ', 'τ', 'Φ', 'Θ', 'Ω', 'δ', '∞', 'φ', 'ε', '∩',
'≡', '±', '≥', '≤', '⌠', '⌡', '÷', '≈', '°', '∙', '·', '√', 'ⁿ', '²', '■', ' ',
];
pub fn player_name(world: &World) -> String {
let mut i: u8 = PLAYER_NAME;
let mut name = String::new();
while i < 0x40 {
let c = peek(world, i);
if c == 0 { break };
name.push(CP437[c as usize]);
i += 1;
}
return name;
}
| {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0; | identifier_body |
client.rs | use crate::parser::*;
use crate::*;
use ::reqwest::blocking as reqwest;
use itertools::*;
use std::cell::RefCell;
use std::env;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::Range;
use std::time::SystemTime;
use std::sync::{Arc, Mutex};
use chrono::{Utc, DateTime};
lazy_static! {
static ref last_send: Arc<Mutex<Option<DateTime<Utc>>>> =
Arc::new(Mutex::new(None));
}
#[derive(Debug, Clone)]
pub struct Response {
pub stage: i32,
pub info: Info,
pub state: State,
}
fn response_to_json(x: &Response) -> String {
map_to_json(vec![
("stage", format!("{}", x.stage)),
("info", info_to_json(&x.info)),
("state", state_to_json(&x.state)),
])
}
#[derive(Debug, Clone)]
pub struct Info {
pub deadline: i32,
pub role: i32,
pub ability: Ability,
pub range: Range<i32>,
pub opponent_params: Params,
}
fn info_to_json(x: &Info) -> String {
map_to_json(vec![
("deadline", format!("{}", x.deadline)),
("role", format!("{}", x.role)),
("opponent_params", params_to_json(&x.opponent_params)),
])
}
#[derive(Debug, Clone)]
pub struct Ability {
pub potential: i32,
pub max_accelarate: i32,
pub max_heat: i32,
}
#[derive(Debug, Clone)]
pub struct State {
pub tick: i32,
pub range: Range<i32>, // range of the absolute x,y values of the area ships may enter
pub ships: Vec<Ship>,
}
fn state_to_json(x: &State) -> String {
let mut ships = Vec::new();
for s in &x.ships {
ships.push(ship_to_json(&s));
}
map_to_json(vec![
("tick", format!("{}", x.tick)),
("ships", format!("[{}]", ships.join(","))),
])
}
#[derive(Debug, Clone)]
pub struct Ship {
pub role: i32,
pub id: i32,
pub pos: (i32, i32),
pub v: (i32, i32),
pub status: Params,
pub heat: i32,
pub max_heat: i32,
pub max_accelarate: i32,
pub commands: Vec<Command>,
}
fn ship_to_json(x: &Ship) -> String {
let mut commands = Vec::new();
for c in &x.commands {
commands.push(command_to_json(&c));
}
map_to_json(vec![
("role", format!("{}", x.role)),
("x", format!("{}", x.pos.0)),
("y", format!("{}", x.pos.1)),
("vx", format!("{}", x.v.0)),
("vy", format!("{}", x.v.1)),
("status", params_to_json(&x.status)),
("heat", format!("{}", x.heat)),
("max_heat", format!("{}", x.max_heat)),
("max_accelarate", format!("{}", x.max_accelarate)),
("commands", format!("[{}]", commands.connect(","))),
])
}
#[derive(Debug, Clone)]
pub enum Command {
Accelerate(i32, (i32, i32)),
Detonate(i32, Option<(i32, i32)>), // 1, (impact, 32)
Shoot(i32, (i32, i32), i32, Option<(i32, i32)>), // 2, target, power, (impact, 4)
Split(i32, Params),
Unknown,
}
fn command_to_json(x: &Command) -> String {
match x {
Command::Accelerate(id, (x, y)) => format!(
"{{\"type\":\"accelerate\",\"id\":{},\"x\":{},\"y\":{}}}", id, x, y),
Command::Detonate(id, _) => format!(
"{{\"type\":\"detonate\",\"id\":{}}}", id),
Command::Shoot(id, (x, y), power, _) => format!(
"{{\"type\":\"shoot\",\"id\":{},\"x\":{},\"y\":{},\"power\":{}}}",
id, x, y, power),
Command::Split(id, params) => format!(
"{{\"type\":\"split\",\"id\":{},\"params\":{}}}",
id, params_to_json(&params)),
_ => format!("{{}}"),
}
}
#[derive(Debug, Clone)]
pub struct Params {
pub energy: i32,
pub power: i32,
pub cool: i32,
pub life: i32,
}
fn params_to_json(x: &Params) -> String {
format!("{{\"energy\":{},\"power\":{},\"cool\":{},\"life\":{}}}",
x.energy, x.power, x.cool, x.life)
}
fn map_to_json(m: Vec<(&str, String)>) -> String {
let mut kvs = Vec::new();
for kv in m {
kvs.push(format!("\"{}\":{}", kv.0, kv.1));
}
format!("{{{}}}", kvs.join(","))
}
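// Quick illustration of the JSON helper above (added test, values assumed):
// map_to_json joins pre-rendered value strings, so string values must arrive
// already quoted, as params_to_json and the other *_to_json helpers do.
#[test]
fn test_map_to_json() {
let json = map_to_json(vec![("a", "1".to_string()), ("b", "\"x\"".to_string())]);
assert_eq!(json, "{\"a\":1,\"b\":\"x\"}");
}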
impl std::fmt::Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Accelerate(id, v) => write!(f, "[0, {}, <{}, {}>]", id, v.0, v.1)?,
Command::Detonate(id, None) => write!(f, "[1, {}]", id)?,
Command::Detonate(id, Some((a, b))) => write!(f, "[1, {}, {}, {}]", id, a, b)?,
Command::Shoot(id, t, p, None) => write!(f, "[2, {}, <{}, {}>, {}]", id, t.0, t.1, p)?,
Command::Shoot(id, t, p, Some((a, b))) => {
write!(f, "[2, {}, <{}, {}>, {}, {}, {}]", id, t.0, t.1, p, a, b)?
}
Command::Split(id, params) => write!(
f,
"[3, {}, [{}, {}, {}, {}]]",
id, params.energy, params.power, params.cool, params.life
)?,
_ => {
panic!("unreachable");
}
}
Ok(())
}
}
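// Illustrative check of the wire format emitted above (added test, values
// assumed): an accelerate command for ship 0 with vector (1, -1) serializes
// as "[0, 0, <1, -1>]".
#[test]
fn test_command_display_format() {
let cmd = Command::Accelerate(0, (1, -1));
assert_eq!(cmd.to_string(), "[0, 0, <1, -1>]");
}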
impl From<&E> for Command {
fn from(e: &E) -> Command {
let e = get_list(e).unwrap();
match get_num(&e[0]) {
0 => Command::Accelerate(-1, get_pair(&e[1])),
1 => Command::Detonate(
-1,
if e.len() < 3 {
None
} else {
Some((get_num(&e[1]), get_num(&e[2])))
},
),
2 => Command::Shoot(
-1,
get_pair(&e[1]),
get_num(&e[2]),
if e.len() < 5 {
None
} else {
Some((get_num(&e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
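// Illustrative decoding (comment only, since E's exact constructors live in
// the parser module): a list-encoded command [2, <3, 4>, 5] maps to
// Command::Shoot(-1, (3, 4), 5, None); the ship id is filled in as -1 here
// and patched by the caller.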
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &str) {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
} else if !printed {
print!("{}", &msg);
}
}
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
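// Summary of the round trip above: the lisp-ish request text is parsed and
// evaluated, modulated into the linear alien wire encoding, POSTed to the
// server, and the reply demodulated back into an E tree; the 500 ms check
// only warns about slow turns, it does not throttle the request.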
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power: i32, cool: i32, life: i32) -> Response {
let resp = self.send(&form | Command]) -> Response {
let resp = self.send(&format!(
"[4, {}, [{}]]",
self.player_key,
cs.iter().join(", ")
));
let resp = parse(resp);
self.gui("RESP", &response_to_json(&resp));
return resp;
}
}
pub fn get_num(a: &E) -> i32 {
if let E::Num(a) = a {
*a as i32
} else {
panic!("not number");
}
}
pub fn get_pair(a: &E) -> (i32, i32) {
if let E::Pair(a, b) = a {
(get_num(a), get_num(b))
} else {
panic!("not pair");
}
}
pub fn parse(e: E) -> Response {
let a = get_list(&e).unwrap();
assert_eq!(a.len(), 4);
assert_eq!(get_num(&a[0]), 1);
let stage = get_num(&a[1]);
let info = get_list(&a[2]).unwrap();
let deadline = get_num(&info[0]);
let role = get_num(&info[1]);
let ability = get_list(&info[2])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let ability = Ability {
potential: ability[0],
max_heat: ability[1],
max_accelarate: ability[2],
};
let range = get_list(&info[3])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let range = range[0]..range[1];
let params = get_list(&info[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let opponent_params = if params.len() != 4 {
Params {
energy: -1,
power: -1,
cool: -1,
life: -1,
}
} else {
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
}
};
let state = get_list(&a[3]).unwrap();
let (tick, strange, ships) = if state.len() > 0 {
let tick = get_num(&state[0]);
let strange = get_list(&state[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<i32>>();
let strange = strange[0]..strange[1];
let ships = get_list(&state[2])
.unwrap()
.into_iter()
.map(|a| {
let tmp = get_list(&a).unwrap();
let s = get_list(&tmp[0]).unwrap();
let commands = get_list(&tmp[1]).unwrap();
let role = get_num(&s[0]);
let id = get_num(&s[1]); // shipId
let pos = get_pair(&s[2]);
let v = get_pair(&s[3]);
let status = get_list(&s[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let status = Params {
energy: status[0],
power: status[1],
cool: status[2],
life: status[3],
};
let heat = get_num(&s[5]);
let max_heat = get_num(&s[6]);
let max_accelarate = get_num(&s[7]);
// [1, 1, [256, 1, [448, 2, 128], [16, 128], []], [1, [16, 128], [[[1, 0, <34, -46>, <0, 2>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]], [[0, 1, <-34, 48>, <0, 0>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]]]]]
// [src/bin/app.rs:177] &commands = [
// Pair(
// Num(
// 0,
// ),
// Pair(
// Pair(
// Num(
// 0,
// ),
// Num(
// -1,
// ),
// ),
// Nil,
// ),
// ),
// ]
let commands = commands.into_iter().map(|e| e.as_ref().into()).collect();
Ship {
role,
id,
pos,
v,
status,
heat,
max_heat,
max_accelarate,
commands,
}
})
.collect();
(tick, strange, ships)
} else {
(0, 0..0, vec![])
};
Response {
stage,
info: Info {
deadline,
role,
ability,
range,
opponent_params,
},
state: State {
tick,
range: strange,
ships,
},
}
}
| at!(
"[3, {}, [{}, {}, {}, {}]]",
self.player_key, energy, power, cool, life
));
parse(resp)
}
pub fn command(&self, cs: &[ | identifier_body |
client.rs | use crate::parser::*;
use crate::*;
use ::reqwest::blocking as reqwest;
use itertools::*;
use std::cell::RefCell;
use std::env;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::Range;
use std::time::SystemTime;
use std::sync::{Arc, Mutex};
use chrono::{Utc, DateTime};
lazy_static! {
static ref last_send: Arc<Mutex<Option<DateTime<Utc>>>> =
Arc::new(Mutex::new(None));
}
#[derive(Debug, Clone)]
pub struct Response {
pub stage: i32,
pub info: Info,
pub state: State,
}
fn response_to_json(x: &Response) -> String {
map_to_json(vec![
("stage", format!("{}", x.stage)),
("info", info_to_json(&x.info)),
("state", state_to_json(&x.state)),
])
}
#[derive(Debug, Clone)]
pub struct Info {
pub deadline: i32,
pub role: i32,
pub ability: Ability,
pub range: Range<i32>,
pub opponent_params: Params,
}
fn info_to_json(x: &Info) -> String {
map_to_json(vec![
("deadline", format!("{}", x.deadline)),
("role", format!("{}", x.role)),
("opponent_params", params_to_json(&x.opponent_params)),
])
}
#[derive(Debug, Clone)]
pub struct Ability {
pub potential: i32,
pub max_accelarate: i32,
pub max_heat: i32,
}
#[derive(Debug, Clone)]
pub struct State {
pub tick: i32,
pub range: Range<i32>, // range of the absolute x,y values of the area ships may enter
pub ships: Vec<Ship>,
}
fn state_to_json(x: &State) -> String {
let mut ships = Vec::new();
for s in &x.ships {
ships.push(ship_to_json(&s));
}
map_to_json(vec![
("tick", format!("{}", x.tick)),
("ships", format!("[{}]", ships.join(","))),
])
}
#[derive(Debug, Clone)]
pub struct Ship {
pub role: i32,
pub id: i32,
pub pos: (i32, i32),
pub v: (i32, i32),
pub status: Params,
pub heat: i32,
pub max_heat: i32,
pub max_accelarate: i32,
pub commands: Vec<Command>,
}
fn ship_to_json(x: &Ship) -> String {
let mut commands = Vec::new();
for c in &x.commands {
commands.push(command_to_json(&c));
}
map_to_json(vec![
("role", format!("{}", x.role)),
("x", format!("{}", x.pos.0)),
("y", format!("{}", x.pos.1)),
("vx", format!("{}", x.v.0)),
("vy", format!("{}", x.v.1)),
("status", params_to_json(&x.status)),
("heat", format!("{}", x.heat)),
("max_heat", format!("{}", x.max_heat)),
("max_accelarate", format!("{}", x.max_accelarate)),
("commands", format!("[{}]", commands.connect(","))),
])
}
#[derive(Debug, Clone)]
pub enum Command {
Accelerate(i32, (i32, i32)),
Detonate(i32, Option<(i32, i32)>), // 1, (impact, 32)
Shoot(i32, (i32, i32), i32, Option<(i32, i32)>), // 2, target, power, (impact, 4)
Split(i32, Params),
Unknown,
}
fn command_to_json(x: &Command) -> String {
match x {
Command::Accelerate(id, (x, y)) => format!(
"{{\"type\":\"accelerate\",\"id\":{},\"x\":{},\"y\":{}}}", id, x, y),
Command::Detonate(id, _) => format!(
"{{\"type\":\"detonate\",\"id\":{}}}", id),
Command::Shoot(id, (x, y), power, _) => format!(
"{{\"type\":\"shoot\",\"id\":{},\"x\":{},\"y\":{},\"power\":{}}}",
id, x, y, power),
Command::Split(id, params) => format!(
"{{\"type\":\"split\",\"id\":{},\"params\":{}}}",
id, params_to_json(&params)),
_ => format!("{{}}"),
}
}
#[derive(Debug, Clone)]
pub struct Params {
pub energy: i32,
pub power: i32,
pub cool: i32,
pub life: i32,
}
fn params_to_json(x: &Params) -> String {
format!("{{\"energy\":{},\"power\":{},\"cool\":{},\"life\":{}}}",
x.energy, x.power, x.cool, x.life)
}
fn map_to_json(m: Vec<(&str, String)>) -> String {
let mut kvs = Vec::new();
for kv in m {
kvs.push(format!("\"{}\":{}", kv.0, kv.1));
}
format!("{{{}}}", kvs.join(","))
}
impl std::fmt::Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Accelerate(id, v) => write!(f, "[0, {}, <{}, {}>]", id, v.0, v.1)?,
Command::Detonate(id, None) => write!(f, "[1, {}]", id)?,
Command::Detonate(id, Some((a, b))) => write!(f, "[1, {}, {}, {}]", id, a, b)?,
Command::Shoot(id, t, p, None) => write!(f, "[2, {}, <{}, {}>, {}]", id, t.0, t.1, p)?,
Command::Shoot(id, t, p, Some((a, b))) => {
write!(f, "[2, {}, <{}, {}>, {}, {}, {}]", id, t.0, t.1, p, a, b)?
}
Command::Split(id, params) => write!(
f,
"[3, {}, [{}, {}, {}, {}]]",
id, params.energy, params.power, params.cool, params.life
)?,
_ => {
panic!("unreachable");
}
}
Ok(())
}
}
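// Example of the wire format produced by the impl above:
// `Command::Accelerate(0, (1, -1)).to_string()` gives `[0, 0, <1, -1>]`,
// the bracketed list syntax that `Client::send` parses and modulates.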
impl From<&E> for Command {
fn from(e: &E) -> Command {
let e = get_list(e).unwrap();
match get_num(&e[0]) {
0 => Command::Accelerate(-1, get_pair(&e[1])),
1 => Command::Detonate(
-1,
if e.len() < 3 {
None
} else {
Some((get_num(&e[1]), get_num(&e[2])))
},
),
2 => Command::Shoot(
-1,
get_pair(&e[1]),
get_num(&e[2]),
if e.len() < 5 {
None
} else {
Some((get_num(&e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &s | {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
        } else if !printed {
print!("{}", &msg);
}
}
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power: i32, cool: i32, life: i32) -> Response {
let resp = self.send(&format!(
"[3, {}, [{}, {}, {}, {}]]",
self.player_key, energy, power, cool, life
));
parse(resp)
}
pub fn command(&self, cs: &[Command]) -> Response {
let resp = self.send(&format!(
"[4, {}, [{}]]",
self.player_key,
cs.iter().join(", ")
));
let resp = parse(resp);
self.gui("RESP", &response_to_json(&resp));
return resp;
}
}
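// Example turn, assuming a joined game: client.command(&[Command::Accelerate(id, (1, 0))])
// sends the pre-modulation text `[4, <player_key>, [[0, <id>, <1, 0>]]]` and
// returns the parsed `Response` for the next tick.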
pub fn get_num(a: &E) -> i32 {
if let E::Num(a) = a {
*a as i32
} else {
panic!("not number");
}
}
pub fn get_pair(a: &E) -> (i32, i32) {
if let E::Pair(a, b) = a {
(get_num(a), get_num(b))
} else {
panic!("not pair");
}
}
pub fn parse(e: E) -> Response {
let a = get_list(&e).unwrap();
assert_eq!(a.len(), 4);
assert_eq!(get_num(&a[0]), 1);
let stage = get_num(&a[1]);
let info = get_list(&a[2]).unwrap();
let deadline = get_num(&info[0]);
let role = get_num(&info[1]);
let ability = get_list(&info[2])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let ability = Ability {
potential: ability[0],
max_heat: ability[1],
max_accelarate: ability[2],
};
let range = get_list(&info[3])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let range = range[0]..range[1];
let params = get_list(&info[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
    let opponent_params = if params.len() != 4 {
Params {
energy: -1,
power: -1,
cool: -1,
life: -1,
}
} else {
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
}
};
let state = get_list(&a[3]).unwrap();
let (tick, strange, ships) = if state.len() > 0 {
let tick = get_num(&state[0]);
let strange = get_list(&state[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<i32>>();
let strange = strange[0]..strange[1];
let ships = get_list(&state[2])
.unwrap()
.into_iter()
.map(|a| {
let tmp = get_list(&a).unwrap();
let s = get_list(&tmp[0]).unwrap();
let commands = get_list(&tmp[1]).unwrap();
let role = get_num(&s[0]);
let id = get_num(&s[1]); // shipId
let pos = get_pair(&s[2]);
let v = get_pair(&s[3]);
let status = get_list(&s[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let status = Params {
energy: status[0],
power: status[1],
cool: status[2],
life: status[3],
};
let heat = get_num(&s[5]);
let max_heat = get_num(&s[6]);
let max_accelarate = get_num(&s[7]);
// [1, 1, [256, 1, [448, 2, 128], [16, 128], []], [1, [16, 128], [[[1, 0, <34, -46>, <0, 2>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]], [[0, 1, <-34, 48>, <0, 0>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]]]]]
// [src/bin/app.rs:177] &commands = [
// Pair(
// Num(
// 0,
// ),
// Pair(
// Pair(
// Num(
// 0,
// ),
// Num(
// -1,
// ),
// ),
// Nil,
// ),
// ),
// ]
let commands = commands.into_iter().map(|e| e.as_ref().into()).collect();
Ship {
role,
id,
pos,
v,
status,
heat,
max_heat,
max_accelarate,
commands,
}
})
.collect();
(tick, strange, ships)
} else {
(0, 0..0, vec![])
};
Response {
stage,
info: Info {
deadline,
role,
ability,
range,
opponent_params,
},
state: State {
tick,
range: strange,
ships,
},
}
}
| tr) | identifier_name |
client.rs | use crate::parser::*;
use crate::*;
use ::reqwest::blocking as reqwest;
use itertools::*;
use std::cell::RefCell;
use std::env;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::Range;
use std::time::SystemTime;
use std::sync::{Arc, Mutex};
use chrono::{Utc, Local, DateTime, NaiveDateTime};
lazy_static! {
static ref last_send: Arc<Mutex<Option<DateTime<Utc>>>> =
Arc::new(Mutex::new(None));
}
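// Global timestamp of the previously completed request: `send` compares
// against it to measure how long the AI spent thinking between round trips
// and to print the 500 ms warning below.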
#[derive(Debug, Clone)]
pub struct Response {
pub stage: i32,
pub info: Info,
pub state: State,
}
fn response_to_json(x: &Response) -> String {
map_to_json(vec![
("stage", format!("{}", x.stage)),
("info", info_to_json(&x.info)),
("state", state_to_json(&x.state)),
])
}
#[derive(Debug, Clone)]
pub struct Info {
pub deadline: i32,
pub role: i32,
pub ability: Ability,
pub range: Range<i32>,
pub opponent_params: Params,
}
fn info_to_json(x: &Info) -> String {
map_to_json(vec![
("deadline", format!("{}", x.deadline)),
("role", format!("{}", x.role)),
("opponent_params", params_to_json(&x.opponent_params)),
])
}
#[derive(Debug, Clone)]
pub struct Ability {
pub potential: i32,
pub max_accelarate: i32,
pub max_heat: i32,
}
#[derive(Debug, Clone)]
pub struct State {
pub tick: i32,
    pub range: Range<i32>, // range of |x| and |y| for the area ships are allowed to enter
pub ships: Vec<Ship>,
}
fn state_to_json(x: &State) -> String {
let mut ships = Vec::new();
for s in &x.ships {
ships.push(ship_to_json(&s));
}
map_to_json(vec![
("tick", format!("{}", x.tick)),
("ships", format!("[{}]", ships.join(","))),
])
}
#[derive(Debug, Clone)]
pub struct Ship {
pub role: i32,
pub id: i32,
pub pos: (i32, i32),
pub v: (i32, i32),
pub status: Params,
pub heat: i32,
pub max_heat: i32,
pub max_accelarate: i32,
pub commands: Vec<Command>,
}
fn ship_to_json(x: &Ship) -> String {
let mut commands = Vec::new();
for c in &x.commands {
commands.push(command_to_json(&c));
}
map_to_json(vec![
("role", format!("{}", x.role)),
("x", format!("{}", x.pos.0)),
("y", format!("{}", x.pos.1)),
("vx", format!("{}", x.v.0)),
("vy", format!("{}", x.v.1)),
("status", params_to_json(&x.status)),
("heat", format!("{}", x.heat)),
("max_heat", format!("{}", x.max_heat)),
("max_accelarate", format!("{}", x.max_accelarate)),
("commands", format!("[{}]", commands.connect(","))),
])
}
#[derive(Debug, Clone)]
pub enum Command {
Accelerate(i32, (i32, i32)),
Detonate(i32, Option<(i32, i32)>), // 1, (impact, 32)
Shoot(i32, (i32, i32), i32, Option<(i32, i32)>), // 2, target, power, (impact, 4)
Split(i32, Params),
Unknown,
}
fn command_to_json(x: &Command) -> String {
match x {
Command::Accelerate(id, (x, y)) => format!(
"{{\"type\":\"accelerate\",\"id\":{},\"x\":{},\"y\":{}}}", id, x, y),
Command::Detonate(id, _) => format!(
"{{\"type\":\"detonate\",\"id\":{}}}", id),
Command::Shoot(id, (x, y), power, _) => format!(
"{{\"type\":\"shoot\",\"id\":{},\"x\":{},\"y\":{},\"power\":{}}}",
id, x, y, power),
Command::Split(id, params) => format!(
"{{\"type\":\"split\",\"id\":{},\"params\":{}}}",
            id, params_to_json(&params)),
_ => format!("{{}}"),
}
}
#[derive(Debug, Clone)]
pub struct Params {
pub energy: i32,
pub power: i32,
pub cool: i32,
pub life: i32,
}
fn params_to_json(x: &Params) -> String {
format!("{{\"energy\":{},\"power\":{},\"cool\":{},\"life\":{}}}",
x.energy, x.power, x.cool, x.life)
}
fn map_to_json(m: Vec<(&str, String)>) -> String {
let mut kvs = Vec::new();
for kv in m {
kvs.push(format!("\"{}\":{}", kv.0, kv.1));
}
format!("{{{}}}", kvs.join(","))
}
impl std::fmt::Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Accelerate(id, v) => write!(f, "[0, {}, <{}, {}>]", id, v.0, v.1)?,
Command::Detonate(id, None) => write!(f, "[1, {}]", id)?,
Command::Detonate(id, Some((a, b))) => write!(f, "[1, {}, {}, {}]", id, a, b)?,
Command::Shoot(id, t, p, None) => write!(f, "[2, {}, <{}, {}>, {}]", id, t.0, t.1, p)?,
Command::Shoot(id, t, p, Some((a, b))) => {
write!(f, "[2, {}, <{}, {}>, {}, {}, {}]", id, t.0, t.1, p, a, b)?
}
Command::Split(id, params) => write!(
f,
"[3, {}, [{}, {}, {}, {}]]",
id, params.energy, params.power, params.cool, params.life
)?,
_ => {
panic!("unreachable");
}
}
Ok(())
}
}
impl From<&E> for Command {
fn from(e: &E) -> Command {
let e = get_list(e).unwrap();
match get_num(&e[0]) {
0 => Command::Accelerate(-1, get_pair(&e[1])),
1 => Command::Detonate(
-1,
if e.len() < 3 {
None
} else {
Some((get_num(&e[1]), get_num(&e[2])))
},
),
2 => Command::Shoot(
-1,
get_pair(&e[1]),
get_num(&e[2]),
if e.len() < 5 {
None
} else {
Some((get_num(&e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &str) {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
        } else if !printed {
print!("{}", &msg);
}
}
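    // The trace lines written above are tab-separated:
    // `###GUI\t<unix-time-ns>\t<player-key>\t<name>\t<payload>`.
    // They go to the per-game log file when one is open, and to stdout when
    // the GUI environment variable is set (or when no file sink printed them).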
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
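    // `send` in short: lisp text -> parse/eval -> modulate into the signal
    // alphabet -> HTTP POST -> demodulate the body back into an `E` tree.
    // The unwraps are deliberate: a failed exchange is unrecoverable here.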
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power: i32, cool: i32, life: i32) -> Response {
let resp = self.send(&format!(
"[3, {}, [{}, {}, {}, {}]]",
self.player_key, energy, power, cool, life
));
parse(resp)
}
pub fn command(&self, cs: &[Command]) -> Response {
let resp = self.send(&format!(
"[4, {}, [{}]]",
self.player_key,
cs.iter().join(", ")
));
let resp = parse(resp);
self.gui("RESP", &response_to_json(&resp));
return resp;
}
}
pub fn get_num(a: &E) -> i32 {
if let E::Num(a) = a {
*a as i32
} else {
panic!("not number");
}
}
pub fn get_pair(a: &E) -> (i32, i32) {
if let E::Pair(a, b) = a {
(get_num(a), get_num(b))
} else {
panic!("not pair");
}
}
pub fn parse(e: E) -> Response {
let a = get_list(&e).unwrap();
assert_eq!(a.len(), 4);
assert_eq!(get_num(&a[0]), 1);
let stage = get_num(&a[1]);
let info = get_list(&a[2]).unwrap();
let deadline = get_num(&info[0]);
let role = get_num(&info[1]);
let ability = get_list(&info[2])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let ability = Ability {
potential: ability[0],
max_heat: ability[1],
max_accelarate: ability[2],
};
let range = get_list(&info[3])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let range = range[0]..range[1];
let params = get_list(&info[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
    let opponent_params = if params.len() != 4 {
Params {
energy: -1,
power: -1,
cool: -1,
life: -1,
}
} else {
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
}
};
let state = get_list(&a[3]).unwrap();
let (tick, strange, ships) = if state.len() > 0 {
let tick = get_num(&state[0]);
let strange = get_list(&state[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<i32>>();
let strange = strange[0]..strange[1];
let ships = get_list(&state[2])
.unwrap()
.into_iter()
.map(|a| {
let tmp = get_list(&a).unwrap();
let s = get_list(&tmp[0]).unwrap();
let commands = get_list(&tmp[1]).unwrap();
let role = get_num(&s[0]);
let id = get_num(&s[1]); // shipId
let pos = get_pair(&s[2]);
let v = get_pair(&s[3]);
let status = get_list(&s[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let status = Params {
energy: status[0],
power: status[1],
cool: status[2],
life: status[3],
};
let heat = get_num(&s[5]);
let max_heat = get_num(&s[6]); | // [src/bin/app.rs:177] &commands = [
// Pair(
// Num(
// 0,
// ),
// Pair(
// Pair(
// Num(
// 0,
// ),
// Num(
// -1,
// ),
// ),
// Nil,
// ),
// ),
// ]
let commands = commands.into_iter().map(|e| e.as_ref().into()).collect();
Ship {
role,
id,
pos,
v,
status,
heat,
max_heat,
max_accelarate,
commands,
}
})
.collect();
(tick, strange, ships)
} else {
(0, 0..0, vec![])
};
Response {
stage,
info: Info {
deadline,
role,
ability,
range,
opponent_params,
},
state: State {
tick,
range: strange,
ships,
},
}
} | let max_accelarate = get_num(&s[7]);
// [1, 1, [256, 1, [448, 2, 128], [16, 128], []], [1, [16, 128], [[[1, 0, <34, -46>, <0, 2>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]], [[0, 1, <-34, 48>, <0, 0>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]]]]] | random_line_split |
mod.rs | //! # Ready-to-use NLP pipelines and models
//!
//! Based on Huggingface's pipelines, ready to use end-to-end NLP pipelines are available as part of this crate. The following capabilities are currently available:
//!
//! **Disclaimer**
//! The contributors of this repository are not responsible for any generation from the 3rd party utilization of the pretrained systems proposed herein.
//!
//! #### 1. Question Answering
//! Extractive question answering from a given question and context. DistilBERT model finetuned on SQuAD (Stanford Question Answering Dataset)
//!
//! ```ignore
//! use rust_bert::pipelines::question_answering::{QaInput, QuestionAnsweringModel};
//! # fn main() -> anyhow::Result<()> {
//! let qa_model = QuestionAnsweringModel::new(Default::default())?;
//!
//! let question = String::from("Where does Amy live?");
//! let context = String::from("Amy lives in Amsterdam");
//!
//! let answers = qa_model.predict(&[QaInput { question, context }], 1, 32);
//! # Ok(())
//! # }
//! ```
//!
//! Output: \
//! ```ignore
//! # use rust_bert::pipelines::question_answering::Answer;
//! # let output =
//! [Answer {
//! score: 0.9976,
//! start: 13,
//! end: 21,
//! answer: String::from("Amsterdam"),
//! }]
//! # ;
//! ```
//!
//! #### 2. Translation
//! Translation using the MarianMT architecture and pre-trained models from the Opus-MT team from Language Technology at the University of Helsinki.
//! Currently supported languages are :
//! - English <-> French
//! - English <-> Spanish
//! - English <-> Portuguese
//! - English <-> Italian
//! - English <-> Catalan
//! - English <-> German
//! - English <-> Russian
//! - English <-> Chinese (Simplified)
//! - English <-> Chinese (Traditional)
//! - English <-> Dutch
//! - English <-> Swedish
//! - English <-> Arabic
//! - English <-> Hebrew
//! - English <-> Hindi
//! - French <-> German
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! # use rust_bert::pipelines::generation_utils::LanguageGenerator;
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::translation::{
//! Language, TranslationConfig, TranslationModel, TranslationModelBuilder,
//! };
//! use tch::Device;
//! let model = TranslationModelBuilder::new()
//! .with_device(Device::cuda_if_available())
//! .with_model_type(ModelType::Marian)
//! .with_source_languages(vec![Language::English])
//! .with_target_languages(vec![Language::French])
//! .create_model()?;
//!
//! let input = ["This is a sentence to be translated"];
//!
//! let output = model.translate(&input, None, Language::French);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```ignore
//! # let output =
//! "Il s'agit d'une phrase à traduire"
//! # ;
//! ```
//!
//! #### 3. Summarization
//! Abstractive summarization of texts based on the BART encoder-decoder architecture
//! Include techniques such as beam search, top-k and nucleus sampling, temperature setting and repetition penalty.
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! # use rust_bert::pipelines::generation_utils::LanguageGenerator;
//! use rust_bert::pipelines::summarization::SummarizationModel;
//!
//! let mut model = SummarizationModel::new(Default::default())?;
//!
//! let input = ["In findings published Tuesday in Cornell University's arXiv by a team of scientists
//! from the University of Montreal and a separate report published Wednesday in Nature Astronomy by a team
//! from University College London (UCL), the presence of water vapour was confirmed in the atmosphere of K2-18b,
//! a planet circling a star in the constellation Leo. This is the first such discovery in a planet in its star's
//! habitable zone — not too hot and not too cold for liquid water to exist. The Montreal team, led by Björn Benneke,
//! used data from the NASA's Hubble telescope to assess changes in the light coming from K2-18b's star as the planet
//! passed between it and Earth. They found that certain wavelengths of light, which are usually absorbed by water,
//! weakened when the planet was in the way, indicating not only does K2-18b have an atmosphere, but the atmosphere
//! contains water in vapour form. The team from UCL then analyzed the Montreal team's data using their own software
//! and confirmed their conclusion. This was not the first time scientists have found signs of water on an exoplanet,
//! but previous discoveries were made on planets with high temperatures or other pronounced differences from Earth.
//! \"This is the first potentially habitable planet where the temperature is right and where we now know there is water,\"
//! said UCL astronomer Angelos Tsiaras. \"It's the best candidate for habitability right now.\" \"It's a good sign\",
//! said Ryan Cloutier of the Harvard–Smithsonian Center for Astrophysics, who was not one of either study's authors.
//! \"Overall,\" he continued, \"the presence of water in its atmosphere certainly improves the prospect of K2-18b being
//! a potentially habitable planet, but further observations will be required to say for sure. \"
//! K2-18b was first identified in 2015 by the Kepler space telescope. It is about 110 light-years from Earth and larger
//! but less dense. Its star, a red dwarf, is cooler than the Sun, but the planet's orbit is much closer, such that a year
//! on K2-18b lasts 33 Earth days. According to The Guardian, astronomers were optimistic that NASA's James Webb space
//! telescope — scheduled for launch in 2021 — and the European Space Agency's 2028 ARIEL program, could reveal more
//! about exoplanets like K2-18b."];
//!
//! let output = model.summarize(&input);
//! # Ok(())
//! # }
//! ```
//! (example from: [WikiNews](https://en.wikinews.org/wiki/Astronomers_find_water_vapour_in_atmosphere_of_exoplanet_K2-18b))
//!
//! Example output: \
//! ```ignore
//! # let output =
//! "Scientists have found water vapour on K2-18b, a planet 110 light-years from Earth.
//! This is the first such discovery in a planet in its star's habitable zone.
//! The planet is not too hot and not too cold for liquid water to exist."
//! # ;
//! ```
//!
//!
//! #### 4. Dialogue Model
//! Conversation model based on Microsoft's [DialoGPT](https://github.com/microsoft/DialoGPT).
//! This pipeline allows the generation of single or multi-turn conversations between a human and a model.
//! The DialoGPT's page states that
//! > The human evaluation results indicate that the response generated from DialoGPT is comparable to human response quality
//! > under a single-turn conversation Turing test. ([DialoGPT repository](https://github.com/microsoft/DialoGPT))
//!
//! The model uses a `ConversationManager` to keep track of active conversations and generate responses to them.
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::conversation::{ConversationManager, ConversationModel};
//! let conversation_model = ConversationModel::new(Default::default())?;
//! let mut conversation_manager = ConversationManager::new();
//!
//! let conversation_id =
//! conversation_manager.create("Going to the movies tonight - any suggestions?");
//! let output = conversation_model.generate_responses(&mut conversation_manager);
//! # Ok(())
//! # }
//! ```
//! Example output: \
//! ```ignore
//! # let output =
//! "The Big Lebowski."
//! # ;
//! ```
//!
//! #### 5. Natural Language Generation
//! Generate language based on a prompt. GPT2 and GPT available as base models.
//! Include techniques such as beam search, top-k and nucleus sampling, temperature setting and repetition penalty.
//! Supports batch generation of sentences from several prompts. Sequences will be left-padded with the model's padding token if present, the unknown token otherwise.
//! This may impact the results and it is recommended to submit prompts of similar length for best results. Additional information on the input parameters for generation is provided in this module's documentation.
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::text_generation::TextGenerationModel;
//! use rust_bert::pipelines::common::ModelType;
//! let mut model = TextGenerationModel::new(Default::default())?;
//! let input_context_1 = "The dog";
//! let input_context_2 = "The cat was";
//!
//! let prefix = None; // Optional prefix to append prompts with, will be excluded from the generated output
//!
//! let output = model.generate(&[input_context_1, input_context_2], prefix);
//! # Ok(())
//! # }
//! ```
//! Example output: \
//! ```ignore
//! # let output =
//! [
//! "The dog's owners, however, did not want to be named. According to the lawsuit, the animal's owner, a 29-year",
//! "The dog has always been part of the family. \"He was always going to be my dog and he was always looking out for me",
//! "The dog has been able to stay in the home for more than three months now. \"It's a very good dog. She's",
//! "The cat was discovered earlier this month in the home of a relative of the deceased. The cat\'s owner, who wished to remain anonymous,",
//! "The cat was pulled from the street by two-year-old Jazmine.\"I didn't know what to do,\" she said",
//! "The cat was attacked by two stray dogs and was taken to a hospital. Two other cats were also injured in the attack and are being treated."
//! ]
//! # ;
//! ```
//!
//! #### 6. Zero-shot classification
//! Performs zero-shot classification on input sentences with provided labels using a model fine-tuned for Natural Language Inference.
//! ```ignore
//! # use rust_bert::pipelines::zero_shot_classification::ZeroShotClassificationModel;
//! # fn main() -> anyhow::Result<()> {
//! let sequence_classification_model = ZeroShotClassificationModel::new(Default::default())?;
//! let input_sentence = "Who are you voting for in 2020?";
//! let input_sequence_2 = "The prime minister has announced a stimulus package which was widely criticized by the opposition.";
//! let candidate_labels = &["politics", "public health", "economics", "sports"];
//! let output = sequence_classification_model.predict_multilabel(
//! &[input_sentence, input_sequence_2],
//! candidate_labels,
//! None,
//! 128,
//! ); | //! ```ignore
//! # use rust_bert::pipelines::sequence_classification::Label;
//! let output = [
//! [
//! Label {
//! text: "politics".to_string(),
//! score: 0.972,
//! id: 0,
//! sentence: 0,
//! },
//! Label {
//! text: "public health".to_string(),
//! score: 0.032,
//! id: 1,
//! sentence: 0,
//! },
//! Label {
//! text: "economics".to_string(),
//! score: 0.006,
//! id: 2,
//! sentence: 0,
//! },
//! Label {
//! text: "sports".to_string(),
//! score: 0.004,
//! id: 3,
//! sentence: 0,
//! },
//! ],
//! [
//! Label {
//! text: "politics".to_string(),
//! score: 0.975,
//! id: 0,
//! sentence: 1,
//! },
//! Label {
//! text: "economics".to_string(),
//! score: 0.852,
//! id: 2,
//! sentence: 1,
//! },
//! Label {
//! text: "public health".to_string(),
//! score: 0.0818,
//! id: 1,
//! sentence: 1,
//! },
//! Label {
//! text: "sports".to_string(),
//! score: 0.001,
//! id: 3,
//! sentence: 1,
//! },
//! ],
//! ]
//! .to_vec();
//! ```
//!
//! #### 7. Sentiment analysis
//! Predicts the binary sentiment for a sentence. DistilBERT model finetuned on SST-2.
//! ```ignore
//! use rust_bert::pipelines::sentiment::SentimentModel;
//! # fn main() -> anyhow::Result<()> {
//! let sentiment_model = SentimentModel::new(Default::default())?;
//! let input = [
//! "Probably my all-time favorite movie, a story of selflessness, sacrifice and dedication to a noble cause, but it's not preachy or boring.",
//! "This film tried to be too many things all at once: stinging political satire, Hollywood blockbuster, sappy romantic comedy, family values promo...",
//! "If you like original gut wrenching laughter you will like this movie. If you are young or old then you will love this movie, hell even my mom liked it.",
//! ];
//! let output = sentiment_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! (Example courtesy of [IMDb](http://www.imdb.com))
//!
//! Output: \
//! ```ignore
//! # use rust_bert::pipelines::sentiment::Sentiment;
//! # use rust_bert::pipelines::sentiment::SentimentPolarity::{Positive, Negative};
//! # let output =
//! [
//! Sentiment {
//! polarity: Positive,
//! score: 0.998,
//! },
//! Sentiment {
//! polarity: Negative,
//! score: 0.992,
//! },
//! Sentiment {
//! polarity: Positive,
//! score: 0.999,
//! },
//! ]
//! # ;
//! ```
//!
//! #### 8. Named Entity Recognition
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text. The default NER mode is an English BERT cased large model finetuned on CoNNL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! Additional pre-trained models are available for English, German, Spanish and Dutch.
//! ```ignore
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```ignore
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! #### 9. Keywords/Keyphrases extraction
//!
//! Extract keywords and keyphrases extractions from input documents. Based on a sentence embedding model
//! to compute the semantic similarity between the full text and word n-grams composing it.
//!
//! ```no_run
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::keywords_extraction::KeywordExtractionModel;
//! let keyword_extraction_model = KeywordExtractionModel::new(Default::default())?;
//!
//! let input = "Rust is a multi-paradigm, general-purpose programming language. \
//! Rust emphasizes performance, type safety, and concurrency. Rust enforces memory safety—that is, \
//! that all references point to valid memory—without requiring the use of a garbage collector or \
//! reference counting present in other memory-safe languages. To simultaneously enforce \
//! memory safety and prevent concurrent data races, Rust's borrow checker tracks the object lifetime \
//! and variable scope of all references in a program during compilation. Rust is popular for \
//! systems programming but also offers high-level features including functional programming constructs.";
//! // Credits: Wikimedia https://en.wikipedia.org/wiki/Rust_(programming_language)
//! let output = keyword_extraction_model.predict(&[input])?;
//! Ok(())
//! }
//! ```
//! Output:
//! ```no_run
//! # let output =
//! [
//! ("rust", 0.50910604),
//! ("concurrency", 0.33825397),
//! ("languages", 0.28515345),
//! ("compilation", 0.2801403),
//! ("safety", 0.2657791),
//! ]
//! # ;
//! ```
//!
//! #### 10. Part of Speech tagging
//! Extracts Part of Speech tags (Noun, Verb, Adjective...) from text.
//! ```ignore
//! use rust_bert::pipelines::pos_tagging::POSModel;
//! # fn main() -> anyhow::Result<()> {
//! let pos_model = POSModel::new(Default::default())?;
//! let input = ["My name is Bob"];
//! let output = pos_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```ignore
//! # use rust_bert::pipelines::pos_tagging::POSTag;
//! # let output =
//! [
//! POSTag {
//! word: String::from("My"),
//! score: 0.1560,
//! label: String::from("PRP"),
//! },
//! POSTag {
//! word: String::from("name"),
//! score: 0.6565,
//! label: String::from("NN"),
//! },
//! POSTag {
//! word: String::from("is"),
//! score: 0.3697,
//! label: String::from("VBZ"),
//! },
//! POSTag {
//! word: String::from("Bob"),
//! score: 0.7460,
//! label: String::from("NNP"),
//! },
//! ]
//! # ;
//! ```
//!
//! #### 11. Sentence embeddings
//!
//! Generate sentence embeddings (vector representation). These can be used for applications including dense information retrieval.
//! ```ignore
//! # use rust_bert::pipelines::sentence_embeddings::{SentenceEmbeddingsBuilder, SentenceEmbeddingsModelType};
//! # fn main() -> anyhow::Result<()> {
//! let model = SentenceEmbeddingsBuilder::remote(
//! SentenceEmbeddingsModelType::AllMiniLmL12V2
//! ).create_model()?;
//!
//! let sentences = [
//! "this is an example sentence",
//! "each sentence is converted"
//! ];
//!
//! let output = model.encode(&sentences);
//! # Ok(())
//! # }
//! ```
//! Output:
//! ```ignore
//! # let output =
//! [
//! [-0.000202666, 0.08148022, 0.03136178, 0.002920636],
//! [0.064757116, 0.048519745, -0.01786038, -0.0479775],
//! ]
//! # ;
//! ```
//!
//! # [Tokenizers](https://github.com/huggingface/tokenizers) support
//!
//! The pipelines support both the default [rust-tokenizers](https://github.com/guillaume-be/rust-tokenizers) and
//! Hugging Face's [Tokenizers](https://github.com/huggingface/tokenizers) library. In order to use the latter,
//! the tokenizer needs to be created manually and passed as an argument to the pipeline's `new_with_tokenizer` method.
//!
//! Note that a `special_token_maps` file is required to create a `TokenizerOption` from an HFTokenizer. This file is sometimes not provided
//! (the Python Transformers library ships the special token map information as part of the actual tokenizer object wrapping the rust-based
//! tokenizer). If that is the case, a temporary file with the special token map information can be created as illustrated below:
//! ```no_run
//! fn main() -> anyhow::Result<()> {
//! use std::fs::File;
//! use std::io::Write;
//! use tempfile::TempDir;
//! use rust_bert::pipelines::common::{ModelType, TokenizerOption};
//! use rust_bert::pipelines::text_generation::{TextGenerationConfig, TextGenerationModel};
//! use rust_bert::resources::{RemoteResource, ResourceProvider};
//!
//! let generate_config = TextGenerationConfig {
//! model_type: ModelType::GPT2,
//! ..Default::default()
//! };
//!
//! // Create tokenizer
//! let tmp_dir = TempDir::new()?;
//! let special_token_map_path = tmp_dir.path().join("special_token_map.json");
//! let mut tmp_file = File::create(&special_token_map_path)?;
//! writeln!(
//! tmp_file,
//! r#"{{"bos_token": "", "eos_token": "", "unk_token": ""}}"#
//! )?;
//! let tokenizer_path = RemoteResource::from_pretrained((
//! "gpt2/tokenizer",
//! "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
//! )).get_local_path()?;
//! let tokenizer =
//! TokenizerOption::from_hf_tokenizer_file(tokenizer_path, special_token_map_path)?;
//!
//! // Create model
//! let model = TextGenerationModel::new_with_tokenizer(generate_config, tokenizer)?;
//!
//! let input_context = "The dog";
//! let output = model.generate(&[input_context], None);
//! for sentence in output {
//! println!("{sentence:?}");
//! }
//! Ok(())
//! }
//! ```
pub mod common;
pub mod conversation;
pub mod generation_utils;
pub mod keywords_extraction;
pub mod masked_language;
pub mod ner;
pub mod pos_tagging;
pub mod question_answering;
pub mod sentence_embeddings;
pub mod sentiment;
pub mod sequence_classification;
pub mod summarization;
pub mod text_generation;
pub mod token_classification;
pub mod translation;
pub mod zero_shot_classification;
#[cfg(feature = "onnx")]
pub mod onnx;
#[cfg(feature = "hf-tokenizers")]
pub mod hf_tokenizers; | //! # Ok(())
//! # }
//! ```
//!
//! outputs: | random_line_split |
call.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
pallet::{
parse::call::{CallVariantDef, CallWeightDef},
Def,
},
COUNTER,
};
use proc_macro2::TokenStream as TokenStream2;
use quote::{quote, ToTokens};
use syn::spanned::Spanned;
///
/// * Generate the call enum and implement various traits on it.
/// * Implement Callable and call_function on `Pallet`
pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream | let fn_name = methods.iter().map(|method| &method.name).collect::<Vec<_>>();
let call_index = methods.iter().map(|method| method.call_index).collect::<Vec<_>>();
let new_call_variant_fn_name = fn_name
.iter()
.map(|fn_name| quote::format_ident!("new_call_variant_{}", fn_name))
.collect::<Vec<_>>();
let new_call_variant_doc = fn_name
.iter()
.map(|fn_name| format!("Create a call with the variant `{}`.", fn_name))
.collect::<Vec<_>>();
let mut call_index_warnings = Vec::new();
// Emit a warning for each call that is missing `call_index` when not in dev-mode.
for method in &methods {
if method.explicit_call_index || def.dev_mode {
continue
}
let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex")
.index(call_index_warnings.len())
.old("use implicit call indices")
.new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode")
.help_links(&[
"https://github.com/paritytech/substrate/pull/12891",
"https://github.com/paritytech/substrate/pull/11381"
])
.span(method.name.span())
.build();
call_index_warnings.push(warning);
}
let mut fn_weight = Vec::<TokenStream2>::new();
let mut weight_warnings = Vec::new();
for method in &methods {
match &method.weight {
CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)),
            CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => {
let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight")
.index(weight_warnings.len())
.old("use hard-coded constant as call weight")
.new("benchmark all calls or put the pallet into `dev` mode")
.help_link("https://github.com/paritytech/substrate/pull/13798")
.span(lit.span())
.build();
weight_warnings.push(warning);
fn_weight.push(e.into_token_stream());
},
CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()),
CallWeightDef::Inherited => {
let pallet_weight = def
.call
.as_ref()
.expect("we have methods; we have calls; qed")
.inherited_call_weight
.as_ref()
.expect("the parser prevents this");
// Expand `<<T as Config>::WeightInfo>::call_name()`.
let t = &pallet_weight.typename;
let n = &method.name;
fn_weight.push(quote!({ < #t > :: #n () }));
},
}
}
debug_assert_eq!(fn_weight.len(), methods.len());
    let map_fn_docs = if !def.dev_mode {
// Emit the [`Pallet::method`] documentation only for non-dev modes.
|method: &CallVariantDef| {
let reference = format!("See [`Pallet::{}`].", method.name);
quote!(#reference)
}
} else {
        // For the dev-mode do not provide a documentation link as it will break the
// `cargo doc` if the pallet is private inside a test.
|method: &CallVariantDef| {
let reference = format!("See `Pallet::{}`.", method.name);
quote!(#reference)
}
};
let fn_doc = methods.iter().map(map_fn_docs).collect::<Vec<_>>();
let args_name = methods
.iter()
.map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_name_stripped = methods
.iter()
.map(|method| {
method
.args
.iter()
.map(|(_, name, _)| {
syn::Ident::new(name.to_string().trim_start_matches('_'), name.span())
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let make_args_name_pattern = |ref_tok| {
args_name
.iter()
.zip(args_name_stripped.iter())
.map(|(args_name, args_name_stripped)| {
args_name
.iter()
.zip(args_name_stripped)
.map(|(args_name, args_name_stripped)| {
if args_name == args_name_stripped {
quote::quote!( #ref_tok #args_name )
} else {
quote::quote!( #args_name_stripped: #ref_tok #args_name )
}
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
};
let args_name_pattern = make_args_name_pattern(None);
let args_name_pattern_ref = make_args_name_pattern(Some(quote::quote!(ref)));
let args_type = methods
.iter()
.map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_compact_attr = methods.iter().map(|method| {
method
.args
.iter()
.map(|(is_compact, _, type_)| {
if *is_compact {
quote::quote_spanned!(type_.span() => #[codec(compact)] )
} else {
quote::quote!()
}
})
.collect::<Vec<_>>()
});
let default_docs =
[syn::parse_quote!(r"Contains a variant per dispatchable extrinsic that this pallet has.")];
let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] };
let maybe_compile_error = if def.call.is_none() {
quote::quote! {
compile_error!(concat!(
"`",
stringify!($pallet_name),
"` does not have #[pallet::call] defined, perhaps you should remove `Call` from \
construct_runtime?",
));
}
} else {
proc_macro2::TokenStream::new()
};
let count = COUNTER.with(|counter| counter.borrow_mut().inc());
let macro_ident = syn::Ident::new(&format!("__is_call_part_defined_{}", count), span);
let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" };
// Wrap all calls inside of storage layers
if let Some(syn::Item::Impl(item_impl)) = def
.call
.as_ref()
.map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index])
{
item_impl.items.iter_mut().for_each(|i| {
if let syn::ImplItem::Fn(method) = i {
let block = &method.block;
method.block = syn::parse_quote! {{
// We execute all dispatchable in a new storage layer, allowing them
// to return an error at any point, and undoing any storage changes.
#frame_support::storage::with_storage_layer(|| #block)
}};
}
});
}
// Extracts #[allow] attributes, necessary so that we don't run into compiler warnings
let maybe_allow_attrs = methods
.iter()
.map(|method| {
method
.attrs
.iter()
.find(|attr| attr.path().is_ident("allow"))
.map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream())
})
.collect::<Vec<_>>();
quote::quote_spanned!(span =>
mod warnings {
#(
#call_index_warnings
)*
#(
#weight_warnings
)*
}
#[doc(hidden)]
pub mod __substrate_call_check {
#[macro_export]
#[doc(hidden)]
macro_rules! #macro_ident {
($pallet_name:ident) => {
#maybe_compile_error
};
}
#[doc(hidden)]
pub use #macro_ident as is_call_part_defined;
}
#( #[doc = #docs] )*
#[derive(
#frame_support::RuntimeDebugNoBound,
#frame_support::CloneNoBound,
#frame_support::EqNoBound,
#frame_support::PartialEqNoBound,
#frame_support::__private::codec::Encode,
#frame_support::__private::codec::Decode,
#frame_support::__private::scale_info::TypeInfo,
)]
#[codec(encode_bound())]
#[codec(decode_bound())]
#[scale_info(skip_type_params(#type_use_gen), capture_docs = #capture_docs)]
#[allow(non_camel_case_types)]
pub enum #call_ident<#type_decl_bounded_gen> #where_clause {
#[doc(hidden)]
#[codec(skip)]
__Ignore(
#frame_support::__private::sp_std::marker::PhantomData<(#type_use_gen,)>,
#frame_support::Never,
),
#(
#[doc = #fn_doc]
#[codec(index = #call_index)]
#fn_name {
#(
#[allow(missing_docs)]
#args_compact_attr #args_name_stripped: #args_type
),*
},
)*
}
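        // Illustration with a hypothetical pallet: a dispatchable declared as
        // `fn transfer(origin: OriginFor<T>, dest: T::AccountId, value: u32)`
        // becomes the variant `Call::transfer { dest: T::AccountId, value: u32 }`
        // above, next to the hidden, non-constructible `__Ignore` variant.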
impl<#type_impl_gen> #call_ident<#type_use_gen> #where_clause {
#(
#[doc = #new_call_variant_doc]
pub fn #new_call_variant_fn_name(
#( #args_name_stripped: #args_type ),*
) -> Self {
Self::#fn_name {
#( #args_name_stripped ),*
}
}
)*
}
impl<#type_impl_gen> #frame_support::dispatch::GetDispatchInfo
for #call_ident<#type_use_gen>
#where_clause
{
fn get_dispatch_info(&self) -> #frame_support::dispatch::DispatchInfo {
match *self {
#(
Self::#fn_name { #( #args_name_pattern_ref, )* } => {
let __pallet_base_weight = #fn_weight;
let __pallet_weight = <
dyn #frame_support::dispatch::WeighData<( #( & #args_type, )* )>
>::weigh_data(&__pallet_base_weight, ( #( #args_name, )* ));
let __pallet_class = <
dyn #frame_support::dispatch::ClassifyDispatch<
( #( & #args_type, )* )
>
>::classify_dispatch(&__pallet_base_weight, ( #( #args_name, )* ));
let __pallet_pays_fee = <
dyn #frame_support::dispatch::PaysFee<( #( & #args_type, )* )>
>::pays_fee(&__pallet_base_weight, ( #( #args_name, )* ));
#frame_support::dispatch::DispatchInfo {
weight: __pallet_weight,
class: __pallet_class,
pays_fee: __pallet_pays_fee,
}
},
)*
Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"),
}
}
}
impl<#type_impl_gen> #frame_support::dispatch::GetCallName for #call_ident<#type_use_gen>
#where_clause
{
fn get_call_name(&self) -> &'static str {
match *self {
                    #( Self::#fn_name { .. } => stringify!(#fn_name), )*
Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."),
}
}
fn get_call_names() -> &'static [&'static str] {
&[ #( stringify!(#fn_name), )* ]
}
}
impl<#type_impl_gen> #frame_support::dispatch::GetCallIndex for #call_ident<#type_use_gen>
#where_clause
{
fn get_call_index(&self) -> u8 {
match *self {
                    #( Self::#fn_name { .. } => #call_index, )*
Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."),
}
}
fn get_call_indices() -> &'static [u8] {
&[ #( #call_index, )* ]
}
}
impl<#type_impl_gen> #frame_support::traits::UnfilteredDispatchable
for #call_ident<#type_use_gen>
#where_clause
{
type RuntimeOrigin = #frame_system::pallet_prelude::OriginFor<T>;
fn dispatch_bypass_filter(
self,
origin: Self::RuntimeOrigin
) -> #frame_support::dispatch::DispatchResultWithPostInfo {
#frame_support::dispatch_context::run_in_context(|| {
match self {
#(
Self::#fn_name { #( #args_name_pattern, )* } => {
#frame_support::__private::sp_tracing::enter_span!(
#frame_support::__private::sp_tracing::trace_span!(stringify!(#fn_name))
);
#maybe_allow_attrs
<#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* )
.map(Into::into).map_err(Into::into)
},
)*
Self::__Ignore(_, _) => {
let _ = origin; // Use origin for empty Call enum
unreachable!("__PhantomItem cannot be used.");
},
}
})
}
}
impl<#type_impl_gen> #frame_support::dispatch::Callable<T> for #pallet_ident<#type_use_gen>
#where_clause
{
type RuntimeCall = #call_ident<#type_use_gen>;
}
impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause {
#[doc(hidden)]
pub fn call_functions() -> #frame_support::__private::metadata_ir::PalletCallMetadataIR {
#frame_support::__private::scale_info::meta_type::<#call_ident<#type_use_gen>>().into()
}
}
)
}
| {
let (span, where_clause, methods, docs) = match def.call.as_ref() {
Some(call) => {
let span = call.attr_span;
let where_clause = call.where_clause.clone();
let methods = call.methods.clone();
let docs = call.docs.clone();
(span, where_clause, methods, docs)
},
None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()),
};
let frame_support = &def.frame_support;
let frame_system = &def.frame_system;
let type_impl_gen = &def.type_impl_generics(span);
let type_decl_bounded_gen = &def.type_decl_bounded_generics(span);
let type_use_gen = &def.type_use_generics(span);
let call_ident = syn::Ident::new("Call", span);
let pallet_ident = &def.pallet_struct.pallet;
| identifier_body |
call.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
pallet::{
parse::call::{CallVariantDef, CallWeightDef},
Def,
},
COUNTER,
};
use proc_macro2::TokenStream as TokenStream2;
use quote::{quote, ToTokens};
use syn::spanned::Spanned;
///
/// * Generate the call enum and implement various traits on it.
/// * Implement Callable and call_function on `Pallet`
pub fn | (def: &mut Def) -> proc_macro2::TokenStream {
let (span, where_clause, methods, docs) = match def.call.as_ref() {
Some(call) => {
let span = call.attr_span;
let where_clause = call.where_clause.clone();
let methods = call.methods.clone();
let docs = call.docs.clone();
(span, where_clause, methods, docs)
},
None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()),
};
let frame_support = &def.frame_support;
let frame_system = &def.frame_system;
let type_impl_gen = &def.type_impl_generics(span);
let type_decl_bounded_gen = &def.type_decl_bounded_generics(span);
let type_use_gen = &def.type_use_generics(span);
let call_ident = syn::Ident::new("Call", span);
let pallet_ident = &def.pallet_struct.pallet;
let fn_name = methods.iter().map(|method| &method.name).collect::<Vec<_>>();
let call_index = methods.iter().map(|method| method.call_index).collect::<Vec<_>>();
let new_call_variant_fn_name = fn_name
.iter()
.map(|fn_name| quote::format_ident!("new_call_variant_{}", fn_name))
.collect::<Vec<_>>();
let new_call_variant_doc = fn_name
.iter()
.map(|fn_name| format!("Create a call with the variant `{}`.", fn_name))
.collect::<Vec<_>>();
let mut call_index_warnings = Vec::new();
// Emit a warning for each call that is missing `call_index` when not in dev-mode.
for method in &methods {
if method.explicit_call_index || def.dev_mode {
continue
}
let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex")
.index(call_index_warnings.len())
.old("use implicit call indices")
.new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode")
.help_links(&[
"https://github.com/paritytech/substrate/pull/12891",
"https://github.com/paritytech/substrate/pull/11381"
])
.span(method.name.span())
.build();
call_index_warnings.push(warning);
}
let mut fn_weight = Vec::<TokenStream2>::new();
let mut weight_warnings = Vec::new();
for method in &methods {
match &method.weight {
CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)),
            CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => {
let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight")
.index(weight_warnings.len())
.old("use hard-coded constant as call weight")
.new("benchmark all calls or put the pallet into `dev` mode")
.help_link("https://github.com/paritytech/substrate/pull/13798")
.span(lit.span())
.build();
weight_warnings.push(warning);
fn_weight.push(e.into_token_stream());
},
CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()),
CallWeightDef::Inherited => {
let pallet_weight = def
.call
.as_ref()
.expect("we have methods; we have calls; qed")
.inherited_call_weight
.as_ref()
.expect("the parser prevents this");
// Expand `<<T as Config>::WeightInfo>::call_name()`.
let t = &pallet_weight.typename;
let n = &method.name;
fn_weight.push(quote!({ < #t > :: #n () }));
},
}
}
debug_assert_eq!(fn_weight.len(), methods.len());
    let map_fn_docs = if !def.dev_mode {
// Emit the [`Pallet::method`] documentation only for non-dev modes.
|method: &CallVariantDef| {
let reference = format!("See [`Pallet::{}`].", method.name);
quote!(#reference)
}
} else {
        // For the dev-mode do not provide a documentation link as it will break the
// `cargo doc` if the pallet is private inside a test.
|method: &CallVariantDef| {
let reference = format!("See `Pallet::{}`.", method.name);
quote!(#reference)
}
};
let fn_doc = methods.iter().map(map_fn_docs).collect::<Vec<_>>();
let args_name = methods
.iter()
.map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_name_stripped = methods
.iter()
.map(|method| {
method
.args
.iter()
.map(|(_, name, _)| {
syn::Ident::new(name.to_string().trim_start_matches('_'), name.span())
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let make_args_name_pattern = |ref_tok| {
args_name
.iter()
.zip(args_name_stripped.iter())
.map(|(args_name, args_name_stripped)| {
args_name
.iter()
.zip(args_name_stripped)
.map(|(args_name, args_name_stripped)| {
if args_name == args_name_stripped {
quote::quote!( #ref_tok #args_name )
} else {
quote::quote!( #args_name_stripped: #ref_tok #args_name )
}
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
};
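    // Two pattern shapes come out of the closure above (with `ref_tok = Some(ref)`):
    // an argument named `dest` yields `ref dest`, while one named `_dest`
    // (stripped to `dest` for the field name) yields `dest: ref _dest`.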
let args_name_pattern = make_args_name_pattern(None);
let args_name_pattern_ref = make_args_name_pattern(Some(quote::quote!(ref)));
let args_type = methods
.iter()
.map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_compact_attr = methods.iter().map(|method| {
method
.args
.iter()
.map(|(is_compact, _, type_)| {
if *is_compact {
quote::quote_spanned!(type_.span() => #[codec(compact)] )
} else {
quote::quote!()
}
})
.collect::<Vec<_>>()
});
let default_docs =
[syn::parse_quote!(r"Contains a variant per dispatchable extrinsic that this pallet has.")];
let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] };
let maybe_compile_error = if def.call.is_none() {
quote::quote! {
compile_error!(concat!(
"`",
stringify!($pallet_name),
"` does not have #[pallet::call] defined, perhaps you should remove `Call` from \
construct_runtime?",
));
}
} else {
proc_macro2::TokenStream::new()
};
let count = COUNTER.with(|counter| counter.borrow_mut().inc());
let macro_ident = syn::Ident::new(&format!("__is_call_part_defined_{}", count), span);
let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" };
// Wrap all calls inside of storage layers
if let Some(syn::Item::Impl(item_impl)) = def
.call
.as_ref()
.map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index])
{
item_impl.items.iter_mut().for_each(|i| {
if let syn::ImplItem::Fn(method) = i {
let block = &method.block;
method.block = syn::parse_quote! {{
// We execute all dispatchable in a new storage layer, allowing them
// to return an error at any point, and undoing any storage changes.
#frame_support::storage::with_storage_layer(|| #block)
}};
}
});
}
// Extracts #[allow] attributes, necessary so that we don't run into compiler warnings
let maybe_allow_attrs = methods
.iter()
.map(|method| {
method
.attrs
.iter()
.find(|attr| attr.path().is_ident("allow"))
.map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream())
})
.collect::<Vec<_>>();
quote::quote_spanned!(span =>
mod warnings {
#(
#call_index_warnings
)*
#(
#weight_warnings
)*
}
#[doc(hidden)]
pub mod __substrate_call_check {
#[macro_export]
#[doc(hidden)]
macro_rules! #macro_ident {
($pallet_name:ident) => {
#maybe_compile_error
};
}
#[doc(hidden)]
pub use #macro_ident as is_call_part_defined;
}
#( #[doc = #docs] )*
#[derive(
#frame_support::RuntimeDebugNoBound,
#frame_support::CloneNoBound,
#frame_support::EqNoBound,
#frame_support::PartialEqNoBound,
#frame_support::__private::codec::Encode,
#frame_support::__private::codec::Decode,
#frame_support::__private::scale_info::TypeInfo,
)]
#[codec(encode_bound())]
#[codec(decode_bound())]
#[scale_info(skip_type_params(#type_use_gen), capture_docs = #capture_docs)]
#[allow(non_camel_case_types)]
pub enum #call_ident<#type_decl_bounded_gen> #where_clause {
#[doc(hidden)]
#[codec(skip)]
__Ignore(
#frame_support::__private::sp_std::marker::PhantomData<(#type_use_gen,)>,
#frame_support::Never,
),
#(
#[doc = #fn_doc]
#[codec(index = #call_index)]
#fn_name {
#(
#[allow(missing_docs)]
#args_compact_attr #args_name_stripped: #args_type
),*
},
)*
}
impl<#type_impl_gen> #call_ident<#type_use_gen> #where_clause {
#(
#[doc = #new_call_variant_doc]
pub fn #new_call_variant_fn_name(
#( #args_name_stripped: #args_type ),*
) -> Self {
Self::#fn_name {
#( #args_name_stripped ),*
}
}
)*
}
impl<#type_impl_gen> #frame_support::dispatch::GetDispatchInfo
for #call_ident<#type_use_gen>
#where_clause
{
fn get_dispatch_info(&self) -> #frame_support::dispatch::DispatchInfo {
match *self {
#(
Self::#fn_name { #( #args_name_pattern_ref, )* } => {
let __pallet_base_weight = #fn_weight;
let __pallet_weight = <
dyn #frame_support::dispatch::WeighData<( #( & #args_type, )* )>
>::weigh_data(&__pallet_base_weight, ( #( #args_name, )* ));
let __pallet_class = <
dyn #frame_support::dispatch::ClassifyDispatch<
( #( & #args_type, )* )
>
>::classify_dispatch(&__pallet_base_weight, ( #( #args_name, )* ));
let __pallet_pays_fee = <
dyn #frame_support::dispatch::PaysFee<( #( & #args_type, )* )>
>::pays_fee(&__pallet_base_weight, ( #( #args_name, )* ));
#frame_support::dispatch::DispatchInfo {
weight: __pallet_weight,
class: __pallet_class,
pays_fee: __pallet_pays_fee,
}
},
)*
Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"),
}
}
}
impl<#type_impl_gen> #frame_support::dispatch::GetCallName for #call_ident<#type_use_gen>
#where_clause
{
fn get_call_name(&self) -> &'static str {
match *self {
#( Self::#fn_name {.. } => stringify!(#fn_name), )*
Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."),
}
}
fn get_call_names() -> &'static [&'static str] {
&[ #( stringify!(#fn_name), )* ]
}
}
impl<#type_impl_gen> #frame_support::dispatch::GetCallIndex for #call_ident<#type_use_gen>
#where_clause
{
fn get_call_index(&self) -> u8 {
match *self {
#( Self::#fn_name {.. } => #call_index, )*
Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."),
}
}
fn get_call_indices() -> &'static [u8] {
&[ #( #call_index, )* ]
}
}
impl<#type_impl_gen> #frame_support::traits::UnfilteredDispatchable
for #call_ident<#type_use_gen>
#where_clause
{
type RuntimeOrigin = #frame_system::pallet_prelude::OriginFor<T>;
fn dispatch_bypass_filter(
self,
origin: Self::RuntimeOrigin
) -> #frame_support::dispatch::DispatchResultWithPostInfo {
#frame_support::dispatch_context::run_in_context(|| {
match self {
#(
Self::#fn_name { #( #args_name_pattern, )* } => {
#frame_support::__private::sp_tracing::enter_span!(
#frame_support::__private::sp_tracing::trace_span!(stringify!(#fn_name))
);
#maybe_allow_attrs
<#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* )
.map(Into::into).map_err(Into::into)
},
)*
Self::__Ignore(_, _) => {
let _ = origin; // Use origin for empty Call enum
unreachable!("__PhantomItem cannot be used.");
},
}
})
}
}
impl<#type_impl_gen> #frame_support::dispatch::Callable<T> for #pallet_ident<#type_use_gen>
#where_clause
{
type RuntimeCall = #call_ident<#type_use_gen>;
}
impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause {
#[doc(hidden)]
pub fn call_functions() -> #frame_support::__private::metadata_ir::PalletCallMetadataIR {
#frame_support::__private::scale_info::meta_type::<#call_ident<#type_use_gen>>().into()
}
}
)
}
| expand_call | identifier_name |
call.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
pallet::{
parse::call::{CallVariantDef, CallWeightDef},
Def,
},
COUNTER,
};
use proc_macro2::TokenStream as TokenStream2;
use quote::{quote, ToTokens};
use syn::spanned::Spanned;
///
/// * Generate the `Call` enum and implement various traits on it.
/// * Implement `Callable` and `call_functions` on `Pallet`.
pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream {
let (span, where_clause, methods, docs) = match def.call.as_ref() {
Some(call) => {
let span = call.attr_span;
let where_clause = call.where_clause.clone();
let methods = call.methods.clone();
let docs = call.docs.clone();
(span, where_clause, methods, docs)
},
None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()),
};
let frame_support = &def.frame_support;
let frame_system = &def.frame_system;
let type_impl_gen = &def.type_impl_generics(span);
let type_decl_bounded_gen = &def.type_decl_bounded_generics(span);
let type_use_gen = &def.type_use_generics(span);
let call_ident = syn::Ident::new("Call", span);
let pallet_ident = &def.pallet_struct.pallet;
let fn_name = methods.iter().map(|method| &method.name).collect::<Vec<_>>();
let call_index = methods.iter().map(|method| method.call_index).collect::<Vec<_>>();
let new_call_variant_fn_name = fn_name
.iter()
.map(|fn_name| quote::format_ident!("new_call_variant_{}", fn_name))
.collect::<Vec<_>>();
let new_call_variant_doc = fn_name
.iter()
.map(|fn_name| format!("Create a call with the variant `{}`.", fn_name))
.collect::<Vec<_>>();
let mut call_index_warnings = Vec::new();
// Emit a warning for each call that is missing `call_index` when not in dev-mode.
for method in &methods {
if method.explicit_call_index || def.dev_mode {
continue
}
let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex")
.index(call_index_warnings.len())
.old("use implicit call indices")
.new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode")
.help_links(&[
"https://github.com/paritytech/substrate/pull/12891",
"https://github.com/paritytech/substrate/pull/11381"
])
.span(method.name.span())
.build();
call_index_warnings.push(warning);
}
let mut fn_weight = Vec::<TokenStream2>::new();
let mut weight_warnings = Vec::new();
for method in &methods {
match &method.weight {
CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)),
CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => {
let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight")
.index(weight_warnings.len())
.old("use hard-coded constant as call weight")
.new("benchmark all calls or put the pallet into `dev` mode")
.help_link("https://github.com/paritytech/substrate/pull/13798")
.span(lit.span())
.build();
weight_warnings.push(warning);
fn_weight.push(e.into_token_stream());
},
CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()),
CallWeightDef::Inherited => {
let pallet_weight = def
.call
.as_ref()
.expect("we have methods; we have calls; qed")
.inherited_call_weight
.as_ref()
.expect("the parser prevents this");
// Expand `<<T as Config>::WeightInfo>::call_name()`.
let t = &pallet_weight.typename;
let n = &method.name;
fn_weight.push(quote!({ < #t > :: #n () }));
},
}
}
debug_assert_eq!(fn_weight.len(), methods.len());
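// For illustration (a hedged aside): with an inherited weight attribute such as
// `#[pallet::call(weight = <T as Config>::WeightInfo)]`, a hypothetical call
// `fn transfer(..)` ends up with `fn_weight = { < <T as Config>::WeightInfo > :: transfer() }`,
// mirroring the `quote!` expansion in the `Inherited` arm above.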
let map_fn_docs = if !def.dev_mode {
// Emit the [`Pallet::method`] documentation only for non-dev modes.
|method: &CallVariantDef| {
let reference = format!("See [`Pallet::{}`].", method.name);
quote!(#reference)
}
} else {
// For dev mode, do not provide a documentation link, as it would break
// `cargo doc` if the pallet is private inside a test.
|method: &CallVariantDef| {
let reference = format!("See `Pallet::{}`.", method.name);
quote!(#reference)
}
};
let fn_doc = methods.iter().map(map_fn_docs).collect::<Vec<_>>();
let args_name = methods
.iter()
.map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_name_stripped = methods
.iter()
.map(|method| {
method
.args
.iter()
.map(|(_, name, _)| {
syn::Ident::new(name.to_string().trim_start_matches('_'), name.span())
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let make_args_name_pattern = |ref_tok| {
args_name
.iter()
.zip(args_name_stripped.iter())
.map(|(args_name, args_name_stripped)| {
args_name
.iter()
.zip(args_name_stripped)
.map(|(args_name, args_name_stripped)| {
if args_name == args_name_stripped {
quote::quote!( #ref_tok #args_name )
} else {
quote::quote!( #args_name_stripped: #ref_tok #args_name )
}
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
};
let args_name_pattern = make_args_name_pattern(None);
let args_name_pattern_ref = make_args_name_pattern(Some(quote::quote!(ref)));
let args_type = methods
.iter()
.map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_compact_attr = methods.iter().map(|method| {
method
.args
.iter()
.map(|(is_compact, _, type_)| {
if *is_compact {
quote::quote_spanned!(type_.span() => #[codec(compact)] )
} else {
quote::quote!()
}
})
.collect::<Vec<_>>()
});
let default_docs =
[syn::parse_quote!(r"Contains a variant per dispatchable extrinsic that this pallet has.")];
let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] };
let maybe_compile_error = if def.call.is_none() {
quote::quote! {
compile_error!(concat!(
"`",
stringify!($pallet_name),
"` does not have #[pallet::call] defined, perhaps you should remove `Call` from \
construct_runtime?",
));
}
} else {
proc_macro2::TokenStream::new()
};
let count = COUNTER.with(|counter| counter.borrow_mut().inc());
let macro_ident = syn::Ident::new(&format!("__is_call_part_defined_{}", count), span);
let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" };
// Wrap all calls inside of storage layers
if let Some(syn::Item::Impl(item_impl)) = def
.call
.as_ref()
.map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index])
{
item_impl.items.iter_mut().for_each(|i| {
if let syn::ImplItem::Fn(method) = i {
let block = &method.block;
method.block = syn::parse_quote! {{
// We execute all dispatchables in a new storage layer, allowing them
// to return an error at any point while undoing any storage changes.
#frame_support::storage::with_storage_layer(|| #block)
}};
}
});
}
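// For illustration (a hedged sketch with a hypothetical body): a dispatchable written as
// `{ do_something()?; Ok(().into()) }` is rewritten to
// `{ frame_support::storage::with_storage_layer(|| { do_something()?; Ok(().into()) }) }`,
// so an error returned anywhere in the body also rolls back the storage writes it made.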
// Extract the #[allow] attributes so that the generated dispatch arms don't run into compiler warnings
let maybe_allow_attrs = methods
.iter()
.map(|method| {
method
.attrs
.iter()
.find(|attr| attr.path().is_ident("allow"))
.map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream())
})
.collect::<Vec<_>>();
quote::quote_spanned!(span =>
mod warnings {
#(
#call_index_warnings
)*
#(
#weight_warnings
)*
}
#[doc(hidden)]
pub mod __substrate_call_check {
#[macro_export]
#[doc(hidden)]
macro_rules! #macro_ident {
($pallet_name:ident) => {
#maybe_compile_error
};
}
#[doc(hidden)]
pub use #macro_ident as is_call_part_defined;
}
#( #[doc = #docs] )*
#[derive(
#frame_support::RuntimeDebugNoBound,
#frame_support::CloneNoBound,
#frame_support::EqNoBound,
#frame_support::PartialEqNoBound,
#frame_support::__private::codec::Encode,
#frame_support::__private::codec::Decode,
#frame_support::__private::scale_info::TypeInfo,
)]
#[codec(encode_bound())]
#[codec(decode_bound())]
#[scale_info(skip_type_params(#type_use_gen), capture_docs = #capture_docs)]
#[allow(non_camel_case_types)]
pub enum #call_ident<#type_decl_bounded_gen> #where_clause {
#[doc(hidden)]
#[codec(skip)]
__Ignore(
#frame_support::__private::sp_std::marker::PhantomData<(#type_use_gen,)>,
#frame_support::Never,
),
#(
#[doc = #fn_doc]
#[codec(index = #call_index)]
#fn_name {
#(
#[allow(missing_docs)]
#args_compact_attr #args_name_stripped: #args_type
),*
},
)*
}
impl<#type_impl_gen> #call_ident<#type_use_gen> #where_clause {
#(
#[doc = #new_call_variant_doc]
pub fn #new_call_variant_fn_name(
#( #args_name_stripped: #args_type ),*
) -> Self {
Self::#fn_name {
#( #args_name_stripped ),*
}
}
)*
}
impl<#type_impl_gen> #frame_support::dispatch::GetDispatchInfo
for #call_ident<#type_use_gen>
#where_clause
{
fn get_dispatch_info(&self) -> #frame_support::dispatch::DispatchInfo {
match *self {
#(
Self::#fn_name { #( #args_name_pattern_ref, )* } => {
let __pallet_base_weight = #fn_weight;
let __pallet_weight = <
dyn #frame_support::dispatch::WeighData<( #( & #args_type, )* )>
>::weigh_data(&__pallet_base_weight, ( #( #args_name, )* ));
let __pallet_class = <
dyn #frame_support::dispatch::ClassifyDispatch<
( #( & #args_type, )* )
>
>::classify_dispatch(&__pallet_base_weight, ( #( #args_name, )* ));
let __pallet_pays_fee = <
dyn #frame_support::dispatch::PaysFee<( #( & #args_type, )* )>
>::pays_fee(&__pallet_base_weight, ( #( #args_name, )* ));
#frame_support::dispatch::DispatchInfo {
weight: __pallet_weight,
class: __pallet_class,
pays_fee: __pallet_pays_fee,
}
},
)*
Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"),
}
}
}
impl<#type_impl_gen> #frame_support::dispatch::GetCallName for #call_ident<#type_use_gen>
#where_clause
{
fn get_call_name(&self) -> &'static str {
match *self {
#( Self::#fn_name {.. } => stringify!(#fn_name), )*
Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."),
}
}
fn get_call_names() -> &'static [&'static str] {
&[ #( stringify!(#fn_name), )* ]
}
}
impl<#type_impl_gen> #frame_support::dispatch::GetCallIndex for #call_ident<#type_use_gen>
#where_clause
{
fn get_call_index(&self) -> u8 {
match *self {
#( Self::#fn_name {.. } => #call_index, )*
Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."),
}
}
fn get_call_indices() -> &'static [u8] {
&[ #( #call_index, )* ]
}
}
impl<#type_impl_gen> #frame_support::traits::UnfilteredDispatchable
for #call_ident<#type_use_gen>
#where_clause
{
type RuntimeOrigin = #frame_system::pallet_prelude::OriginFor<T>;
fn dispatch_bypass_filter(
self,
origin: Self::RuntimeOrigin
) -> #frame_support::dispatch::DispatchResultWithPostInfo {
#frame_support::dispatch_context::run_in_context(|| {
match self {
#(
Self::#fn_name { #( #args_name_pattern, )* } => {
#frame_support::__private::sp_tracing::enter_span!(
#frame_support::__private::sp_tracing::trace_span!(stringify!(#fn_name))
);
#maybe_allow_attrs
<#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* )
.map(Into::into).map_err(Into::into)
},
)*
Self::__Ignore(_, _) => {
let _ = origin; // Use origin for empty Call enum
unreachable!("__PhantomItem cannot be used.");
},
}
})
}
}
impl<#type_impl_gen> #frame_support::dispatch::Callable<T> for #pallet_ident<#type_use_gen>
#where_clause
{
type RuntimeCall = #call_ident<#type_use_gen>;
}
impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause {
#[doc(hidden)]
pub fn call_functions() -> #frame_support::__private::metadata_ir::PalletCallMetadataIR {
#frame_support::__private::scale_info::meta_type::<#call_ident<#type_use_gen>>().into()
}
}
)
} | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, | random_line_split |
day13.rs | //! # --- Day 13: Shuttle Search ---
//!
//! Your ferry can make it safely to a nearby port, but it won't get much
//! further. When you call to book another ship, you discover that no ships
//! embark from that port to your vacation island. You'll need to get from the
//! port to the nearest airport.
//!
//! Fortunately, a shuttle bus service is available to bring you from the sea
//! port to the airport! Each bus has an ID number that also indicates **how
//! often the bus leaves for the airport**.
//!
//! Bus schedules are defined based on a **timestamp** that measures the
//! **number of minutes** since some fixed reference point in the past. At
//! timestamp `0`, every bus simultaneously departed from the sea port. After
//! that, each bus travels to the airport, then various other locations, and
//! finally returns to the sea port to repeat its journey forever.
//!
//! The time this loop takes a particular bus is also its ID number: the bus
//! with ID `5` departs from the sea port at timestamps `0`, `5`, `10`, `15`,
//! and so on. The bus with ID `11` departs at `0`, `11`, `22`, `33`, and so on.
//! If you are there when the bus departs, you can ride that bus to the airport!
//!
//! Your notes (your puzzle input) consist of two lines. The first line is your
//! estimate of **the earliest timestamp you could depart on a bus**. The second
//! line lists the bus IDs that are in service according to the shuttle company;
//! entries that show `x` must be out of service, so you decide to ignore them.
//!
//! To save time once you arrive, your goal is to figure out **the earliest bus
//! you can take to the airport**. (There will be exactly one such bus.)
//!
//! For example, suppose you have the following notes:
//!
//! ```
//! 939
//! 7,13,x,x,59,x,31,19
//! ```
//!
//! Here, the earliest timestamp you could depart is `939`, and the bus IDs in
//! service are `7`, `13`, `59`, `31`, and `19`. Near timestamp `939`, these bus
//! IDs depart at the times marked `D`:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 929 . . . . .
//! 930 . . . D .
//! 931 D . . . D
//! 932 . . . . .
//! 933 . . . . .
//! 934 . . . . .
//! 935 . . . . .
//! 936 . D . . .
//! 937 . . . . .
//! 938 D . . . .
//! 939 . . . . .
//! 940 . . . . .
//! 941 . . . . .
//! 942 . . . . .
//! 943 . . . . .
//! 944 . . D . .
//! 945 D . . . .
//! 946 . . . . .
//! 947 . . . . .
//! 948 . . . . .
//! 949 . D . . .
//! ```
//!
//! The earliest bus you could take is bus ID `59`. It doesn't depart until
//! timestamp `944`, so you would need to wait `944 - 939 = 5` minutes before it
//! departs. Multiplying the bus ID by the number of minutes you'd need to wait
//! gives `295`.
//!
//! **What is the ID of the earliest bus you can take to the airport multiplied
//! by the number of minutes you'll need to wait for that bus?**
//!
//! ## --- Part Two ---
//!
//! The shuttle company is running a contest: one gold coin for anyone that can
//! find the earliest timestamp such that the first bus ID departs at that time
//! and each subsequent listed bus ID departs at that subsequent minute. (The
//! first line in your input is no longer relevant.)
//!
//! For example, suppose you have the same list of bus IDs as above:
//!
//! `7,13,x,x,59,x,31,19`
//!
//! An `x` in the schedule means there are no constraints on what bus IDs must
//! depart at that time.
//!
//! This means you are looking for the earliest timestamp (called `t`) such
//! that:
//!
//! - Bus ID `7` departs at timestamp `t`.
//! - Bus ID `13` departs one minute after timestamp `t`.
//! - There are no requirements or restrictions on departures at two or three
//! minutes after timestamp `t`.
//! - Bus ID `59` departs four minutes after timestamp `t`.
//! - There are no requirements or restrictions on departures at five minutes
//! after timestamp `t`.
//! - Bus ID `31` departs six minutes after timestamp `t`.
//! - Bus ID `19` departs seven minutes after timestamp `t`.
//!
//! The only bus departures that matter are the listed bus IDs at their specific
//! offsets from `t`. Those bus IDs can depart at other times, and other bus IDs
//! can depart at those times. For example, in the list above, because bus ID
//! `19` must depart seven minutes after the timestamp at which bus ID `7`
//! departs, bus ID `7` will always **also** be departing with bus ID `19` at
//! seven minutes after timestamp `t`.
//!
//! In this example, the earliest timestamp at which this occurs is
//! **`1068781`**:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 1068773 . . . . .
//! 1068774 D . . . .
//! 1068775 . . . . .
//! 1068776 . . . . .
//! 1068777 . . . . .
//! 1068778 . . . . .
//! 1068779 . . . . .
//! 1068780 . . . . .
//! 1068781 D . . . .
//! 1068782 . D . . .
//! 1068783 . . . . .
//! 1068784 . . . . .
//! 1068785 . . D . .
//! 1068786 . . . . .
//! 1068787 . . . D .
//! 1068788 D . . . D
//! 1068789 . . . . .
//! 1068790 . . . . .
//! 1068791 . . . . .
//! 1068792 . . . . .
//! 1068793 . . . . .
//! 1068794 . . . . .
//! 1068795 D D . . .
//! 1068796 . . . . .
//! 1068797 . . . . .
//! ```
//!
//! In the above example, bus ID `7` departs at timestamp `1068788` (seven
//! minutes after `t`). This is fine; the only requirement on that minute is
//! that bus ID `19` departs then, and it does.
//!
//! Here are some other examples:
//!
//! - The earliest timestamp that matches the list `17,x,13,19` is `3417`.
//! - `67,7,59,61` first occurs at timestamp `754018`.
//! - `67,x,7,59,61` first occurs at timestamp `779210`.
//! - `67,7,x,59,61` first occurs at timestamp `1261476`.
//! - `1789,37,47,1889` first occurs at timestamp `1202161486`.
//!
//! However, with so many bus IDs in your list, surely the actual earliest
//! timestamp will be larger than `100000000000000`!
//!
//! **What is the earliest timestamp such that all of the listed bus IDs depart
//! at offsets matching their positions in the list?**
use std::{env, fs};
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
struct Bus {
serial: usize,
id: usize,
}
fn | (input: &str) -> (usize, Vec<Bus>) {
let mut it = input.lines();
let first_line = it.next().unwrap();
let second_line = it.next().unwrap();
let depart_time = first_line.parse::<usize>().unwrap();
let shuttles = second_line
.split(',')
.enumerate()
.filter_map(|(serial, x)| {
if let Ok(id) = x.parse::<usize>() {
Some(Bus { serial, id })
} else {
None
}
})
.collect::<Vec<Bus>>();
(depart_time, shuttles)
}
fn calc_wait_time(depart_time: usize, shuttles: &[Bus]) -> usize {
let buses: Vec<usize> = shuttles.iter().map(|x| x.id).collect();
let (idx, min_wait) = buses
.iter()
.map(|&x| x - depart_time % x)
.enumerate()
.min_by_key(|&x| x.1)
.unwrap();
let bus_id = buses[idx];
bus_id * min_wait
}
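// Worked example (a hedged aside): with depart_time = 939 and buses 7, 13, 59, 31 and 19,
// the waits are 6, 10, 5, 22 and 11 minutes respectively, so bus 59 wins with a wait of
// 944 - 939 = 5 and the function returns 59 * 5 = 295. Note that `x - depart_time % x`
// yields `x` rather than 0 when `depart_time` is itself a multiple of `x`; that case does
// not occur in the puzzle input.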
/// Implements the sieve described at <https://github.com/mstksg/advent-of-code-2020/blob/master/reflections.md#day-13>
fn calc_contest(shuttles: &[Bus]) -> usize {
let mut earliest = 0;
let mut period = 1;
for Bus { serial, id } in shuttles {
let mut n = 0;
loop {
let candidate = earliest + period * n;
if (candidate + serial) % id == 0 {
earliest = candidate;
period *= id;
break;
} else {
n += 1;
}
}
}
earliest
}
fn main() -> Result<(), &'static str> {
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
return Err("not enough arguments");
}
let filename = &args[1];
println!("Load input file {}.", filename);
let input = fs::read_to_string(filename).expect("Something went wrong reading the file.");
let (depart_time, shuttles) = parse_input(&input);
let prod = calc_wait_time(depart_time, &shuttles);
println!(
"The ID of the earliest bus multiplied by the wait minutes is {}",
prod
);
let earliest = calc_contest(&shuttles);
println!(
"The earliest timestamp listed bus IDs depart match with offsets is {}",
earliest
);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
const TEST_ARRAY: [Bus; 5] = [
Bus { serial: 0, id: 7 },
Bus { serial: 1, id: 13 },
Bus { serial: 4, id: 59 },
Bus { serial: 6, id: 31 },
Bus { serial: 7, id: 19 },
];
#[test]
fn test_parse_input() {
let input = "939
7,13,x,x,59,x,31,19
";
let (depart_time, shuttles) = parse_input(input);
assert_eq!(depart_time, 939);
assert_eq!(shuttles, TEST_ARRAY.to_vec());
}
#[test]
fn test_wait_time() {
let prod = calc_wait_time(939, &TEST_ARRAY);
assert_eq!(prod, 295);
}
#[test]
fn test_contest() {
let earliest = calc_contest(&TEST_ARRAY);
assert_eq!(earliest, 1068781);
}
}
| parse_input | identifier_name |
day13.rs | //! # --- Day 13: Shuttle Search ---
//!
//! Your ferry can make it safely to a nearby port, but it won't get much
//! further. When you call to book another ship, you discover that no ships
//! embark from that port to your vacation island. You'll need to get from the
//! port to the nearest airport.
//!
//! Fortunately, a shuttle bus service is available to bring you from the sea
//! port to the airport! Each bus has an ID number that also indicates **how
//! often the bus leaves for the airport**.
//!
//! Bus schedules are defined based on a **timestamp** that measures the
//! **number of minutes** since some fixed reference point in the past. At
//! timestamp `0`, every bus simultaneously departed from the sea port. After
//! that, each bus travels to the airport, then various other locations, and
//! finally returns to the sea port to repeat its journey forever.
//!
//! The time this loop takes a particular bus is also its ID number: the bus
//! with ID `5` departs from the sea port at timestamps `0`, `5`, `10`, `15`,
//! and so on. The bus with ID `11` departs at `0`, `11`, `22`, `33`, and so on.
//! If you are there when the bus departs, you can ride that bus to the airport!
//!
//! Your notes (your puzzle input) consist of two lines. The first line is your
//! estimate of **the earliest timestamp you could depart on a bus**. The second
//! line lists the bus IDs that are in service according to the shuttle company;
//! entries that show `x` must be out of service, so you decide to ignore them.
//!
//! To save time once you arrive, your goal is to figure out **the earliest bus
//! you can take to the airport**. (There will be exactly one such bus.)
//!
//! For example, suppose you have the following notes:
//!
//! ```
//! 939
//! 7,13,x,x,59,x,31,19
//! ```
//!
//! Here, the earliest timestamp you could depart is `939`, and the bus IDs in
//! service are `7`, `13`, `59`, `31`, and `19`. Near timestamp `939`, these bus
//! IDs depart at the times marked `D`:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 929 . . . . .
//! 930 . . . D .
//! 931 D . . . D
//! 932 . . . . .
//! 933 . . . . .
//! 934 . . . . .
//! 935 . . . . .
//! 936 . D . . .
//! 937 . . . . .
//! 938 D . . . .
//! 939 . . . . .
//! 940 . . . . .
//! 941 . . . . .
//! 942 . . . . .
//! 943 . . . . .
//! 944 . . D . .
//! 945 D . . . .
//! 946 . . . . .
//! 947 . . . . .
//! 948 . . . . .
//! 949 . D . . .
//! ```
//!
//! The earliest bus you could take is bus ID `59`. It doesn't depart until
//! timestamp `944`, so you would need to wait `944 - 939 = 5` minutes before it
//! departs. Multiplying the bus ID by the number of minutes you'd need to wait
//! gives `295`.
//!
//! **What is the ID of the earliest bus you can take to the airport multiplied
//! by the number of minutes you'll need to wait for that bus?**
//!
//! ## --- Part Two ---
//!
//! The shuttle company is running a contest: one gold coin for anyone that can
//! find the earliest timestamp such that the first bus ID departs at that time
//! and each subsequent listed bus ID departs at that subsequent minute. (The
//! first line in your input is no longer relevant.)
//!
//! For example, suppose you have the same list of bus IDs as above:
//!
//! `7,13,x,x,59,x,31,19`
//!
//! An `x` in the schedule means there are no constraints on what bus IDs must
//! depart at that time.
//!
//! This means you are looking for the earliest timestamp (called `t`) such
//! that:
//!
//! - Bus ID `7` departs at timestamp `t`.
//! - Bus ID `13` departs one minute after timestamp `t`.
//! - There are no requirements or restrictions on departures at two or three
//! minutes after timestamp `t`.
//! - Bus ID `59` departs four minutes after timestamp `t`.
//! - There are no requirements or restrictions on departures at five minutes
//! after timestamp `t`.
//! - Bus ID `31` departs six minutes after timestamp `t`.
//! - Bus ID `19` departs seven minutes after timestamp `t`.
//!
//! The only bus departures that matter are the listed bus IDs at their specific
//! offsets from `t`. Those bus IDs can depart at other times, and other bus IDs
//! can depart at those times. For example, in the list above, because bus ID
//! `19` must depart seven minutes after the timestamp at which bus ID `7`
//! departs, bus ID `7` will always **also** be departing with bus ID `19` at
//! seven minutes after timestamp `t`.
//!
//! In this example, the earliest timestamp at which this occurs is
//! **`1068781`**:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 1068773 . . . . .
//! 1068774 D . . . .
//! 1068775 . . . . .
//! 1068776 . . . . .
//! 1068777 . . . . .
//! 1068778 . . . . .
//! 1068779 . . . . .
//! 1068780 . . . . .
//! 1068781 D . . . .
//! 1068782 . D . . .
//! 1068783 . . . . .
//! 1068784 . . . . .
//! 1068785 . . D . .
//! 1068786 . . . . .
//! 1068787 . . . D .
//! 1068788 D . . . D
//! 1068789 . . . . .
//! 1068790 . . . . .
//! 1068791 . . . . .
//! 1068792 . . . . .
//! 1068793 . . . . .
//! 1068794 . . . . .
//! 1068795 D D . . .
//! 1068796 . . . . .
//! 1068797 . . . . .
//! ```
//!
//! In the above example, bus ID `7` departs at timestamp `1068788` (seven
//! minutes after `t`). This is fine; the only requirement on that minute is
//! that bus ID `19` departs then, and it does.
//!
//! Here are some other examples:
//!
//! - The earliest timestamp that matches the list `17,x,13,19` is `3417`.
//! - `67,7,59,61` first occurs at timestamp `754018`.
//! - `67,x,7,59,61` first occurs at timestamp `779210`.
//! - `67,7,x,59,61` first occurs at timestamp `1261476`.
//! - `1789,37,47,1889` first occurs at timestamp `1202161486`.
//!
//! However, with so many bus IDs in your list, surely the actual earliest
//! timestamp will be larger than `100000000000000`!
//!
//! **What is the earliest timestamp such that all of the listed bus IDs depart
//! at offsets matching their positions in the list?**
use std::{env, fs};
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
struct Bus {
serial: usize,
id: usize,
}
fn parse_input(input: &str) -> (usize, Vec<Bus>) {
let mut it = input.lines();
let first_line = it.next().unwrap();
let second_line = it.next().unwrap();
let depart_time = first_line.parse::<usize>().unwrap();
let shuttles = second_line
.split(',')
.enumerate()
.filter_map(|(serial, x)| {
if let Ok(id) = x.parse::<usize>() {
Some(Bus { serial, id })
} else {
None
}
})
.collect::<Vec<Bus>>();
(depart_time, shuttles)
}
fn calc_wait_time(depart_time: usize, shuttles: &[Bus]) -> usize {
let buses: Vec<usize> = shuttles.iter().map(|x| x.id).collect();
let (idx, min_wait) = buses
.iter()
.map(|&x| x - depart_time % x)
.enumerate()
.min_by_key(|&x| x.1)
.unwrap();
let bus_id = buses[idx];
bus_id * min_wait
}
/// Implements the sieve described at <https://github.com/mstksg/advent-of-code-2020/blob/master/reflections.md#day-13>
fn calc_contest(shuttles: &[Bus]) -> usize {
let mut earliest = 0;
let mut period = 1;
for Bus { serial, id } in shuttles {
let mut n = 0;
loop {
let candidate = earliest + period * n;
if (candidate + serial) % id == 0 {
earliest = candidate;
period *= id;
break;
} else |
}
}
earliest
}
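// Worked trace (a hedged aside) for the sample schedule 7,13,x,x,59,x,31,19:
// after bus 7 (offset 0): earliest = 0, period = 7
// after bus 13 (offset 1): earliest = 77, period = 91
// after bus 59 (offset 4): earliest = 350, period = 5369
// after bus 31 (offset 6): earliest = 70147, period = 166439
// after bus 19 (offset 7): earliest = 1068781, matching the puzzle text.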
fn main() -> Result<(), &'static str> {
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
return Err("not enough arguments");
}
let filename = &args[1];
println!("Load input file {}.", filename);
let input = fs::read_to_string(filename).expect("Something went wrong reading the file.");
let (depart_time, shuttles) = parse_input(&input);
let prod = calc_wait_time(depart_time, &shuttles);
println!(
"The ID of the earliest bus multiplied by the wait minutes is {}",
prod
);
let earliest = calc_contest(&shuttles);
println!(
"The earliest timestamp listed bus IDs depart match with offsets is {}",
earliest
);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
const TEST_ARRAY: [Bus; 5] = [
Bus { serial: 0, id: 7 },
Bus { serial: 1, id: 13 },
Bus { serial: 4, id: 59 },
Bus { serial: 6, id: 31 },
Bus { serial: 7, id: 19 },
];
#[test]
fn test_parse_input() {
let input = "939
7,13,x,x,59,x,31,19
";
let (depart_time, shuttles) = parse_input(input);
assert_eq!(depart_time, 939);
assert_eq!(shuttles, TEST_ARRAY.to_vec());
}
#[test]
fn test_wait_time() {
let prod = calc_wait_time(939, &TEST_ARRAY);
assert_eq!(prod, 295);
}
#[test]
fn test_contest() {
let earliest = calc_contest(&TEST_ARRAY);
assert_eq!(earliest, 1068781);
}
}
| {
n += 1;
} | conditional_block |
day13.rs | //! # --- Day 13: Shuttle Search ---
//!
//! Your ferry can make it safely to a nearby port, but it won't get much
//! further. When you call to book another ship, you discover that no ships
//! embark from that port to your vacation island. You'll need to get from the
//! port to the nearest airport.
//!
//! Fortunately, a shuttle bus service is available to bring you from the sea
//! port to the airport! Each bus has an ID number that also indicates **how
//! often the bus leaves for the airport**.
//!
//! Bus schedules are defined based on a **timestamp** that measures the
//! **number of minutes** since some fixed reference point in the past. At
//! timestamp `0`, every bus simultaneously departed from the sea port. After
//! that, each bus travels to the airport, then various other locations, and
//! finally returns to the sea port to repeat its journey forever.
//!
//! The time this loop takes a particular bus is also its ID number: the bus
//! with ID `5` departs from the sea port at timestamps `0`, `5`, `10`, `15`,
//! and so on. The bus with ID `11` departs at `0`, `11`, `22`, `33`, and so on.
//! If you are there when the bus departs, you can ride that bus to the airport!
//!
//! Your notes (your puzzle input) consist of two lines. The first line is your
//! estimate of **the earliest timestamp you could depart on a bus**. The second
//! line lists the bus IDs that are in service according to the shuttle company;
//! entries that show `x` must be out of service, so you decide to ignore them.
//!
//! To save time once you arrive, your goal is to figure out **the earliest bus
//! you can take to the airport**. (There will be exactly one such bus.)
//!
//! For example, suppose you have the following notes:
//!
//! ```
//! 939
//! 7,13,x,x,59,x,31,19
//! ``` | //!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 929 . . . . .
//! 930 . . . D .
//! 931 D . . . D
//! 932 . . . . .
//! 933 . . . . .
//! 934 . . . . .
//! 935 . . . . .
//! 936 . D . . .
//! 937 . . . . .
//! 938 D . . . .
//! 939 . . . . .
//! 940 . . . . .
//! 941 . . . . .
//! 942 . . . . .
//! 943 . . . . .
//! 944 . . D . .
//! 945 D . . . .
//! 946 . . . . .
//! 947 . . . . .
//! 948 . . . . .
//! 949 . D . . .
//! ```
//!
//! The earliest bus you could take is bus ID `59`. It doesn't depart until
//! timestamp `944`, so you would need to wait `944 - 939 = 5` minutes before it
//! departs. Multiplying the bus ID by the number of minutes you'd need to wait
//! gives `295`.
//!
//! **What is the ID of the earliest bus you can take to the airport multiplied
//! by the number of minutes you'll need to wait for that bus?**
//!
//! ## --- Part Two ---
//!
//! The shuttle company is running a contest: one gold coin for anyone that can
//! find the earliest timestamp such that the first bus ID departs at that time
//! and each subsequent listed bus ID departs at that subsequent minute. (The
//! first line in your input is no longer relevant.)
//!
//! For example, suppose you have the same list of bus IDs as above:
//!
//! `7,13,x,x,59,x,31,19`
//!
//! An `x` in the schedule means there are no constraints on what bus IDs must
//! depart at that time.
//!
//! This means you are looking for the earliest timestamp (called `t`) such
//! that:
//!
//! - Bus ID `7` departs at timestamp `t`.
//! - Bus ID `13` departs one minute after timestamp `t`.
//! - There are no requirements or restrictions on departures at two or three
//! minutes after timestamp `t`.
//! - Bus ID `59` departs four minutes after timestamp `t`.
//! - There are no requirements or restrictions on departures at five minutes
//! after timestamp `t`.
//! - Bus ID `31` departs six minutes after timestamp `t`.
//! - Bus ID `19` departs seven minutes after timestamp `t`.
//!
//! The only bus departures that matter are the listed bus IDs at their specific
//! offsets from `t`. Those bus IDs can depart at other times, and other bus IDs
//! can depart at those times. For example, in the list above, because bus ID
//! `19` must depart seven minutes after the timestamp at which bus ID `7`
//! departs, bus ID `7` will always **also** be departing with bus ID `19` at
//! seven minutes after timestamp `t`.
//!
//! In this example, the earliest timestamp at which this occurs is
//! **`1068781`**:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 1068773 . . . . .
//! 1068774 D . . . .
//! 1068775 . . . . .
//! 1068776 . . . . .
//! 1068777 . . . . .
//! 1068778 . . . . .
//! 1068779 . . . . .
//! 1068780 . . . . .
//! 1068781 D . . . .
//! 1068782 . D . . .
//! 1068783 . . . . .
//! 1068784 . . . . .
//! 1068785 . . D . .
//! 1068786 . . . . .
//! 1068787 . . . D .
//! 1068788 D . . . D
//! 1068789 . . . . .
//! 1068790 . . . . .
//! 1068791 . . . . .
//! 1068792 . . . . .
//! 1068793 . . . . .
//! 1068794 . . . . .
//! 1068795 D D . . .
//! 1068796 . . . . .
//! 1068797 . . . . .
//! ```
//!
//! In the above example, bus ID `7` departs at timestamp `1068788` (seven
//! minutes after `t`). This is fine; the only requirement on that minute is
//! that bus ID `19` departs then, and it does.
//!
//! Here are some other examples:
//!
//! - The earliest timestamp that matches the list `17,x,13,19` is `3417`.
//! - `67,7,59,61` first occurs at timestamp `754018`.
//! - `67,x,7,59,61` first occurs at timestamp `779210`.
//! - `67,7,x,59,61` first occurs at timestamp `1261476`.
//! - `1789,37,47,1889` first occurs at timestamp `1202161486`.
//!
//! However, with so many bus IDs in your list, surely the actual earliest
//! timestamp will be larger than `100000000000000`!
//!
//! **What is the earliest timestamp such that all of the listed bus IDs depart
//! at offsets matching their positions in the list?**
use std::{env, fs};
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
struct Bus {
serial: usize,
id: usize,
}
fn parse_input(input: &str) -> (usize, Vec<Bus>) {
let mut it = input.lines();
let first_line = it.next().unwrap();
let second_line = it.next().unwrap();
let depart_time = first_line.parse::<usize>().unwrap();
let shuttles = second_line
.split(',')
.enumerate()
.filter_map(|(serial, x)| {
if let Ok(id) = x.parse::<usize>() {
Some(Bus { serial, id })
} else {
None
}
})
.collect::<Vec<Bus>>();
(depart_time, shuttles)
}
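// For illustration (a hedged aside): parsing the sample schedule "7,13,x,x,59,x,31,19"
// keeps only the numeric entries together with their column index, i.e. the pairs
// (serial, id) = (0, 7), (1, 13), (4, 59), (6, 31) and (7, 19), exactly the
// TEST_ARRAY constant used in the tests below.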
fn calc_wait_time(depart_time: usize, shuttles: &[Bus]) -> usize {
let buses: Vec<usize> = shuttles.iter().map(|x| x.id).collect();
let (idx, min_wait) = buses
.iter()
.map(|&x| x - depart_time % x)
.enumerate()
.min_by_key(|&x| x.1)
.unwrap();
let bus_id = buses[idx];
bus_id * min_wait
}
/// Implements the sieve described at <https://github.com/mstksg/advent-of-code-2020/blob/master/reflections.md#day-13>
fn calc_contest(shuttles: &[Bus]) -> usize {
let mut earliest = 0;
let mut period = 1;
for Bus { serial, id } in shuttles {
let mut n = 0;
loop {
let candidate = earliest + period * n;
if (candidate + serial) % id == 0 {
earliest = candidate;
period *= id;
break;
} else {
n += 1;
}
}
}
earliest
}
fn main() -> Result<(), &'static str> {
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
return Err("not enough arguments");
}
let filename = &args[1];
println!("Load input file {}.", filename);
let input = fs::read_to_string(filename).expect("Something went wrong reading the file.");
let (depart_time, shuttles) = parse_input(&input);
let prod = calc_wait_time(depart_time, &shuttles);
println!(
"The ID of the earliest bus multiplied by the wait minutes is {}",
prod
);
let earliest = calc_contest(&shuttles);
println!(
"The earliest timestamp listed bus IDs depart match with offsets is {}",
earliest
);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
const TEST_ARRAY: [Bus; 5] = [
Bus { serial: 0, id: 7 },
Bus { serial: 1, id: 13 },
Bus { serial: 4, id: 59 },
Bus { serial: 6, id: 31 },
Bus { serial: 7, id: 19 },
];
#[test]
fn test_parse_input() {
let input = "939
7,13,x,x,59,x,31,19
";
let (depart_time, shuttles) = parse_input(input);
assert_eq!(depart_time, 939);
assert_eq!(shuttles, TEST_ARRAY.to_vec());
}
#[test]
fn test_wait_time() {
let prod = calc_wait_time(939, &TEST_ARRAY);
assert_eq!(prod, 295);
}
#[test]
fn test_contest() {
let earliest = calc_contest(&TEST_ARRAY);
assert_eq!(earliest, 1068781);
}
} | //!
//! Here, the earliest timestamp you could depart is `939`, and the bus IDs in
//! service are `7`, `13`, `59`, `31`, and `19`. Near timestamp `939`, these bus
//! IDs depart at the times marked `D`: | random_line_split |
day13.rs | //! # --- Day 13: Shuttle Search ---
//!
//! Your ferry can make it safely to a nearby port, but it won't get much
//! further. When you call to book another ship, you discover that no ships
//! embark from that port to your vacation island. You'll need to get from the
//! port to the nearest airport.
//!
//! Fortunately, a shuttle bus service is available to bring you from the sea
//! port to the airport! Each bus has an ID number that also indicates **how
//! often the bus leaves for the airport**.
//!
//! Bus schedules are defined based on a **timestamp** that measures the
//! **number of minutes** since some fixed reference point in the past. At
//! timestamp `0`, every bus simultaneously departed from the sea port. After
//! that, each bus travels to the airport, then various other locations, and
//! finally returns to the sea port to repeat its journey forever.
//!
//! The time this loop takes a particular bus is also its ID number: the bus
//! with ID `5` departs from the sea port at timestamps `0`, `5`, `10`, `15`,
//! and so on. The bus with ID `11` departs at `0`, `11`, `22`, `33`, and so on.
//! If you are there when the bus departs, you can ride that bus to the airport!
//!
//! Your notes (your puzzle input) consist of two lines. The first line is your
//! estimate of **the earliest timestamp you could depart on a bus**. The second
//! line lists the bus IDs that are in service according to the shuttle company;
//! entries that show `x` must be out of service, so you decide to ignore them.
//!
//! To save time once you arrive, your goal is to figure out **the earliest bus
//! you can take to the airport**. (There will be exactly one such bus.)
//!
//! For example, suppose you have the following notes:
//!
//! ```
//! 939
//! 7,13,x,x,59,x,31,19
//! ```
//!
//! Here, the earliest timestamp you could depart is `939`, and the bus IDs in
//! service are `7`, `13`, `59`, `31`, and `19`. Near timestamp `939`, these bus
//! IDs depart at the times marked `D`:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 929 . . . . .
//! 930 . . . D .
//! 931 D . . . D
//! 932 . . . . .
//! 933 . . . . .
//! 934 . . . . .
//! 935 . . . . .
//! 936 . D . . .
//! 937 . . . . .
//! 938 D . . . .
//! 939 . . . . .
//! 940 . . . . .
//! 941 . . . . .
//! 942 . . . . .
//! 943 . . . . .
//! 944 . . D . .
//! 945 D . . . .
//! 946 . . . . .
//! 947 . . . . .
//! 948 . . . . .
//! 949 . D . . .
//! ```
//!
//! The earliest bus you could take is bus ID `59`. It doesn't depart until
//! timestamp `944`, so you would need to wait `944 - 939 = 5` minutes before it
//! departs. Multiplying the bus ID by the number of minutes you'd need to wait
//! gives `295`.
//!
//! **What is the ID of the earliest bus you can take to the airport multiplied
//! by the number of minutes you'll need to wait for that bus?**
//!
//! ## --- Part Two ---
//!
//! The shuttle company is running a contest: one gold coin for anyone that can
//! find the earliest timestamp such that the first bus ID departs at that time
//! and each subsequent listed bus ID departs at that subsequent minute. (The
//! first line in your input is no longer relevant.)
//!
//! For example, suppose you have the same list of bus IDs as above:
//!
//! `7,13,x,x,59,x,31,19`
//!
//! An `x` in the schedule means there are no constraints on what bus IDs must
//! depart at that time.
//!
//! This means you are looking for the earliest timestamp (called `t`) such
//! that:
//!
//! - Bus ID `7` departs at timestamp `t`.
//! - Bus ID `13` departs one minute after timestamp `t`.
//! - There are no requirements or restrictions on departures at two or three
//! minutes after timestamp `t`.
//! - Bus ID `59` departs four minutes after timestamp `t`.
//! - There are no requirements or restrictions on departures at five minutes
//! after timestamp `t`.
//! - Bus ID `31` departs six minutes after timestamp `t`.
//! - Bus ID `19` departs seven minutes after timestamp `t`.
//!
//! The only bus departures that matter are the listed bus IDs at their specific
//! offsets from `t`. Those bus IDs can depart at other times, and other bus IDs
//! can depart at those times. For example, in the list above, because bus ID
//! `19` must depart seven minutes after the timestamp at which bus ID `7`
//! departs, bus ID `7` will always **also** be departing with bus ID `19` at
//! seven minutes after timestamp `t`.
//!
//! In this example, the earliest timestamp at which this occurs is
//! **`1068781`**:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 1068773 . . . . .
//! 1068774 D . . . .
//! 1068775 . . . . .
//! 1068776 . . . . .
//! 1068777 . . . . .
//! 1068778 . . . . .
//! 1068779 . . . . .
//! 1068780 . . . . .
//! 1068781 D . . . .
//! 1068782 . D . . .
//! 1068783 . . . . .
//! 1068784 . . . . .
//! 1068785 . . D . .
//! 1068786 . . . . .
//! 1068787 . . . D .
//! 1068788 D . . . D
//! 1068789 . . . . .
//! 1068790 . . . . .
//! 1068791 . . . . .
//! 1068792 . . . . .
//! 1068793 . . . . .
//! 1068794 . . . . .
//! 1068795 D D . . .
//! 1068796 . . . . .
//! 1068797 . . . . .
//! ```
//!
//! In the above example, bus ID `7` departs at timestamp `1068788` (seven
//! minutes after `t`). This is fine; the only requirement on that minute is
//! that bus ID `19` departs then, and it does.
//!
//! Here are some other examples:
//!
//! - The earliest timestamp that matches the list `17,x,13,19` is `3417`.
//! - `67,7,59,61` first occurs at timestamp `754018`.
//! - `67,x,7,59,61` first occurs at timestamp `779210`.
//! - `67,7,x,59,61` first occurs at timestamp `1261476`.
//! - `1789,37,47,1889` first occurs at timestamp `1202161486`.
//!
//! However, with so many bus IDs in your list, surely the actual earliest
//! timestamp will be larger than `100000000000000`!
//!
//! **What is the earliest timestamp such that all of the listed bus IDs depart
//! at offsets matching their positions in the list?**
use std::{env, fs};
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
struct Bus {
serial: usize,
id: usize,
}
fn parse_input(input: &str) -> (usize, Vec<Bus>) {
let mut it = input.lines();
let first_line = it.next().unwrap();
let second_line = it.next().unwrap();
let depart_time = first_line.parse::<usize>().unwrap();
let shuttles = second_line
.split(',')
.enumerate()
.filter_map(|(serial, x)| {
if let Ok(id) = x.parse::<usize>() {
Some(Bus { serial, id })
} else {
None
}
})
.collect::<Vec<Bus>>();
(depart_time, shuttles)
}
fn calc_wait_time(depart_time: usize, shuttles: &[Bus]) -> usize {
let buses: Vec<usize> = shuttles.iter().map(|x| x.id).collect();
let (idx, min_wait) = buses
.iter()
.map(|&x| x - depart_time % x)
.enumerate()
.min_by_key(|&x| x.1)
.unwrap();
let bus_id = buses[idx];
bus_id * min_wait
}
/// Implements the sieve described at <https://github.com/mstksg/advent-of-code-2020/blob/master/reflections.md#day-13>
fn calc_contest(shuttles: &[Bus]) -> usize {
let mut earliest = 0;
let mut period = 1;
for Bus { serial, id } in shuttles {
let mut n = 0;
loop {
let candidate = earliest + period * n;
if (candidate + serial) % id == 0 {
earliest = candidate;
period *= id;
break;
} else {
n += 1;
}
}
}
earliest
}
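// In number-theoretic terms (a hedged aside): each bus imposes the congruence
// t ≡ -serial (mod id). The bus ids in these puzzles are pairwise coprime, so the
// Chinese Remainder Theorem guarantees a unique solution modulo the product of all
// ids, and the sieve above converges to exactly that residue.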
fn main() -> Result<(), &'static str> {
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
return Err("not enough arguments");
}
let filename = &args[1];
println!("Load input file {}.", filename);
let input = fs::read_to_string(filename).expect("Something went wrong reading the file.");
let (depart_time, shuttles) = parse_input(&input);
let prod = calc_wait_time(depart_time, &shuttles);
println!(
"The ID of the earliest bus multiplied by the wait minutes is {}",
prod
);
let earliest = calc_contest(&shuttles);
println!(
"The earliest timestamp listed bus IDs depart match with offsets is {}",
earliest
);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
const TEST_ARRAY: [Bus; 5] = [
Bus { serial: 0, id: 7 },
Bus { serial: 1, id: 13 },
Bus { serial: 4, id: 59 },
Bus { serial: 6, id: 31 },
Bus { serial: 7, id: 19 },
];
#[test]
fn test_parse_input() {
let input = "939
7,13,x,x,59,x,31,19
";
let (depart_time, shuttles) = parse_input(input);
assert_eq!(depart_time, 939);
assert_eq!(shuttles, TEST_ARRAY.to_vec());
}
#[test]
fn test_wait_time() {
let prod = calc_wait_time(939, &TEST_ARRAY);
assert_eq!(prod, 295);
}
#[test]
fn test_contest() |
}
| {
let earliest = calc_contest(&TEST_ARRAY);
assert_eq!(earliest, 1068781);
} | identifier_body |
samplesheet.rs | //! This module contains tools to build sample sheets from lists of samples,
//! and to export sample sheets to ARResT-compatible formats.
use std::{collections::HashMap, convert::TryInto, fs::File, io::Write, path::{Path, PathBuf}};
use std::error::Error;
use crate::{models, vaultdb::MatchStatus};
use calamine::{Reader, Xlsx, open_workbook};
use diesel::{PgConnection, QueryDsl, RunQueryDsl, ExpressionMethods};
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
/// A catch-all error type
type Result<T> = std::result::Result<T, Box<dyn Error>>;
/// A sample sheet containing a list of samples
#[derive(Debug)]
pub struct SampleSheet {
/// The entries of the sample sheet
pub entries: Vec<SampleSheetEntry>,
}
/// An entry of a SampleSheet
#[derive(Debug,Default)]
pub struct SampleSheetEntry {
/// Sample data accoring to the database
pub model: models::Sample,
/// Columns usually imported from an external sample sheet.
/// These entries can overlap with basic data. During export,
/// the `override` settings control which one to use.
pub extra_cols: HashMap<String, String>
}
/// Convert DNA numbers to XX-XXXXX format, will be filled up with zeros if necessary.
///
/// If a DNA number is in a supported format, it will be normalized to a two-digit year
/// encoding, a dash sign `-` and a five-digit number. A supported input format
/// * may or may not start with a `D-` prefix
/// * must contain a number, dash, number sequence
///
/// If `dnanr` is not in a supported format, `None` is returned.
///
/// # Example
/// ```
/// assert_eq!(normalize_dna_nr("01-12345").as_deref(), Some("01-12345"));
/// assert_eq!(normalize_dna_nr("01-345").as_deref(), Some("01-00345"));
/// assert_eq!(normalize_dna_nr("D-1-345").as_deref(), Some("01-00345"));
/// assert_eq!(normalize_dna_nr("asdfjklö"), None);
/// ```
pub(crate) fn normalize_dna_nr(dnanr: &str) -> Option<String> {
let dnanr = dnanr.strip_prefix("D-").unwrap_or(dnanr);
let parts: Vec<&str> = dnanr.split('-').collect();
if parts.len() != 2 {
return None;
}
// Parse both parts defensively so that non-numeric input yields `None`
// instead of panicking, as the documentation promises.
let year = parts[0].parse::<u32>().ok()?;
let number = parts[1].parse::<u32>().ok()?;
Some(format!("{:02}-{:05}", year, number))
}
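// A minimal sanity check for the normalization rules above (a sketch; this hypothetical
// test module is not part of the original file):
#[cfg(test)]
mod normalize_dna_nr_tests {
use super::normalize_dna_nr;
#[test]
fn pads_numbers_and_strips_prefix() {
assert_eq!(normalize_dna_nr("01-345").as_deref(), Some("01-00345"));
assert_eq!(normalize_dna_nr("D-1-345").as_deref(), Some("01-00345"));
assert_eq!(normalize_dna_nr("asdfjklö"), None);
}
}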
impl SampleSheetEntry {
pub fn _run_path(&self, db: &PgConnection) -> Result<PathBuf> {
use crate::schema::run;
let p: String = run::table.select(run::path).filter(run::name.eq(&self.model.run)).get_result(db)?;
Ok(PathBuf::from(p))
}
pub fn fastq_paths(&self, db: &PgConnection) -> Result<Vec<String>> {
use crate::schema::fastq;
Ok(fastq::table.select(fastq::filename).filter(fastq::sample_id.eq(self.model.id)).load(db)?)
}
// generate a short but unique string representation of the run
// to keep samples with same characteristics in different runs apart
fn get_unique_run_id(&self) -> String {
let underscore_parts: Vec<&str> = self.model.run.split('_').collect();
let dash_parts: Vec<&str> = self.model.run.split('-').collect();
format!("{}-{}", underscore_parts[0], dash_parts[dash_parts.len()-1])
}
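// For illustration (a hedged aside with a made-up run name): an Illumina-style run
// "210315_M00000_0123_000000000-ABCDE" shortens to "210315-ABCDE", i.e. the token
// before the first underscore plus the token after the last dash.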
}
impl From<models::Sample> for SampleSheetEntry {
fn from(s: models::Sample) -> Self {
SampleSheetEntry {
model: s,
extra_cols: HashMap::new()
}
}
}
impl From<Vec<models::Sample>> for SampleSheet {
fn from(ss: Vec<models::Sample>) -> Self {
SampleSheet {
entries: ss.into_iter().map(|s| s.into()).collect()
}
}
}
fn extract_from_zip(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let zipfile = std::fs::File::open(path)?;
let mut zip = zip::ZipArchive::new(zipfile)?;
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut fastq = zip.by_name(f)?;
let target = PathBuf::from(fastq.name());
let mut local_path = PathBuf::from(targetdir);
local_path.push(prefix.clone() + &target.file_name().unwrap().to_string_lossy().to_string());
let mut targetfile = std::fs::File::create(local_path)?;
std::io::copy(&mut fastq, &mut targetfile)?;
}
Ok(())
}
fn extract_from_dir(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut src = path.to_path_buf();
src.push(f);
let mut target = PathBuf::from(targetdir);
target.push(prefix.clone() + &PathBuf::from(f).file_name().unwrap().to_string_lossy().to_string());
std::fs::copy(&src, &target)?;
}
Ok(())
}
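// Both helpers above drop a sample's FASTQs directly into `targetdir`; with a prefix of,
// say, "210315-ABCDE-" (hypothetical), "sample1_R1.fastq.gz" is written out as
// "210315-ABCDE-sample1_R1.fastq.gz", keeping same-named files from different runs apart.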
impl SampleSheet {
pub fn new() -> Self {
SampleSheet {
entries: Vec::new(),
}
}
pub fn from_xlsx(xlsx: &str, db: &PgConnection) -> Result<Self> {
// open Excel workbook
let mut ss: Xlsx<_> = open_workbook(xlsx)?;
let sheetname = ss.sheet_names()[0].clone();
let sheet = ss.worksheet_range(&sheetname).unwrap()?;
let header_row: Vec<String> = sheet.rows().next().unwrap().iter().map(|d| d.to_string()).collect();
let col_dna_nr = header_row.iter().position(|c| *c == "DNA nr");
let col_lims_id = header_row.iter().position(|c| *c == "LIMS ID");
let col_sample = header_row.iter().position(|c| *c == "Sample");
let col_primer_set = header_row.iter().position(|c| *c == "primer set");
let col_run = header_row.iter().position(|c| *c == "run").ok_or_else(|| Box::<dyn Error>::from("Could not find required column 'run'"))?;
let mut result = SampleSheet::new();
for (row_idx, row) in sheet.rows().skip(1).enumerate() {
let run = row[col_run].to_string();
let name = col_sample.map(|col| row[col].to_string());
let primer_set = col_primer_set.map(|col| row[col].to_string());
let lims_id = col_lims_id.map(|col| row[col].to_string().parse::<i64>().ok()).flatten();
let dna_nr = col_dna_nr.map(|col| row[col].to_string());
let mut entry: SampleSheetEntry = match crate::vaultdb::match_samples(db, lims_id, dna_nr, primer_set, name, run)? {
MatchStatus::None(reason) => { warn!("Cannot find match for sample in row {}. Skipping. Reason: {}", row_idx+2, reason); continue }
MatchStatus::One(sample) => sample.into(),
MatchStatus::Multiple(v) => { warn!("Found {} matches for sample in row {}. Skipping.", row_idx+2, v.len()); continue }
};
// put all sample sheet columns as extra columns. During export, the user may select which one to use.
// Defaults to what the DB already knows
entry.extra_cols = header_row.iter().cloned().zip(row).map(|(header,data)| (header, data.to_string())).collect();
result.entries.push(entry);
}
Ok(result)
}
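// Typical usage (a hedged sketch; `db` is an open PgConnection and the paths are
// hypothetical):
// let sheet = SampleSheet::from_xlsx("samples.xlsx", &db)?;
// sheet.extract_fastqs(&db, Path::new("/tmp/export"))?;
// sheet.write_csv(";", &["cells"], Path::new("/tmp/export/samplesheet.csv"))?;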
pub fn has_multiple_runs(&self) -> bool {
self.entries.iter().map(|e| (e.model.run.clone(), true)).collect::<HashMap<String,bool>>().into_keys().count() > 1
}
pub fn extract_fastqs(&self, db: &PgConnection, targetpath: &Path) -> Result<()> {
// Make a list of paths that correspond to the runs so we can aggregate the ZIP extractions by ZIP file/run path
let mut runs: Vec<&str> = self.entries.iter().map(|e| e.model.run.as_ref()).collect();
runs.sort_unstable();
runs.dedup();
// Discover actual run path for runs
let runpaths: HashMap<String,String> = {
use crate::schema::run;
run::table
.select((run::name, run::path))
.filter(run::name.eq_any(&runs))
.load(db)
.expect("Could not get run")
}.into_iter().collect();
// Collect run paths before we go into parallel extraction
let files: Vec<Vec<String>> = self.entries.iter().map(|e| e.fastq_paths(db)).collect::<Result<_>>()?;
// Extract FASTQs from runs sample-wise in parallel, adding a sample prefix on-the-fly
self.entries.par_iter().enumerate().for_each(|(idx, entry)| {
let runpath = PathBuf::from(runpaths.get(&entry.model.run).unwrap());
let fastqs = &files[idx];
let prefix = if runs.len() > 1 { Some( format!("{}-", entry.get_unique_run_id()) ) } else { None };
if let Some(ext) = runpath.extension() {
if ext.to_ascii_lowercase() == "zip" {
extract_from_zip(&runpath, fastqs.as_ref(), targetpath, prefix).unwrap_or_else(|e| {
error!("Cannot extract from zip file {}: {}", runpath.display(), e)
});
} else {
warn!(
"Run path {} has weird extension. Don't know what to do, skipping.",
entry.model.run
);
}
} else {
extract_from_dir(&runpath, fastqs.as_ref(), targetpath, prefix)
.unwrap_or_else(|e| error!("Cannot copy from run folder: {}", e));
}
});
Ok(())
}
pub fn write_csv<T: AsRef<str> + PartialEq> (&self, separator: &str, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
        // The extra_cols map is not necessarily fully populated for every sample, so gather the union of keys across all entries
        let mut all_headers: Vec<String> = self.entries
            .iter()
            .flat_map(|e| e.extra_cols.keys().cloned())
            .collect();
        all_headers.sort_unstable();
        all_headers.dedup();
        // ...and drop extra columns that would duplicate a basic header in the header line
        let all_sans_basic: Vec<&str> = all_headers.iter().filter(|h| !basic_header.contains(&h.as_str())).map(String::as_str).collect();
// write header
let mut csv = basic_header.join(separator);
        if !all_sans_basic.is_empty() {
csv += separator;
csv += &all_sans_basic.join(separator);
}
csv += "\n";
let has_multiple_runs = self.has_multiple_runs();
for e in &self.entries {
// write basic data points
            for (col_idx, col) in basic_header.iter().enumerate() {
                let last = col_idx + 1 == basic_header.len();
                if overrides.iter().any(|x| &x.as_ref() == col) {
                    csv += e.extra_cols.get(*col).map(String::as_str).unwrap_or("");
                } else {
                    match *col {
                        "Sample" => {
                            if has_multiple_runs {
                                csv += &format!("{}-{}", e.get_unique_run_id(), e.model.name);
                            } else {
                                csv += &e.model.name;
                            }
                        },
                        "run" => { csv += &e.model.run; },
                        "DNA nr" => { csv += e.model.dna_nr.as_deref().unwrap_or(""); },
                        "primer set" => { csv += e.model.primer_set.as_deref().unwrap_or(""); },
                        "project" => { csv += e.model.project.as_deref().unwrap_or(""); },
                        "LIMS ID" => { csv += &e.model.lims_id.map(|i| i.to_string()).unwrap_or_default(); },
                        "cells" => {
                            if let Some(cells) = e.model.cells.as_ref() {
                                csv += &cells.to_string()
                            } else if let Some(cells) = e.extra_cols.get(*col) {
                                csv += cells
                            }
                        },
                        s => { error!("Unknown header: {}", s); panic!("Unknown basic header: {}", s) },
                    }
                };
                if !last {
                    csv += separator;
                }
            }
            if !all_sans_basic.is_empty() {
csv += separator;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
                csv += e.extra_cols.get(*col).map(String::as_str).unwrap_or("");
if col_idx+1 < all_sans_basic.len() {
csv += separator;
}
}
csv += "\n";
}
File::create(outfile)?.write_all(csv.as_bytes())?;
Ok(())
}
pub fn write_xlsx<T: AsRef<str> + PartialEq> (&self, overrides: &[T], outfile: &Path) -> Result<()> { | // write header
        for (col, title) in basic_header.iter().chain(all_sans_basic.iter()).enumerate() {
            sheet.write_string(0, col.try_into().unwrap(), title, None)?;
        }
let has_multiple_runs = self.has_multiple_runs();
for (row, e) in self.entries.iter().enumerate() {
let row: u32 = (row + 1).try_into().unwrap();
// write basic data points
for (col_idx, colname) in basic_header.iter().enumerate() {
let col_idx: u16 = col_idx.try_into().unwrap();
                let val = if overrides.iter().any(|x| &x.as_ref() == colname) {
                    e.extra_cols.get(*colname).cloned().unwrap_or_default()
                } else {
                    match *colname {
                        "Sample" => {
                            if has_multiple_runs {
                                format!("{}-{}", e.get_unique_run_id(), e.model.name)
                            } else {
                                e.model.name.to_string()
                            }
                        },
                        "run" => e.model.run.to_string(),
                        "DNA nr" => e.model.dna_nr.clone().unwrap_or_default(),
                        "primer set" => e.model.primer_set.clone().unwrap_or_default(),
                        "project" => e.model.project.clone().unwrap_or_default(),
                        "LIMS ID" => e.model.lims_id.map(|i| i.to_string()).unwrap_or_default(),
                        "cells" => {
                            if let Some(cells) = e.model.cells.as_ref() {
                                cells.to_string()
                            } else if let Some(cells) = e.extra_cols.get(*colname) {
                                cells.to_string()
                            } else {
                                String::new()
                            }
                        },
                        s => { error!("Unknown header: {}", s); panic!("Unknown basic header: {}", s) },
                    }
                };
sheet.write_string(row, col_idx, &val, None)?;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
let col_idx: u16 = (basic_header.len() + col_idx).try_into().unwrap();
                sheet.write_string(row, col_idx, e.extra_cols.get(*col).map(String::as_str).unwrap_or(""), None)?;
}
}
        // Persist the workbook; xlsxwriter only writes the file once close() is called
        workbook.close()?;
        Ok(())
}
}
|
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
        // The extra_cols map is not necessarily fully populated for every sample, so gather the union of keys across all entries
        let mut all_headers: Vec<String> = self.entries
            .iter()
            .flat_map(|e| e.extra_cols.keys().cloned())
            .collect();
        all_headers.sort_unstable();
        all_headers.dedup();
        // ...and drop extra columns that would duplicate a basic header in the header line
        let all_sans_basic: Vec<&str> = all_headers.iter().filter(|h| !basic_header.contains(&h.as_str())).map(String::as_str).collect();
        // Create the output workbook with a single worksheet
let workbook = xlsxwriter::Workbook::new(outfile.to_str().unwrap());
let mut sheet = workbook.add_worksheet(None)?;
| identifier_body |