block.rs
//! Implementations of cryptographic attacks against block ciphers.
use utils::data::Data;
use utils::metrics;
use victims::block::{EcbOrCbc, EcbWithSuffix, EcbWithAffixes, EcbUserProfile, CbcCookie};
/// Determine whether a block cipher is using ECB or CBC mode.
///
/// Given a black box which encrypts (padded) user data under ECB mode or CBC mode at random,
/// detect which mode it is using.
pub fn is_ecb_mode(ecb_cbc_box: &mut EcbOrCbc) -> bool {
// Find an upper bound on the block size of the cipher by encrypting some empty data.
let block_size = ecb_cbc_box.encrypt(&Data::new()).len();
// Provide some input data which will definitely result in repeated blocks under ECB mode.
let input = Data::from_bytes(vec![0; 10 * block_size]);
let encrypted = ecb_cbc_box.encrypt(&input);
metrics::has_repeated_blocks(&encrypted, block_size)
}
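// A minimal usage sketch, not from the original crate: `EcbOrCbc::new()` and
// `was_ecb()` are hypothetical names for the victim's constructor and a
// ground-truth check, shown only to illustrate how the detector is driven.
//
// let mut oracle = EcbOrCbc::new();
// let guess = is_ecb_mode(&mut oracle);
// assert_eq!(guess, oracle.was_ecb());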
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_suffix_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
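// (With padded block encryption the ciphertext length stays constant until our
// input fills the final block, then jumps by exactly one block, so the size of
// the jump equals the block size.)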
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
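// Illustration with 16-byte blocks: submitting 15 zero bytes makes the first
// ciphertext block the encryption of [0u8; 15] followed by the first unknown
// suffix byte; brute-forcing the 256 candidates for that final byte and
// comparing ciphertext blocks reveals it, and the process then repeats.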
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = block_size - 1 - (suffix.len() % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = padding.clone();
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = padding.len() + suffix.len() + 1 - block_size;
let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
for byte in 0..256 {
let mut test_block = partial_block.to_vec();
test_block.push(byte as u8);
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[..block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Find the length of an unknown prefix which is prepended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
// Find the block in which the prefix ends, by finding the first block which is different upon
// inserting a null byte.
let empty = ecb_affixes_box.encrypt(&Data::new());
let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
let mut prefix_block = 0;
for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
if byte1 != byte2 {
prefix_block = ix / block_size;
break;
}
}
// Now find the length of the prefix modulo the block size, by finding the smallest number of
// null bytes we need to provide as input in order to produce repeated blocks.
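// Worked example (block size 16, prefix length 20): the first ciphertext
// difference appears in block 1, so prefix_block = 1; blocks 2 and 3 first
// repeat once ix = 12 alignment bytes are added, giving
// prefix_len = 16 + (16 - 12) = 20.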
let mut prefix_len = block_size * prefix_block;
for ix in 0..block_size {
let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
let output = ecb_affixes_box.encrypt(&repeats);
if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
prefix_len += block_size - ix;
break;
}
}
prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
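// Compared to the no-prefix attack, padding with up to one extra block ensures
// that the block we inspect below starts strictly after the unknown prefix and
// never overlaps it.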
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size);
let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block);
test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
// Paste together non-admin tokens in order to create an admin token. This works by first
// asking for the following three tokens:
//
// 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
// [email protected] --> email=email@foo. com&uid=10&role= user
// noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
// [email protected] --> email=useless@ma deup.com&uid=10& role=user
//
// If we then take the first two blocks of the first token, the second block of the second
// token and the final block of the third token, and paste them together, we will end up with
// the following token:
//
// [email protected]&uid=10&role=admin&uid=10&rolrole=user
let token1 = ecb_profile_box.make_token("[email protected]");
let token2 = ecb_profile_box.make_token("noone@fakeadmin");
let token3 = ecb_profile_box.make_token("useless@madeup");
let mut new_token_bytes = Vec::with_capacity(4 * 16);
new_token_bytes.extend_from_slice(&token1.bytes()[..32]);
new_token_bytes.extend_from_slice(&token2.bytes()[16..32]);
new_token_bytes.extend_from_slice(&token3.bytes()[32..]);
Data::from_bytes(new_token_bytes)
}
/// Create a token which the `CbcCookie` decodes into a cookie with admin privileges.
///
/// Given - a black box which, given an arbitrary string, escapes the metacharacters ';' and '='
/// from the input, then produces a cookie in the form
/// `comment1=cooking%20MCs;userdata=<user-data>;comment2=%20like%20a%20pound%20of%20bacon` and
/// encrypts the result under CBC mode.
///
/// This utilises a CBC bitflipping attack to create an admin token.
pub fn craft_cbc_admin_token(cbc_cookie_box: &CbcCookie) -> Data {
// First, provide the user data "aaaaaaaaaaaaaaaa:admin<true:aa<a" and get the
// resulting token as raw bytes.
let token = cbc_cookie_box.make_token("aaaaaaaaaaaaaaaa:admin<true:aa<a");
let mut bytes = token.bytes().to_vec();
// Now, by flipping some of the bits in this token, we can obtain an admin token. Specifically,
// in CBC mode, flipping a bit in one ciphertext block scrambles the block it occurs in, and
// reproduces the exact same edit in the following block after decryption. This means that by
// choosing the bits we flip to occur in the block immediately before the one containing
// ':admin<true:' we can edit ':' into ';' and '<' into '='. This requires flipping the final
// bit of each of bytes 32, 38, 43 and 46.
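// Concretely: flipping ciphertext bytes 32, 38, 43 and 46 toggles plaintext
// bytes 48, 54, 59 and 62 of the following block, and since ':' (0x3A) vs ';'
// (0x3B) and '<' (0x3C) vs '=' (0x3D) differ only in their lowest bit, the
// block ':admin<true:aa<a' decrypts as ';admin=true;aa=a'.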
for position in &[32, 38, 43, 46] {
bytes[*position] ^= 1;
}
Data::from_bytes(bytes)
}
lib.rs
//! # sunvox-sys
//!
//! FFI bindings to the Sunvox library (http://warmplace.ru/soft/sunvox).
// --- Crate attributes --- //
#![allow(non_camel_case_types)]
// --- ==== --- //
// --- External crates --- //
extern crate libc;
// --- ==== --- //
// --- Use --- //
use libc::{c_void, c_int, c_uint, c_char, c_uchar, c_short, c_ushort};
// --- ==== --- //
/// Single note off.
pub const NOTECMD_NOTE_OFF: c_int = 128;
/// Notes of all synths off.
pub const NOTECMD_ALL_NOTES_OFF: c_int = 129;
/// Stop and clean all synths.
pub const NOTECMD_CLEAN_SYNTHS: c_int = 130;
pub const NOTECMD_STOP: c_int = 131;
pub const NOTECMD_PLAY: c_int = 132;
// I can't find these in the official header file, but they're defined in
// https://github.com/metrasynth/sunvox-dll-python/blob/master/sunvox/types.py
/// Change the pitch of a currently playing note.
pub const NOTECMD_SET_PITCH: c_int = 133;
/// Apply effect in this note cell to the corresponding one in the previous track.
pub const NOTECMD_PREV_TRACK: c_int = 134;
/// A single note cell in a pattern.
#[repr(C)]
#[derive(Clone, Debug)]
pub struct sunvox_note {
/// The note column.
///
/// - 0: Nothing.
/// - 1 to 127 inclusive: A normal note.
/// - 128+: See the `NOTECMD` constants.
pub note: c_uchar,
/// The velocity column (note velocity).
///
/// - 0: Empty (default).
/// - 1 to 129 inclusive: The specified velocity + 1.
pub vel: c_uchar,
/// The module column (module to affect).
///
/// - 0: Empty (none).
/// - 1 to 255 inclusive: The specified module + 1.
pub module: c_uchar,
/// Padding.
pub nothing: c_uchar,
/// The value of the controller/effect column.
///
/// Interpreted as a hexadecimal number, the first two digits are the
/// controller of the selected module to affect, and the last two digits
/// are the number of an effect. Set a pair of digits to zero to
/// ignore that part.
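///
/// For example, `ctl = 0x0102` targets controller 0x01 with effect 0x02,
/// while `ctl = 0x0100` sets controller 0x01 and ignores the effect part.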
pub ctl: c_ushort,
/// The value of the controller/effect parameter column.
pub ctl_val: c_ushort,
}
/// Suppresses debug output from the SunVox library.
pub const SV_INIT_FLAG_NO_DEBUG_OUTPUT: c_uint = 1 << 0;
/// Interaction with sound card is on the user side.
///
/// See `sv_audio_callback()`.
pub const SV_INIT_FLAG_USER_AUDIO_CALLBACK: c_uint = 1 << 1;
/// Audio is signed 16-bit (`c_short`).
pub const SV_INIT_FLAG_AUDIO_INT16: c_uint = 1 << 2;
/// Audio is float (`c_float`).
pub const SV_INIT_FLAG_AUDIO_FLOAT32: c_uint = 1 << 3;
/// Audio callback and song modification functions are in a single thread.
pub const SV_INIT_FLAG_ONE_THREAD: c_uint = 1 << 4;
pub const SV_MODULE_FLAG_EXISTS: c_int = 1;
pub const SV_MODULE_FLAG_EFFECT: c_int = 2;
pub const SV_MODULE_INPUTS_OFF: c_int = 16;
pub const SV_MODULE_INPUTS_MASK: c_int = 255 << SV_MODULE_INPUTS_OFF;
pub const SV_MODULE_OUTPUTS_OFF: c_int = 16 + 8;
pub const SV_MODULE_OUTPUTS_MASK: c_int = 255 << SV_MODULE_OUTPUTS_OFF;
pub const SV_STYPE_INT16: c_int = 0;
pub const SV_STYPE_INT32: c_int = 1;
pub const SV_STYPE_FLOAT32: c_int = 2;
pub const SV_STYPE_FLOAT64: c_int = 3;
#[link(name = "sunvox")]
extern "C" {
/// Gets the next piece of SunVox audio.
///
/// With `sv_audio_callback()` you can ignore the built-in SunVox sound
/// output mechanism and use some other sound system. Set the
/// `SV_INIT_FLAG_USER_AUDIO_CALLBACK` flag when calling `sv_init()` if
/// you want to use this function.
///
/// # Parameters
///
/// - buf: Destination buffer. If `SV_INIT_FLAG_AUDIO_INT16` was passed to
/// `sv_init()`, this is a buffer of `c_short`s. If `SV_INIT_FLAG_AUDIO_FLOAT32`
/// was passed, this is a buffer of `c_float`s. Stereo data will be interleaved
/// in this buffer: LRLR... ; where the LR is one frame (Left+Right channels).
/// - frames: Number of frames in destination buffer.
/// - latency: Audio latency (in frames).
/// - out_time: Output time (in ticks).
///
/// The `out_time` parameter is elaborated on a little bit in this thread:
/// http://www.warmplace.ru/forum/viewtopic.php?f=12&t=4152
///
/// For normal use, pass the value of `sv_get_ticks()`, as detailed in that
/// thread.
pub fn sv_audio_callback(buf: *mut c_void,
frames: c_int,
latency: c_int,
out_time: c_uint)
-> c_int;
/// Opens a slot.
///
/// A slot is like an instance of the SunVox engine. Each slot can have a
/// single project loaded at a time. The library supports up to four slots,
/// 0 to 3 inclusive. This call appears to hang if called with a number
/// outside this range.
///
/// Returns 0 on success, -1 on failure. Failure conditions include the
/// slot already being open.
///
/// I say "like" an instance of the engine because I think all slots share
/// the same tick counter, which you can get by calling `sv_get_ticks()`.
pub fn sv_open_slot(slot: c_int) -> c_int;
/// Closes a slot. See `sv_open_slot()` for more details.
pub fn sv_close_slot(slot: c_int) -> c_int;
/// Locks a slot.
///
/// There are a few functions that need to be called between a
/// `sv_lock_slot()`/`sv_unlock_slot()` pair. These are marked with
/// "USE LOCK/UNLOCK!".
pub fn sv_lock_slot(slot: c_int) -> c_int;
/// Unlocks a slot. See `sv_lock_slot()` for more details.
pub fn sv_unlock_slot(slot: c_int) -> c_int;
/// Initializes the library.
///
/// The `flags` parameter takes either zero (for default options), or a
/// number of `SV_INIT_FLAG_xxx` constants ORed together.
pub fn sv_init(dev: *const c_char, freq: c_int, channels: c_int, flags: c_uint) -> c_int;
/// Deinitializes the library.
pub fn sv_deinit() -> c_int;
/// Gets the internal sample type of the SunVox engine.
///
/// Returns one of the `SV_STYPE_xxx` constants.
///
/// Use it to get the scope buffer type from `get_module_scope()` function.
pub fn sv_get_sample_type() -> c_int;
/// Loads a SunVox project file into the specified slot.
pub fn sv_load(slot: c_int, name: *const c_char) -> c_int;
/// Loads a SunVox project from file data in memory.
pub fn sv_load_from_memory(slot: c_int, data: *mut c_void, data_size: c_uint) -> c_int;
/// Starts playing the project from the current play cursor position.
pub fn sv_play(slot: c_int) -> c_int;
/// Starts playing the project from the beginning.
pub fn sv_play_from_beginning(slot: c_int) -> c_int;
/// Stops playing the project. The play cursor stays where it is.
pub fn sv_stop(slot: c_int) -> c_int;
/// Enables or disables autostop.
///
/// - 0: Disable autostop.
/// - 1: Enable autostop.
///
/// When disabled, the project plays in a loop.
pub fn sv_set_autostop(slot: c_int, autostop: c_int) -> c_int;
/// Gets whether the project is stopped (ie. not playing).
///
/// Returns 0 if it is playing, 1 if it is stopped.
pub fn sv_end_of_song(slot: c_int) -> c_int;
/// Rewinds the project to the beginning.
pub fn sv_rewind(slot: c_int, line_num: c_int) -> c_int;
/// Sets the volume of the project.
pub fn sv_volume(slot: c_int, vol: c_int) -> c_int;
/// Causes an event to occur as though it had been played in a pattern.
///
/// `track_num` is in the range 0 to 15 inclusive, and refers to the track
/// number in a special hidden pattern.
pub fn sv_send_event(slot: c_int,
track_num: c_int,
note: c_int,
vel: c_int,
module: c_int,
ctl: c_int,
ctl_val: c_int)
-> c_int;
/// Gets the line number of the play cursor.
pub fn sv_get_current_line(slot: c_int) -> c_int;
/// Gets the line number of the play cursor in fixed-point format: 27.5
///
/// TODO: Figure out exactly what this means.
/// I'm guessing it means 27 bits for the integer part and 5 bits for the
/// fractional part.
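///
/// Under that reading, `value >> 5` would be the integer line number and
/// `value & 0x1F` the fractional part in 32nds of a line.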
pub fn sv_get_current_line2(slot: c_int) -> c_int;
/// Gets the current signal level/amplitude for a given audio channel
/// in the range 0 to 255 inclusive.
pub fn sv_get_current_signal_level(slot: c_int, channel: c_int) -> c_int;
/// Gets the name of the currently loaded project.
///
/// Returns NULL if no project is loaded.
pub fn sv_get_song_name(slot: c_int) -> *const c_char;
/// Gets the Beats Per Minute of the currently loaded project.
///
/// Returns zero if no project is loaded.
pub fn sv_get_song_bpm(slot: c_int) -> c_int;
/// Gets the Ticks Per Line of the currently loaded project.
///
/// Returns zero if no project is loaded.
pub fn sv_get_song_tpl(slot: c_int) -> c_int;
/// Gets the currently loaded song's length in audio samples/frames.
pub fn sv_get_song_length_frames(slot: c_int) -> c_uint;
/// Gets the currently loaded song's length in pattern lines.
pub fn sv_get_song_length_lines(slot: c_int) -> c_uint;
/// Creates a new module. USE LOCK/UNLOCK!
pub fn sv_new_module(slot: c_int,
_type: *const c_char,
name: *const c_char,
x: c_int,
y: c_int,
z: c_int)
-> c_int;
/// Removes the specified module. USE LOCK/UNLOCK!
pub fn sv_remove_module(slot: c_int, mod_num: c_int) -> c_int;
/// Connects the source to the destination. USE LOCK/UNLOCK!
pub fn sv_connect_module(slot: c_int, source: c_int, destination: c_int) -> c_int;
/// Disconnects the source from the destination. USE LOCK/UNLOCK!
pub fn sv_disconnect_module(slot: c_int, source: c_int, destination: c_int) -> c_int;
/// Loads a module.
///
/// Supported file formats: `sunsynth`, `xi`, `wav`, `aiff`
pub fn sv_load_module(slot: c_int,
file_name: *const c_char,
x: c_int,
y: c_int,
z: c_int)
-> c_int;
/// Loads a sample to an existing Sampler.
///
/// To replace the whole sampler, set `sample_slot` to -1.
pub fn sv_sampler_load(slot: c_int,
sampler_module: c_int,
file_name: *const c_char,
sample_slot: c_int)
-> c_int;
/// Gets the number of modules in the currently loaded project?
///
/// Does not seem to directly correspond to that.
/// TODO: Investigate this.
///
/// Returns zero if no project is loaded.
pub fn sv_get_number_of_modules(slot: c_int) -> c_int;
pub fn sv_get_module_flags(slot: c_int, mod_num: c_int) -> c_uint;
pub fn sv_get_module_inputs(slot: c_int, mod_num: c_int) -> *mut c_int;
pub fn sv_get_module_outputs(slot: c_int, mod_num: c_int) -> *mut c_int;
pub fn sv_get_module_name(slot: c_int, mod_num: c_int) -> *const c_char;
pub fn sv_get_module_xy(slot: c_int, mod_num: c_int) -> c_uint;
pub fn sv_get_module_color(slot: c_int, mod_num: c_int) -> c_int;
pub fn sv_get_module_scope(slot: c_int,
mod_num: c_int,
channel: c_int,
buffer_offset: *mut c_int,
buffer_size: *mut c_int)
-> *mut c_void;
/// TODO
///
/// Return value: received number of samples (may be less than or equal to `samples_to_read`).
pub fn sv_get_module_scope2(slot: c_int,
mod_num: c_int,
channel: c_int,
read_buf: *mut c_short,
samples_to_read: c_uint)
-> c_uint;
pub fn sv_get_number_of_module_ctls(slot: c_int, mod_num: c_int) -> c_int;
pub fn sv_get_module_ctl_name(slot: c_int, mod_num: c_int, ctl_num: c_int) -> *const c_char;
pub fn sv_get_module_ctl_value(slot: c_int,
mod_num: c_int,
ctl_num: c_int,
scaled: c_int)
-> c_int;
pub fn sv_get_number_of_patterns(slot: c_int) -> c_int;
pub fn sv_get_pattern_x(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_y(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_tracks(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_lines(slot: c_int, pat_num: c_int) -> c_int;
/// TODO
///
/// How to use sv_get_pattern_data():
///
/// - `int pat_tracks = sv_get_pattern_tracks(slot, pat_num);`
/// - `sunvox_note* data = sv_get_pattern_data(slot, pat_num);`
/// - `sunvox_note* n = &data[ line_number * pat_tracks + track_number ];`
/// - ... and then do something with note `n`
pub fn sv_get_pattern_data(slot: c_int, pat_num: c_int) -> *mut sunvox_note;
/// TODO
///
/// USE LOCK/UNLOCK!
pub fn sv_pattern_mute(slot: c_int, pat_num: c_int, mute: c_int) -> c_int;
/// Gets the current tick counter
///
/// Returns a value between 0 and 0xFFFFFFFF inclusive.
///
/// SunVox engine uses its own time space, measured in ticks.
pub fn sv_get_ticks() -> c_uint;
/// Gets the number of SunVox ticks per second.
pub fn sv_get_ticks_per_second() -> c_uint;
}
| sunvox_note | identifier_name |
ctap.rs
// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.
//! Client to Authenticator Protocol CTAPv2 over USB HID
//!
//! Based on the spec available at: <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html>
use core::cell::Cell;
use core::cmp;
use super::descriptors;
use super::descriptors::Buffer64;
use super::descriptors::DescriptorType;
use super::descriptors::EndpointAddress;
use super::descriptors::EndpointDescriptor;
use super::descriptors::HIDCountryCode;
use super::descriptors::HIDDescriptor;
use super::descriptors::HIDSubordinateDescriptor;
use super::descriptors::InterfaceDescriptor;
use super::descriptors::ReportDescriptor;
use super::descriptors::TransferDirection;
use super::usbc_client_ctrl::ClientCtrl;
use kernel::hil;
use kernel::hil::usb::TransferType;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::cells::TakeCell;
use kernel::ErrorCode;
/// Use 1 Interrupt transfer IN/OUT endpoint
const ENDPOINT_NUM: usize = 1;
const OUT_BUFFER: usize = 0;
const IN_BUFFER: usize = 1;
static LANGUAGES: &'static [u16; 1] = &[
0x0409, // English (United States)
];
/// Max packet size specified by spec
pub const MAX_CTRL_PACKET_SIZE: u8 = 64;
const N_ENDPOINTS: usize = 2;
/// The HID report descriptor for CTAP
/// This is a combination of:
/// - the CTAP spec, example 8
/// - USB HID spec examples
/// Plus it matches: https://chromium.googlesource.com/chromiumos/platform2/+/master/u2fd/u2fhid.cc
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
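// Decoded: the descriptor declares one 64-byte IN report and one 64-byte OUT
// report (ReportSize 8 bits x ReportCount 0x40 = 64 fields each), matching the
// 64-byte `Buffer64` endpoint buffers used below.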
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well, if we have a buffer then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subclass
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before; let's pass it along now.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
} | fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Set up buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn ctrl_status(&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. That means we can treat
/// the first call that finds nothing left to send as the signal that the
/// transmission finished.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else {
// The client can't receive right now. Save the endpoint so the data
// can be delivered later, and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
}
} else {
// Make sure to put the RX buffer back.
self.recv_buffer.replace(buf);
hil::usb::OutResult::Ok
}
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
fn packet_transmitted(&'a self, endpoint: usize) {
self.send_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_transmitted(Ok(()), buf, endpoint);
});
});
}
} | }
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> { | random_line_split |
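// --- Illustrative sketch (not part of the driver above) ---
// A minimal client capsule sitting on top of `CtapHid` through the
// `hil::usb_hid` traits. The trait method signatures below are inferred
// from the call sites above (`packet_received`, `packet_transmitted`,
// `can_receive`); `ExampleClient` is a hypothetical name.
use kernel::hil::usb_hid::{Client, UsbHid};

struct ExampleClient<'a, H: UsbHid<'a, [u8; 64]>> {
    hid: &'a H,
}

impl<'a, H: UsbHid<'a, [u8; 64]>> Client<'a, [u8; 64]> for ExampleClient<'a, H> {
    fn packet_received(
        &'a self,
        _result: Result<(), ErrorCode>,
        buffer: &'static mut [u8; 64],
        _endpoint: usize,
    ) {
        // Handle the 64-byte report, then hand the buffer back so the
        // driver can resume the OUT endpoint and keep receiving.
        let _ = self.hid.receive_buffer(buffer);
    }

    fn packet_transmitted(
        &'a self,
        _result: Result<(), ErrorCode>,
        _buffer: &'static mut [u8; 64],
        _endpoint: usize,
    ) {
        // The previous report finished sending; queue the next one here.
    }

    fn can_receive(&'a self) -> bool {
        // Returning false here makes the driver save the endpoint and
        // apply back pressure until receive_buffer() is called again.
        true
    }
}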
ctap.rs | // Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.
//! Client to Authenticator Protocol CTAPv2 over USB HID
//!
//! Based on the spec available at: <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html>
use core::cell::Cell;
use core::cmp;
use super::descriptors;
use super::descriptors::Buffer64;
use super::descriptors::DescriptorType;
use super::descriptors::EndpointAddress;
use super::descriptors::EndpointDescriptor;
use super::descriptors::HIDCountryCode;
use super::descriptors::HIDDescriptor;
use super::descriptors::HIDSubordinateDescriptor;
use super::descriptors::InterfaceDescriptor;
use super::descriptors::ReportDescriptor;
use super::descriptors::TransferDirection;
use super::usbc_client_ctrl::ClientCtrl;
use kernel::hil;
use kernel::hil::usb::TransferType;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::cells::TakeCell;
use kernel::ErrorCode;
/// Use 1 Interrupt transfer IN/OUT endpoint
const ENDPOINT_NUM: usize = 1;
const OUT_BUFFER: usize = 0;
const IN_BUFFER: usize = 1;
static LANGUAGES: &'static [u16; 1] = &[
0x0409, // English (United States)
];
/// Max packet size specified by spec
pub const MAX_CTRL_PACKET_SIZE: u8 = 64;
const N_ENDPOINTS: usize = 2;
/// The HID report descriptor for CTAP
/// This is a combination of:
/// - the CTAP spec, example 8
/// - USB HID spec examples
/// Plus it matches: <https://chromium.googlesource.com/chromiumos/platform2/+/master/u2fd/u2fhid.cc>
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well: if we have a buffer, then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subclass
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before; let's pass it along now.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Set up buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn ctrl_status(&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. That means we can treat
/// the first call that finds nothing left to send as the signal that the
/// transmission finished.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else |
} else {
// Make sure to put the RX buffer back.
self.recv_buffer.replace(buf);
hil::usb::OutResult::Ok
}
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
fn packet_transmitted(&'a self, endpoint: usize) {
self.send_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_transmitted(Ok(()), buf, endpoint);
});
});
}
}
| {
// The client can't receive right now. Save the endpoint so the data
// can be delivered later, and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
} | conditional_block |
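// --- Illustrative sketch (not part of the driver above) ---
// The driver moves opaque 64-byte reports; the CTAPHID framing that rides
// on them (per the spec linked in the module docs) looks like this.
// `CtapFrame` and `parse_frame` are hypothetical helpers; the spec treats
// the channel ID as opaque, so reading it big-endian is a convention here.
enum CtapFrame<'p> {
    /// Byte 4 has bit 7 set: start of a message (command + total length).
    Init { cid: u32, cmd: u8, bcnt: u16, payload: &'p [u8] },
    /// Byte 4 has bit 7 clear: continuation packet with a sequence number.
    Cont { cid: u32, seq: u8, payload: &'p [u8] },
}

fn parse_frame(report: &[u8; 64]) -> CtapFrame {
    let cid = u32::from_be_bytes([report[0], report[1], report[2], report[3]]);
    if report[4] & 0x80 != 0 {
        CtapFrame::Init {
            cid,
            cmd: report[4] & 0x7f,
            // BCNT is big-endian per the CTAPHID spec.
            bcnt: u16::from_be_bytes([report[5], report[6]]),
            payload: &report[7..],
        }
    } else {
        CtapFrame::Cont {
            cid,
            seq: report[4],
            payload: &report[5..],
        }
    }
}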
ctap.rs | // Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.
//! Client to Authenticator Protocol CTAPv2 over USB HID
//!
//! Based on the spec available at: <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html>
use core::cell::Cell;
use core::cmp;
use super::descriptors;
use super::descriptors::Buffer64;
use super::descriptors::DescriptorType;
use super::descriptors::EndpointAddress;
use super::descriptors::EndpointDescriptor;
use super::descriptors::HIDCountryCode;
use super::descriptors::HIDDescriptor;
use super::descriptors::HIDSubordinateDescriptor;
use super::descriptors::InterfaceDescriptor;
use super::descriptors::ReportDescriptor;
use super::descriptors::TransferDirection;
use super::usbc_client_ctrl::ClientCtrl;
use kernel::hil;
use kernel::hil::usb::TransferType;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::cells::TakeCell;
use kernel::ErrorCode;
/// Use 1 Interrupt transfer IN/OUT endpoint
const ENDPOINT_NUM: usize = 1;
const OUT_BUFFER: usize = 0;
const IN_BUFFER: usize = 1;
static LANGUAGES: &'static [u16; 1] = &[
0x0409, // English (United States)
];
/// Max packet size specified by spec
pub const MAX_CTRL_PACKET_SIZE: u8 = 64;
const N_ENDPOINTS: usize = 2;
/// The HID report descriptor for CTAP
/// This is a combination of:
/// - the CTAP spec, example 8
/// - USB HID spec examples
/// Plus it matches: <https://chromium.googlesource.com/chromiumos/platform2/+/master/u2fd/u2fhid.cc>
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well: if we have a buffer, then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subclass
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> | }
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Set up buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn ctrl_status(&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. That means we can treat
/// the first call that finds nothing left to send as the signal that the
/// transmission finished.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else {
// The client can't receive right now. Save the endpoint so the data
// can be delivered later, and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
}
} else {
// Make sure to put the RX buffer back.
self.recv_buffer.replace(buf);
hil::usb::OutResult::Ok
}
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
fn packet_transmitted(&'a self, endpoint: usize) {
self.send_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_transmitted(Ok(()), buf, endpoint);
});
});
}
}
| {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before; let's pass it along now.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(()) | identifier_body |
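// --- Illustrative sketch (not part of the driver above) ---
// Instantiating the driver. The vendor/product IDs and strings are
// placeholder values; `example_setup` is a hypothetical helper, and the
// comments follow the 1/2/3 descriptor string indices used in new() above.
static EXAMPLE_STRINGS: [&'static str; 3] = [
    "Example Vendor",   // manufacturer_string (descriptor index 1)
    "Example CTAP Key", // product_string (descriptor index 2)
    "serial-0001",      // serial_number_string (descriptor index 3)
];

fn example_setup<'a, U: hil::usb::UsbController<'a>>(usb: &'a U) -> CtapHid<'a, U> {
    // 0x1209/0x0001 are placeholders, not a real allocation.
    let ctap = CtapHid::new(usb, 0x1209, 0x0001, &EXAMPLE_STRINGS);
    // A board would next register the HID client with set_client() and
    // enable/attach the controller via the hil::usb::Client impl.
    ctap
}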
ctap.rs | // Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.
//! Client to Authenticator Protocol CTAPv2 over USB HID
//!
//! Based on the spec available at: <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html>
use core::cell::Cell;
use core::cmp;
use super::descriptors;
use super::descriptors::Buffer64;
use super::descriptors::DescriptorType;
use super::descriptors::EndpointAddress;
use super::descriptors::EndpointDescriptor;
use super::descriptors::HIDCountryCode;
use super::descriptors::HIDDescriptor;
use super::descriptors::HIDSubordinateDescriptor;
use super::descriptors::InterfaceDescriptor;
use super::descriptors::ReportDescriptor;
use super::descriptors::TransferDirection;
use super::usbc_client_ctrl::ClientCtrl;
use kernel::hil;
use kernel::hil::usb::TransferType;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::cells::TakeCell;
use kernel::ErrorCode;
/// Use 1 Interrupt transfer IN/OUT endpoint
const ENDPOINT_NUM: usize = 1;
const OUT_BUFFER: usize = 0;
const IN_BUFFER: usize = 1;
static LANGUAGES: &'static [u16; 1] = &[
0x0409, // English (United States)
];
/// Max packet size specified by spec
pub const MAX_CTRL_PACKET_SIZE: u8 = 64;
const N_ENDPOINTS: usize = 2;
/// The HID report descriptor for CTAP
/// This is a combination of:
/// - the CTAP spec, example 8
/// - USB HID spec examples
/// Plus it matches: <https://chromium.googlesource.com/chromiumos/platform2/+/master/u2fd/u2fhid.cc>
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well: if we have a buffer, then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subclass
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before; let's pass it along now.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Set up buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn | (&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. That means we can treat
/// the first call that finds nothing left to send as the signal that the
/// transmission finished.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else {
// The client can't receive right now. Save the endpoint so the data
// can be delivered later, and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
}
} else {
// Make sure to put the RX buffer back.
self.recv_buffer.replace(buf);
hil::usb::OutResult::Ok
}
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
fn packet_transmitted(&'a self, endpoint: usize) {
self.send_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_transmitted(Ok(()), buf, endpoint);
});
});
}
}
| ctrl_status | identifier_name |
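// --- Illustrative sketch (not part of the driver above) ---
// A test-style reading of REPORT_DESCRIPTOR: `0x06, 0xD0, 0xF1` is a
// two-byte Usage Page item whose little-endian payload is 0xF1D0, the
// FIDO Alliance usage page, and both report counts are 0x40 (64 bytes),
// matching the Buffer64 endpoints above.
#[cfg(test)]
mod report_descriptor_checks {
    use super::REPORT_DESCRIPTOR;

    #[test]
    fn starts_with_fido_usage_page() {
        assert_eq!(&REPORT_DESCRIPTOR[..3], &[0x06, 0xD0, 0xF1]);
    }

    #[test]
    fn report_counts_are_64_bytes() {
        // `0x95, 0x40` encodes HID_ReportCount(64); it appears once for
        // the input report and once for the output report.
        let count = REPORT_DESCRIPTOR
            .windows(2)
            .filter(|w| w[0] == 0x95 && w[1] == 0x40)
            .count();
        assert_eq!(count, 2);
    }
}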
block.rs | use ::slice::Slice;
use ::errors::RubbleResult;
use ::util::coding;
use ::status::Status;
use ::comparator::SliceComparator;
use std::mem;
use std::str;
pub struct OwnedBlock {
data: Vec<u8>,
restart_offset: usize,
}
pub struct SliceBlock<'a> {
data: Slice<'a>,
restart_offset: usize,
}
pub trait Block {
fn get_size(&self) -> usize;
fn data(&self) -> Slice;
fn restart_offset(&self) -> usize;
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>;
fn num_restarts(data: Slice) -> usize
{
assert!(data.len() >= mem::size_of::<u32>());
let offset = data.len() - mem::size_of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
} | impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at `p`,
/// returning the number of shared key bytes, non-shared key bytes,
/// and the length of the value in the `shared`, `non_shared`, and
/// `value_length` fields of the result.
///
/// If any errors are detected, returns an error. Otherwise, the returned
/// entry's `new_slice` starts at the key delta (just past the three
/// decoded header values).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
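// --- Illustrative sketch (assumed helper, not part of this module) ---
// The inverse of decode_entry() for the fast path above: an entry whose
// three header values each fit in one byte. This only exists to make the
// entry layout concrete.
fn encode_entry_small(shared: u8, key_delta: &[u8], value: &[u8]) -> Vec<u8> {
    assert!(shared < 128 && key_delta.len() < 128 && value.len() < 128);
    let mut out = Vec::with_capacity(3 + key_delta.len() + value.len());
    out.push(shared);                  // shared key byte count
    out.push(key_delta.len() as u8);   // non-shared key byte count
    out.push(value.len() as u8);       // value length
    out.extend_from_slice(key_delta);  // key delta bytes
    out.extend_from_slice(value);      // value bytes
    out
}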
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
// NOTE: num_restarts may be zero: iter_slice() constructs an empty,
// never-valid iterator this way for empty or corrupt blocks.
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn compare(&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in `data` just past the end of the current entry.
fn next_entry_offset(&self) -> usize
{
self.value_offset + self.value_len
}
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current will be fixed up by parse_next_key();
// parse_next_key() starts at the end of the value, so set value_offset accordingly
self.value_offset = self.get_restart_point(index);
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
// Reposition at the restart point we found, then walk forward until
// the end of the current entry reaches the original position.
let restart_index = self.restart_index;
self.seek_to_restart_point(restart_index);
while self.parse_next_key() && self.next_entry_offset() < original {}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
let mid_key = entry.new_slice;
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self.parse_next_key() && self.next_entry_offset() < self.restarts {
// Keep skipping
}
}
fn corruption_error(&mut self) {
self.current = self.restarts;
self.restart_index = self.num_restarts;
self.status = Status::Corruption("bad entry in block".into());
self.key = String::new();
}
fn parse_next_key(&mut self) -> bool {
self.current = self.next_entry_offset();
let p = &self.data[self.current..];
if p.len() == 0 {
// No more entries to return. Mark as invalid.
self.current = self.restarts;
self.restart_index = self.num_restarts;
return false;
}
let entry = match decode_entry(p) {
Ok(p) => p,
_ => {
self.corruption_error();
return false;
}
};
if self.key.len() < entry.shared as usize {
self.corruption_error();
return false;
}
// Keep the shared prefix of the previous key and append the delta.
self.key.truncate(entry.shared as usize);
self.key.push_str(
    str::from_utf8(&entry.new_slice[..entry.non_shared as usize])
        .expect("Invalid UTF-8 key"),
);
// Record the absolute offset of the value within `data`.
let value_start = entry.new_slice.as_ptr() as usize - self.data.as_ptr() as usize;
self.value_offset = value_start + entry.non_shared as usize;
self.value_len = entry.value_length as usize;
while self.restart_index + 1 < self.num_restarts
&& self.get_restart_point(self.restart_index + 1) < self.current
{
self.restart_index += 1;
}
true
}
}
pub struct KVEntry {
key: String,
value: Vec<u8>,
}
impl<'a, T: SliceComparator> Iterator for BlockIterator<'a, T> {
// Each item is one decoded key/value entry.
type Item = KVEntry;
fn next(&mut self) -> Option<KVEntry> {
// Yield the current entry (if any), then advance for the next call.
if !self.is_valid() {
    return None;
}
let entry = KVEntry {
    key: self.key(),
    value: self.value().to_vec(),
};
self.parse_next_key();
Some(entry)
}
} | random_line_split |
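// --- Illustrative sketch (assumed usage, not part of this module) ---
// The byte layout a block iterator expects: entries, then a restart array
// of fixed32 offsets, then a trailing fixed32 restart count (what
// num_restarts() reads). `BytewiseComparator` is a hypothetical impl of
// the SliceComparator trait; its compare() contract (-1/0/1) is inferred
// from the seek() logic above, and Slice is assumed to be a byte-slice alias.
struct BytewiseComparator;

impl SliceComparator for BytewiseComparator {
    fn compare(&self, a: Slice, b: Slice) -> i32 {
        match a.cmp(b) {
            std::cmp::Ordering::Less => -1,
            std::cmp::Ordering::Equal => 0,
            std::cmp::Ordering::Greater => 1,
        }
    }
}

fn example_scan(contents: Slice) {
    if let Ok(block) = OwnedBlock::new(contents) {
        let mut it = block.iter(BytewiseComparator);
        it.seek_to_first();
        while it.is_valid() {
            // it.key() / it.value() yield the current entry here.
            it.step();
        }
    }
}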
|
block.rs | use ::slice::Slice;
use ::errors::RubbleResult;
use ::util::coding;
use ::status::Status;
use ::comparator::SliceComparator;
use std::mem;
use std::str;
pub struct OwnedBlock {
data: Vec<u8>,
restart_offset: usize,
}
pub struct SliceBlock<'a> {
data: Slice<'a>,
restart_offset: usize,
}
pub trait Block {
fn get_size(&self) -> usize;
fn data(&self) -> Slice;
fn restart_offset(&self) -> usize;
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>;
fn num_restarts(data: Slice) -> usize
{
assert!(data.len() >= mem::size_of::<u32>());
let offset = data.len() - mem::size_of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
}
impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at `p`,
/// returning the number of shared key bytes, non-shared key bytes,
/// and the length of the value in the `shared`, `non_shared`, and
/// `value_length` fields of the result.
///
/// If any errors are detected, returns an error. Otherwise, the returned
/// entry's `new_slice` starts at the key delta (just past the three
/// decoded header values).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
// NOTE: num_restarts may be zero: iter_slice() constructs an empty,
// never-valid iterator this way for empty or corrupt blocks.
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn compare(&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in `data` just past the end of the current entry.
fn next_entry_offset(&self) -> usize
|
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current will be fixed up by parse_next_key();
// parse_next_key() starts at the end of the value, so set value_offset accordingly
self.value_offset = self.get_restart_point(index);
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
// Reposition at the restart point we found, then walk forward until
// the end of the current entry reaches the original position.
let restart_index = self.restart_index;
self.seek_to_restart_point(restart_index);
while self.parse_next_key() && self.next_entry_offset() < original {}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
// Restart-point keys carry no shared prefix, so the key is the first
// non_shared bytes of the delta (the remainder is the value).
let mid_key = &entry.new_slice[..entry.non_shared as usize];
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self.parse_next_key() && self.next_entry_offset() < self.restarts {
// Keep skipping
}
}
fn corruption_error(&mut self) {
self.current = self.restarts;
self.restart_index = self.num_restarts;
self.status = Status::Corruption("bad entry in block".into());
self.key = String::new();
}
fn parse_next_key(&mut self) -> bool {
self.current = self.next_entry_offset();
let p = &self.data[self.current..];
if p.len() == 0 {
// No more entries to return. Mark as invalid.
self.current = self.restarts;
self.restart_index = self.num_restarts;
return false;
}
let entry = match decode_entry(p) {
Ok(p) => p,
_ => {
self.corruption_error();
return false;
}
};
if self.key.len() < entry.shared as usize {
self.corruption_error();
return false;
}
// Keep the shared prefix of the previous key, then append the new bytes.
self.key.truncate(entry.shared as usize);
self.key.push_str(str::from_utf8(&entry.new_slice[..entry.non_shared as usize])
.expect("Invalid UTF-8 key"));
// The value follows the non-shared key bytes; record its offset relative
// to the start of `data` so that value() can slice `self.data` directly.
let header_len = p.len() - entry.new_slice.len();
self.value_offset = self.current + header_len + entry.non_shared as usize;
self.value_len = entry.value_length as usize;
while self.restart_index + 1 < self.num_restarts
&& self.get_restart_point(self.restart_index + 1) < self.current
{
self.restart_index += 1;
}
true
}
}
pub struct KVEntry {
key: String,
value: Vec<u8>,
}
impl<'a, T: SliceComparator> Iterator for BlockIterator<'a, T> {
type Item = KVEntry;
fn next(&mut self) -> Option<KVEntry> {
// parse_next_key() advances to the next entry and returns false once the
// entry region is exhausted (or on corruption), which ends iteration.
if !self.parse_next_key() {
return None;
}
Some(KVEntry {
key: self.key(),
value: self.value().to_vec(),
})
}
}
| {
self.value_offset + self.value_len
} | identifier_body |
block.rs | use ::slice::Slice;
use ::errors::RubbleResult;
use ::util::coding;
use ::status::Status;
use ::comparator::SliceComparator;
use std::mem;
use std::str;
pub struct OwnedBlock {
data: Vec<u8>,
restart_offset: usize,
}
pub struct SliceBlock<'a> {
data: Slice<'a>,
restart_offset: usize,
}
pub trait Block {
fn get_size(&self) -> usize;
fn data(&self) -> Slice;
fn restart_offset(&self) -> usize;
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>;
fn num_restarts(data: Slice) -> usize
{
assert!(data.len() >= mem::size_of::<u32>());
let offset = data.len() - mem::size_of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
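// Sketch of the block layout this trait assumes (inferred from num_restarts()
// above and OwnedBlock::new() below; the picture itself is not from the
// original source):
//
// +---------+---------+-----+-------------------------+--------------------+
// | entry 0 | entry 1 | ... | restart offsets (u32[]) | num_restarts (u32) |
// +---------+---------+-----+-------------------------+--------------------+
//
// Hence restart_offset = data.len() - (1 + num_restarts) * size_of::<u32>().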
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
if contents.len() < sizeof_u32 {
return Err("block too small to hold num_restarts".into());
}
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
}
impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at "p",
/// returning the number of shared key bytes, non_shared key bytes,
/// and the length of the value in the returned `DecodedEntry`. Slice
/// bounds checks stand in for the original C++ "limit" pointer.
///
/// If any errors are detected, returns an `Err`. Otherwise, returns the
/// decoded entry, whose slice points just past the three decoded values
/// (at the key delta).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
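// Note on the slow path above (illustrative, assuming LevelDB-style varint32
// encoding in coding::get_varint32_ptr_fallback): a header value >= 128 fails
// the one-byte fast-path test. For example, value_length = 300 encodes as
// [0xAC, 0x02], so a header with shared = 0, non_shared = 1, value_length = 300
// begins [0x00, 0x01, 0xAC, 0x02]. `cur` stays 0 on that path because the
// fallback calls advance `p` itself rather than the cursor.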
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
// num_restarts == 0 is permitted so that iter_slice() can construct the
// empty/error iterator; such an iterator is never valid.
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn | (&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in data_ just past the end of the current entry.
fn next_entry_offset(&self) -> usize
{
self.value_offset + self.value_len
}
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current_ will be fixed by ParseNextKey();
// ParseNextKey() starts at the end of value_, so set value_ accordingly
self.value_offset = self.get_restart_point(index);
self.value_len = 0;
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
// Re-parse forward from that restart point and stop on the entry that
// ends just before `original` (mirrors leveldb's Block::Iter::Prev).
let restart_index = self.restart_index;
self.seek_to_restart_point(restart_index);
while self.parse_next_key() && self.next_entry_offset() < original {
// Keep advancing until the next entry would reach `original`.
}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
// Restart-point keys carry no shared prefix, so the key is the first
// non_shared bytes of the delta (the remainder is the value).
let mid_key = &entry.new_slice[..entry.non_shared as usize];
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self.parse_next_key() && self.next_entry_offset() < self.restarts {
// Keep skipping
}
}
fn corruption_error(&mut self) {
self.current = self.restarts;
self.restart_index = self.num_restarts;
self.status = Status::Corruption("bad entry in block".into());
self.key = String::new();
}
fn parse_next_key(&mut self) -> bool {
self.current = self.next_entry_offset();
let p = &self.data[self.current..];
if p.len() == 0 {
// No more entries to return. Mark as invalid.
self.current = self.restarts;
self.restart_index = self.num_restarts;
return false;
}
let entry = match decode_entry(p) {
Ok(p) => p,
_ => {
self.corruption_error();
return false;
}
};
if self.key.len() < entry.shared as usize {
self.corruption_error();
return false;
}
// Keep the shared prefix of the previous key, then append the new bytes.
self.key.truncate(entry.shared as usize);
self.key.push_str(str::from_utf8(&entry.new_slice[..entry.non_shared as usize])
.expect("Invalid UTF-8 key"));
// The value follows the non-shared key bytes; record its offset relative
// to the start of `data` so that value() can slice `self.data` directly.
let header_len = p.len() - entry.new_slice.len();
self.value_offset = self.current + header_len + entry.non_shared as usize;
self.value_len = entry.value_length as usize;
while self.restart_index + 1 < self.num_restarts
&& self.get_restart_point(self.restart_index + 1) < self.current
{
self.restart_index += 1;
}
true
}
}
pub struct KVEntry {
key: String,
value: Vec<u8>,
}
impl<'a, T: SliceComparator> Iterator for BlockIterator<'a, T> {
type Item = KVEntry;
fn next(&mut self) -> Option<KVEntry> {
// parse_next_key() advances to the next entry and returns false once the
// entry region is exhausted (or on corruption), which ends iteration.
if !self.parse_next_key() {
return None;
}
Some(KVEntry {
key: self.key(),
value: self.value().to_vec(),
})
}
}
| compare | identifier_name |
generator.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accepts a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
failure::Error,
log::debug,
serde_derive::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for the issuer to block on commands
/// from the generator or from verifiers. When command_count drops to zero,
/// the issuer blocks until someone wakes it up.
/// When the generator or a verifier inserts a command into the issuer's
/// channel, it signals the issuer to wake up.
#[derive(Clone)]
pub struct ActiveCommands {
/// command_count indicates how many commands are in issuers queue.
/// Mutex and condition variable protect and help to wait on the count.
command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut command_count = ActiveCommands::new();
///
/// Thread 1
/// command_count.decrement();
/// cmd = receiver.try_recv();
/// assert!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.increment();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns the value of command_count. This is a snapshot-in-time value:
/// by the time another action is performed based on the returned value,
/// the count may have changed. Currently, the sender increments the
/// count and the receiver decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and returns its value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
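// A toy implementation sketch (kept as a comment because OperationType's
// trait derives are not visible from this file; the real implementation is
// SequentialIoGenerator): an xorshift64 state keeps runs repeatable from a
// seed.
//
// struct ToyGenerator { state: u64, range: Range<u64> }
//
// impl Generator for ToyGenerator {
//     fn generate_number(&mut self) -> u64 {
//         self.state ^= self.state << 13;
//         self.state ^= self.state >> 7;
//         self.state ^= self.state << 17;
//         self.state
//     }
//     fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType {
//         // Derive the operation from the last generated number.
//         allowed_ops[(self.state % allowed_ops.len() as u64) as usize].clone()
//     }
//     fn get_io_range(&self) -> Range<u64> {
//         // A single-byte IO at a state-derived offset inside `range`.
//         let span = self.range.end - self.range.start;
//         let start = self.range.start + self.state % span;
//         start..start + 1
//     }
//     fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, _offset_range: Range<u64>) {
//         for b in buf.iter_mut() { *b = (sequence_number & 0xff) as u8; }
//     }
// }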
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
assert!(false);
}
return operations;
}
/// Based on the input args this returns a generator that can generate requested
/// IO load. For now we only allow sequential IO.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. The order in which IOs are issued need not match the
// order in which they arrive at the verifier and get logged. While replaying,
// this number lets us determine the order in which to issue IOs, irrespective
// of the order in which they are read from the replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates the verifier and issuer threads. It builds channels for them to communicate.
/// This thread assumes the role of generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
// Channel used to send commands from generator to issuer
// This is the only bounded channel. The throttle control happens over this channel.
// TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
// this channel be part of the ActiveCommand implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
// A hashmap of all outstanding IOs. Shared between generator and verifier.
// Generator inserts entries and verifier removes it.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
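// Wiring sketch (summary added for orientation; names match the variables
// below):
//
//   generator --gi (bounded)--> issuer --iv--> verifier
//       |                         ^              |
//       |                         +----- vi -----+
//       +--------- io_map (outstanding IOs) -----+
//
// active_commands is the shared wake-up counter: the generator and verifier
// increment it when they enqueue work; the issuer decrements it (blocking at
// zero) before dequeuing.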
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
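#[test]
fn active_command_channel_sketch() {
// A sketch added for illustration (not from the original suite): pair a
// channel with ActiveCommands the way the generator/issuer pipeline does,
// so the receiver can block until a command has actually been enqueued.
use std::sync::mpsc::channel;
let (tx, rx) = channel();
let mut count = ActiveCommands::new();
let mut producer_count = count.clone();
let thd = thread::spawn(move || {
tx.send("do_io").unwrap();
producer_count.increment(); // wake a blocked decrement()
});
count.decrement(); // may block until increment() runs
assert_eq!(rx.try_recv().unwrap(), "do_io");
let _ = thd.join();
}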
#[test]
fn | () {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
let mut command_count_copy = command_count.clone();
command_count.increment();
let thd = thread::spawn(move || {
sleep(time::Duration::from_secs(1));
// The first increment wakes the other thread's sleeping decrement().
command_count_copy.increment();
});
// On the first call we don't block, since a command is immediately available.
assert_eq!(command_count.decrement(), false);
// On the second call we block, because the thread that is supposed to
// increment is sleeping for a second.
assert_eq!(command_count.decrement(), true);
let _ = thd.join();
// command count should be zero now
assert_eq!(command_count.count(), 0);
}
}
| active_command_block_test | identifier_name |
generator.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accepts a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
failure::Error,
log::debug,
serde_derive::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for the issuer to block on commands
/// from the generator or from verifiers. When command_count drops to zero,
/// the issuer blocks until someone wakes it up.
/// When the generator or a verifier inserts a command into the issuer's
/// channel, it signals the issuer to wake up.
#[derive(Clone)]
pub struct ActiveCommands {
/// command_count indicates how many commands are in issuers queue.
/// Mutex and condition variable protect and help to wait on the count. | command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut command_count = ActiveCommands::new();
///
/// Thread 1
/// command_count.decrement();
/// cmd = receiver.try_recv();
/// assert!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.increment();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns the value of command_count. This is a snapshot-in-time value:
/// by the time another action is performed based on the returned value,
/// the count may have changed. Currently, the sender increments the
/// count and the receiver decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and returns its value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
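// Replay note (added): because GeneratorArgs derives Serialize/Deserialize, a
// run's parameters can be captured and fed back in later. A sketch, assuming
// a serde_json dependency (an assumption; it is not imported in this file):
//
// let json = serde_json::to_string(&args).unwrap();
// let replayed: GeneratorArgs = serde_json::from_str(&json).unwrap();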
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
assert!(false);
}
return operations;
}
/// Based on the input args this returns a generator that can generate requested
/// IO load. For now we only allow sequential IO.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. The order in which IOs are issued need not match the
// order in which they arrive at the verifier and get logged. While replaying,
// this number lets us determine the order in which to issue IOs, irrespective
// of the order in which they are read from the replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates the verifier and issuer threads. It builds channels for them to communicate.
/// This thread assumes the role of generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
// Channel used to send commands from generator to issuer
// This is the only bounded channel. The throttle control happens over this channel.
// TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
// this channel be part of the ActiveCommand implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
// A hashmap of all outstanding IOs. Shared between generator and verifier.
// Generator inserts entries and verifier removes it.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
#[test]
fn active_command_block_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
let mut command_count_copy = command_count.clone();
command_count.increment();
let thd = thread::spawn(move || {
sleep(time::Duration::from_secs(1));
// The first increment wakes the other thread's sleeping decrement().
command_count_copy.increment();
});
// On the first call we don't block, since a command is immediately available.
assert_eq!(command_count.decrement(), false);
// On the second call we block, because the thread that is supposed to
// increment is sleeping for a second.
assert_eq!(command_count.decrement(), true);
let _ = thd.join();
// command count should be zero now
assert_eq!(command_count.count(), 0);
}
} | random_line_split |
|
generator.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accepts a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
failure::Error,
log::debug,
serde_derive::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for the issuer to block on commands
/// from the generator or from verifiers. When command_count drops to zero,
/// the issuer blocks until someone wakes it up.
/// When the generator or a verifier inserts a command into the issuer's
/// channel, it signals the issuer to wake up.
#[derive(Clone)]
pub struct ActiveCommands {
/// command_count indicates how many commands are in issuers queue.
/// Mutex and condition variable protect and help to wait on the count.
command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut command_count = ActiveCommands::new();
///
/// Thread 1
/// command_count.decrement();
/// cmd = receiver.try_recv();
/// assert!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.increment();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
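// Note (added): the `while` loop below, rather than a single `if`, re-checks
// the count after every wakeup, which keeps this correct in the face of the
// spurious wakeups Condvar::wait is permitted to produce.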
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns the value of command_count. This is a snapshot-in-time value:
/// by the time another action is performed based on the returned value,
/// the count may have changed. Currently, the sender increments the
/// count and the receiver decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and its checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and returns its value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> |
/// Based on the input args this returns a generator that can generate requested
/// IO load. For now we only allow sequential IO.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. The order in which IOs are issued need not match the
// order in which they arrive at the verifier and get logged. While replaying,
// this number lets us determine the order in which to issue IOs, irrespective
// of the order in which they are read from the replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates the verifier and issuer threads. It builds channels for them to communicate.
/// This thread assumes the role of generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
// Channel used to send commands from generator to issuer
// This is the only bounded channel. The throttle control happens over this channel.
// TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
// this channel be part of the ActiveCommand implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
// A hashmap of all outstanding IOs. Shared between generator and verifier.
// Generator inserts entries and verifier removes it.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
#[test]
fn active_command_block_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
let mut command_count_copy = command_count.clone();
command_count.increment();
let thd = thread::spawn(move || {
sleep(time::Duration::from_secs(1));
// The first increment wakes the other thread's sleeping decrement().
command_count_copy.increment();
});
// On the first call we don't block, since a command is immediately available.
assert_eq!(command_count.decrement(), false);
// On the second call we block, because the thread that is supposed to
// increment is sleeping for a second.
assert_eq!(command_count.decrement(), true);
let _ = thd.join();
// command count should be zero now
assert_eq!(command_count.count(), 0);
}
}
| {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
assert!(false);
}
return operations;
} | identifier_body |
generator.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accepts a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
failure::Error,
log::debug,
serde_derive::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for the issuer to block on commands
/// from the generator or from verifiers. When command_count drops to zero,
/// the issuer blocks until someone wakes it up.
/// When the generator or a verifier inserts a command into the issuer's
/// channel, it signals the issuer to wake up.
#[derive(Clone)]
pub struct ActiveCommands {
/// command_count indicates how many commands are in issuers queue.
/// Mutex and condition variable protect and help to wait on the count.
command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut command_count = ActiveCommands::new();
///
/// Thread 1
/// command_count.decrement();
/// cmd = receiver.try_recv();
/// assert!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.increment();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns the value of command_count. This is a snapshot-in-time value:
/// by the time another action is performed based on the returned value,
/// the count may have changed. Currently, the sender increments the
/// count and the receiver decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and its checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and returns its value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
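// Sketch of the "serialize and save for later use" mentioned in the struct's
// doc comment. This assumes a JSON serializer such as the `serde_json` crate
// is available to the build; the real tool may persist the args differently.
#[allow(dead_code)]
fn save_args(args: &GeneratorArgs) -> Result<String, serde_json::Error> {
    serde_json::to_string(args)
}

#[allow(dead_code)]
fn load_args(json: &str) -> Result<GeneratorArgs, serde_json::Error> {
    serde_json::from_str(json)
}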
/// Based on the input args this returns the set of operations that the
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
    let mut operations: Vec<OperationType> = vec![];
    if args.operations.write {
        operations.push(OperationType::Write);
    } else {
        assert!(false);
    }
    return operations;
}
/// Based on the input args this returns a generator that can generate the
/// requested IO load. For now only sequential IO is supported.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
    if !args.sequential {
        panic!("Only the sequential IO generator is implemented at the moment");
    }
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
    // IO sequence number. The order in which IOs are issued need not match the
    // order in which they arrive at the verifier and get logged. When
    // replaying, this number helps us determine the issue order regardless of
    // the order in which entries are read from the replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
    // An array of allowed operations that helps the generator pick an
    // operation based on the generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates the verifier and issuer threads and builds the
/// channels they use to communicate. The calling thread assumes the role of
/// the generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
    // Channel used to send commands from generator to issuer.
    // This is the only bounded channel; all throttle control happens over it.
    // (A standalone sketch of this throttling pattern follows run_load below.)
    // TODO(auradkar): Considering ActiveCommands and this channel are so
    // tightly related, should this channel be part of the ActiveCommands
    // implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
    // A hashmap of all outstanding IOs, shared between the generator and the
    // verifier. The generator inserts entries and the verifier removes them.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
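// A standalone sketch of the bounded-channel throttling pattern used at the
// top of `run_load`: the sender blocks on `send` once `queue_depth` commands
// are outstanding, and the receiver draining the channel unblocks it. The
// names here are illustrative.
#[allow(dead_code)]
fn bounded_channel_demo() {
    use std::thread;

    let queue_depth = 4;
    let (tx, rx) = sync_channel::<u64>(queue_depth);
    let issuer = thread::spawn(move || {
        while let Ok(io) = rx.recv() {
            println!("issuing io {}", io); // draining unblocks the sender
        }
    });
    for sequence_number in 0..16u64 {
        // Blocks whenever `queue_depth` commands are already queued.
        tx.send(sequence_number).expect("issuer hung up");
    }
    drop(tx); // close the channel so the issuer thread exits
    issuer.join().unwrap();
}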
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
#[test]
fn active_command_block_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
let mut command_count_copy = command_count.clone();
command_count.increment();
let thd = thread::spawn(move || {
sleep(time::Duration::from_secs(1));
            // This first increment will wake the other thread, which is
            // sleeping in decrement().
command_count_copy.increment();
});
        // On the first call we don't block, because a command is immediately
        // available.
        assert_eq!(command_count.decrement(), false);
        // On the second call we block, because the thread that is supposed to
        // increment is sleeping for a second.
assert_eq!(command_count.decrement(), true);
let _ = thd.join();
// command count should be zero now
assert_eq!(command_count.count(), 0);
}
}
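// For reference, a minimal sketch of a counter with the semantics these tests
// exercise: `decrement` sleeps while the count is zero and reports whether it
// had to wait. This is an assumption about the observable behavior only, not
// the real `ActiveCommands` implementation.
#[allow(dead_code)]
#[derive(Clone)]
struct BlockingCounter {
    inner: std::sync::Arc<(std::sync::Mutex<u64>, std::sync::Condvar)>,
}

#[allow(dead_code)]
impl BlockingCounter {
    fn new() -> Self {
        BlockingCounter {
            inner: std::sync::Arc::new((std::sync::Mutex::new(0), std::sync::Condvar::new())),
        }
    }

    fn count(&self) -> u64 {
        *self.inner.0.lock().unwrap()
    }

    fn increment(&self) {
        let (lock, cvar) = (&self.inner.0, &self.inner.1);
        *lock.lock().unwrap() += 1;
        cvar.notify_one(); // wake a decrement() that is waiting on zero
    }

    /// Returns true if the call had to sleep waiting for an increment.
    fn decrement(&self) -> bool {
        let (lock, cvar) = (&self.inner.0, &self.inner.1);
        let mut count = lock.lock().unwrap();
        let mut slept = false;
        while *count == 0 {
            slept = true;
            count = cvar.wait(count).unwrap();
        }
        *count -= 1;
        slept
    }
}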
main.rs
#![feature(try_from)]
extern crate itertools;
extern crate ketos;
extern crate minutiae;
extern crate pcg;
extern crate rand;
extern crate uuid;
use std::fmt::{self, Debug, Formatter};
use std::rc::Rc;
use ketos::{Context, GlobalScope, Scope, Value};
use ketos::compile::compile;
use ketos::bytecode::Code;
use ketos::lexer::Lexer;
use ketos::parser::Parser;
use ketos::rc_vec::RcVec;
use ketos::restrict::RestrictConfig;
use itertools::Itertools;
use minutiae::prelude::*;
use minutiae::engine::serial::SerialEngine;
use minutiae::engine::iterator::SerialEntityIterator;
use minutiae::driver::middleware::MinDelay;
use minutiae::driver::BasicDriver;
use minutiae::universe::Universe2D;
use minutiae::util::{debug, translate_entity};
use pcg::PcgRng;
use rand::{Rng, SeedableRng};
use uuid::Uuid;
#[cfg(feature = "wasm")]
extern {
pub fn canvas_render(pixbuf_ptr: *const u8);
}
const UNIVERSE_SIZE: usize = 800;
const ANT_COUNT: usize = 2000;
const PRNG_SEED: [u64; 2] = [198918237842, 9];
const UNIVERSE_LENGTH: usize = UNIVERSE_SIZE * UNIVERSE_SIZE;
fn get_codes_from_source(context: &Context, src: &str) -> Result<Vec<Rc<Code>>, String> {
let lexer = Lexer::new(src, 0);
Parser::new(&context, lexer)
.parse_exprs()
.map_err(debug)?
.iter()
.map(|v| compile(&context, v))
.fold_results(Vec::new(), |mut acc, code| {
acc.push(Rc::new(code));
acc
})
.map_err(debug)
}
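/// Ant scripts are executed under ketos's strict restriction profile, which
/// caps interpreter resources (presumably limits such as memory use and call
/// depth) so a runaway script cannot stall the whole simulation.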
fn get_ant_restrictions() -> RestrictConfig {
RestrictConfig::strict()
}
fn get_ant_global_scope() -> Scope {
let global_scope = ketos::scope::GlobalScope::default("ant");
global_scope.add_named_value("UNIVERSE_SIZE", UNIVERSE_SIZE.into());
return Rc::new(global_scope)
}
fn get_ant_default_context() -> ketos::Context {
let scope = get_ant_global_scope();
let restrictions = get_ant_restrictions();
let context = ketos::Context::new(scope, restrictions);
// Fill the context with default items from our "standard library"
let std_src = include_str!("./ant_std.lisp");
let codes: Vec<Rc<Code>> = get_codes_from_source(&context, std_src)
.expect("You've got syntax errors in your standard library!");
for code in &codes {
ketos::exec::execute(&context, Rc::clone(code))
.expect("Error while executing standard library code!");
}
context
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum CellContents {
Empty,
Filled(u8),
Food(u16),
Anthill,
}
#[derive(Clone, Debug)]
struct CS {
contents: CellContents,
}
impl CellState for CS {}
impl Default for CS {
fn default() -> Self {
CS { contents: CellContents::Empty }
}
}
#[derive(Clone)]
struct Ant {
code: Vec<Rc<Code>>,
context: Context,
holding: CellContents,
}
impl Ant {
pub fn from_source(src: &str) -> Result<Self, String> {
let context = get_ant_default_context();
let codes = get_codes_from_source(&context, src)?;
Ok(Ant {
code: codes,
context: context,
holding: CellContents::Empty,
})
}
}
impl Debug for Ant {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Ant {{ code: {:?}, context: {{..}}, holding: {:?} }}", self.code, self.holding)
}
}
impl<'a> From<&'a ES> for Option<&'a Ant> {
fn from(entity_state: &'a ES) -> Self {
match entity_state {
&ES::Ant(ref ant) => Some(ant),
}
}
}
impl<'a> From<&'a mut ES> for Option<&'a mut Ant> {
fn from(entity_state: &'a mut ES) -> Self {
match entity_state {
&mut ES::Ant(ref mut ant) => Some(ant),
}
}
}
#[derive(Clone, Debug)]
enum ES {
Ant(Ant),
}
impl EntityState<CS> for ES {}
impl From<Ant> for ES {
fn from(ant: Ant) -> Self {
ES::Ant(ant)
}
}
#[derive(Clone)]
struct MES(ketos::Value);
impl Default for MES {
fn default() -> Self {
MES(ketos::Value::Unit)
}
}
impl MutEntityState for MES {}
enum CA {
}
impl CellAction<CS> for CA {}
#[derive(Debug)]
enum EA {
}
type U = Universe2D<CS, ES, MES>;
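/// Maps a ketos `Value` taken from the `__SELF_ACTIONS` buffer to a
/// `SelfAction`. As the match below shows, the expected shape is a list whose
/// head is an action-name string followed by its integer arguments, e.g.
/// `("translate" 1 1)` to move one cell in x and one in y.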
fn map_value_to_self_action(val: &Value) -> Result<SelfAction<CS, ES, EA>, String> {
match val {
&Value::List(ref list) => {
if list.is_empty() {
return Err("The provided action list was empty!".into());
}
match &list[0] {
&Value::String(ref action_type) => match action_type.as_ref() {
"translate" => {
                    if list.len() != 3 {
return Err(format!("Invalid amount of arguments provided to translate action: {}", list.len() - 1));
}
let arg1: isize = match &list[1] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 1 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn reset_action_buffers(context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
        None => { return; }, // The entity has been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action,..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) => translate_entity(
x_offset,
y_offset,
&mut universe.entities,
action.source_entity_index,
action.source_uuid,
UNIVERSE_SIZE
),
Action::EntityAction{.. } | Action::CellAction{.. } => unreachable!(),
_ => unimplemented!(),
}
}
fn exec_entity_action(_action: &OwnedAction<CS, ES, CA, EA>) {
unimplemented!(); // TODO
}
impl SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U> for AntEngine {
fn iter_entities(&self, _universe: &U) -> SerialEntityIterator<CS, ES> {
SerialEntityIterator::new(UNIVERSE_SIZE)
}
fn exec_actions(
&self,
universe: &mut U,
cell_actions: &[OwnedAction<CS, ES, CA, EA>],
self_actions: &[OwnedAction<CS, ES, CA, EA>],
entity_actions: &[OwnedAction<CS, ES, CA, EA>]
) {
for cell_action in cell_actions { exec_cell_action(cell_action, &mut universe.cells, &mut universe.entities); }
for self_action in self_actions { exec_self_action(universe, self_action); }
for entity_action in entity_actions { exec_entity_action(entity_action); }
}
fn drive_entity(
&mut self,
universe_index: usize,
entity: &Entity<CS, ES, MES>,
_: &U,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) {
match entity.state {
ES::Ant(Ant { ref code, ref context,.. }) => {
reset_action_buffers(context, universe_index);
for c in code {
match ketos::exec::execute(context, Rc::clone(&c)) {
Ok(_) => (),
Err(err) => {
println!("Entity script errored: {:?}", err);
return;
},
                    };
                }
                match process_action_buffers(
                    context,
cell_action_executor,
self_action_executor,
entity_action_executor
) {
Ok(()) => (),
Err(err) => println!("Error while retrieving action buffers from context: {}", err),
}
}
}
}
}
type OurSerialEngine = Box<SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U>>;
/// Given a coordinate in the universe, uses the state of its cell and of the
/// entities residing in it to determine the color to display on the canvas.
/// This is called each tick. The returned value is the color in RGBA.
fn calc_color(
cell: &Cell<CS>,
entity_indexes: &[usize],
entity_container: &EntityContainer<CS, ES, MES>
) -> [u8; 4] {
    if !entity_indexes.is_empty() {
for i in entity_indexes {
match unsafe { &entity_container.get(*i).state } {
&ES::Ant {.. } => { return [91, 75, 11, 255] },
}
}
[12, 24, 222, 255]
} else {
match cell.state.contents {
CellContents::Anthill => [222, 233, 244, 255],
CellContents::Empty => [12, 12, 12, 255],
CellContents::Food(_) => [200, 30, 40, 255], // TODO: Different colors for different food amounts
CellContents::Filled(_) => [230, 230, 230, 255],
}
}
}
#[cfg(feature = "wasm")]
fn init(
universe: U,
engine: OurSerialEngine
) {
use minutiae::emscripten::{EmscriptenDriver, CanvasRenderer};
let driver = EmscriptenDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(CanvasRenderer::new(UNIVERSE_SIZE, calc_color, canvas_render)),
]);
}
#[cfg(not(feature = "wasm"))]
fn init(
universe: U,
engine: OurSerialEngine
) {
let driver = BasicDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(minutiae::driver::middleware::gif_renderer::GifRenderer::new(
"./out.gif", UNIVERSE_SIZE, calc_color
)),
]);
}
fn main() {
let conf = UniverseConf {
size: 800,
view_distance: 1,
};
let universe = Universe2D::new(conf, &mut WorldGenerator);
let engine: OurSerialEngine = Box::new(AntEngine);
init(universe, engine);
}
lower.rs
//! Methods for lowering the HIR to types.
pub(crate) use self::diagnostics::LowerDiagnostic;
use crate::resolve::{HasResolver, TypeNs};
use crate::ty::{Substitution, TyKind};
use crate::{
arena::map::ArenaMap,
code_model::StructKind,
diagnostics::DiagnosticSink,
name_resolution::Namespace,
primitive_type::PrimitiveType,
resolve::Resolver,
ty::{FnSig, Ty},
type_ref::{LocalTypeRefId, TypeRef, TypeRefMap, TypeRefSourceMap},
FileId, Function, HirDatabase, ModuleDef, Path, Struct, TypeAlias,
};
use crate::{HasVisibility, Visibility};
use std::{ops::Index, sync::Arc};
/// A struct which maps type references to their resolved `Ty`s.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct LowerTyMap {
pub(crate) type_ref_to_type: ArenaMap<LocalTypeRefId, Ty>,
pub(crate) diagnostics: Vec<LowerDiagnostic>,
unknown_ty: Ty,
}
impl Default for LowerTyMap {
fn default() -> Self {
LowerTyMap {
type_ref_to_type: Default::default(),
diagnostics: vec![],
unknown_ty: TyKind::Unknown.intern(),
}
}
}
impl Index<LocalTypeRefId> for LowerTyMap {
type Output = Ty;
fn index(&self, expr: LocalTypeRefId) -> &Ty {
self.type_ref_to_type.get(expr).unwrap_or(&self.unknown_ty)
}
}
impl LowerTyMap {
/// Adds all the `LowerDiagnostic`s of the result to the `DiagnosticSink`.
pub(crate) fn add_diagnostics(
&self,
db: &dyn HirDatabase,
file_id: FileId,
source_map: &TypeRefSourceMap,
sink: &mut DiagnosticSink,
) {
self.diagnostics
.iter()
.for_each(|it| it.add_to(db, file_id, source_map, sink))
}
}
impl Ty {
    /// Tries to lower a HIR type reference to an actual resolved type. Besides
    /// the type, this also returns any diagnostics that were encountered along
    /// the way.
pub(crate) fn from_hir(
db: &dyn HirDatabase,
resolver: &Resolver,
type_ref_map: &TypeRefMap,
type_ref: LocalTypeRefId,
) -> (Ty, Vec<diagnostics::LowerDiagnostic>) {
let mut diagnostics = Vec::new();
let ty =
Ty::from_hir_with_diagnostics(db, resolver, type_ref_map, &mut diagnostics, type_ref);
(ty, diagnostics)
}
    /// Tries to lower a HIR type reference to an actual resolved type. Takes a
    /// mutable reference to a `Vec` which will hold any diagnostics encountered
    /// along the way.
fn from_hir_with_diagnostics(
db: &dyn HirDatabase,
resolver: &Resolver,
type_ref_map: &TypeRefMap,
diagnostics: &mut Vec<LowerDiagnostic>,
type_ref: LocalTypeRefId,
) -> Ty {
let res = match &type_ref_map[type_ref] {
TypeRef::Path(path) => Ty::from_path(db, resolver, type_ref, path, diagnostics),
TypeRef::Error => Some(TyKind::Unknown.intern()),
TypeRef::Tuple(inner) => {
let inner_tys = inner.iter().map(|tr| {
Self::from_hir_with_diagnostics(db, resolver, type_ref_map, diagnostics, *tr)
});
Some(TyKind::Tuple(inner_tys.len(), inner_tys.collect()).intern())
}
TypeRef::Never => Some(TyKind::Never.intern()),
TypeRef::Array(inner) => {
let inner = Self::from_hir_with_diagnostics(
db,
resolver,
type_ref_map,
diagnostics,
*inner,
);
Some(TyKind::Array(inner).intern())
}
};
if let Some(ty) = res {
ty
} else {
diagnostics.push(LowerDiagnostic::UnresolvedType { id: type_ref });
TyKind::Unknown.intern()
}
}
/// Constructs a `Ty` from a path.
fn from_path(
db: &dyn HirDatabase,
resolver: &Resolver,
type_ref: LocalTypeRefId,
path: &Path,
diagnostics: &mut Vec<LowerDiagnostic>,
) -> Option<Self> {
// Find the type
let (ty, vis) = resolver.resolve_path_as_type_fully(db.upcast(), path)?;
// Get the definition and visibility
let def = match ty {
TypeNs::StructId(id) => TypableDef::Struct(id.into()),
TypeNs::TypeAliasId(id) => TypableDef::TypeAlias(id.into()),
TypeNs::PrimitiveType(id) => TypableDef::PrimitiveType(id),
};
// Get the current module and see if the type is visible from here
if let Some(module) = resolver.module() {
            if !vis.is_visible_from(db, module) {
diagnostics.push(LowerDiagnostic::TypeIsPrivate { id: type_ref })
}
}
Some(db.type_for_def(def, Namespace::Types))
}
}
/// Resolves all types in the specified `TypeRefMap`.
pub fn lower_types(
db: &dyn HirDatabase,
resolver: &Resolver,
type_ref_map: &TypeRefMap,
) -> Arc<LowerTyMap> {
let mut result = LowerTyMap::default();
for (id, _) in type_ref_map.iter() {
let ty =
Ty::from_hir_with_diagnostics(db, resolver, type_ref_map, &mut result.diagnostics, id);
result.type_ref_to_type.insert(id, ty);
}
Arc::new(result)
}
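/// Lowers all type references that appear in the definition of a struct.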
pub fn lower_struct_query(db: &dyn HirDatabase, s: Struct) -> Arc<LowerTyMap> {
let data = s.data(db.upcast());
lower_types(db, &s.id.resolver(db.upcast()), data.type_ref_map())
}
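/// Lowers all type references that appear in the definition of a type alias.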
pub fn lower_type_alias_query(db: &dyn HirDatabase, t: TypeAlias) -> Arc<LowerTyMap> {
let data = t.data(db.upcast());
lower_types(db, &t.id.resolver(db.upcast()), data.type_ref_map())
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum TypableDef {
Function(Function),
PrimitiveType(PrimitiveType),
Struct(Struct),
TypeAlias(TypeAlias),
}
impl From<Function> for TypableDef {
fn from(f: Function) -> Self {
TypableDef::Function(f)
}
}
impl From<PrimitiveType> for TypableDef {
fn from(f: PrimitiveType) -> Self {
TypableDef::PrimitiveType(f)
}
}
impl From<Struct> for TypableDef {
fn from(f: Struct) -> Self {
TypableDef::Struct(f)
}
}
impl From<ModuleDef> for Option<TypableDef> {
fn from(d: ModuleDef) -> Self {
match d {
ModuleDef::Function(f) => Some(TypableDef::Function(f)),
ModuleDef::PrimitiveType(t) => Some(TypableDef::PrimitiveType(t)),
ModuleDef::Struct(t) => Some(TypableDef::Struct(t)),
ModuleDef::TypeAlias(t) => Some(TypableDef::TypeAlias(t)),
ModuleDef::Module(_) => None,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum CallableDef {
Function(Function),
Struct(Struct),
}
impl_froms!(CallableDef: Function, Struct);
impl CallableDef {
pub fn is_function(self) -> bool {
matches!(self, CallableDef::Function(_))
}
pub fn is_struct(self) -> bool {
matches!(self, CallableDef::Struct(_))
}
}
impl HasVisibility for CallableDef {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
match self {
CallableDef::Struct(strukt) => strukt.visibility(db),
CallableDef::Function(function) => function.visibility(db),
}
}
}
/// Build the declared type of an item. This depends on the namespace; e.g. for
/// `struct Foo(usize)`, we have two types: The type of the struct itself, and
/// the constructor function `(usize) -> Foo` which lives in the values
/// namespace.
pub(crate) fn type_for_def(db: &dyn HirDatabase, def: TypableDef, ns: Namespace) -> Ty {
match (def, ns) {
(TypableDef::Function(f), Namespace::Values) => type_for_fn(db, f),
(TypableDef::PrimitiveType(t), Namespace::Types) => type_for_primitive(t),
(TypableDef::Struct(s), Namespace::Values) => type_for_struct_constructor(db, s),
(TypableDef::Struct(s), Namespace::Types) => type_for_struct(db, s),
(TypableDef::TypeAlias(t), Namespace::Types) => type_for_type_alias(db, t),
// 'error' cases:
(TypableDef::Function(_), Namespace::Types) => TyKind::Unknown.intern(),
(TypableDef::PrimitiveType(_), Namespace::Values) => TyKind::Unknown.intern(),
(TypableDef::TypeAlias(_), Namespace::Values) => TyKind::Unknown.intern(),
}
}
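// For example (an illustrative sketch, assuming `s` is the `Struct` for
// `struct Foo(usize)`): in `Namespace::Types` this returns the struct type
// `Foo`, while in `Namespace::Values` it returns the tuple-struct constructor
// type `fn(usize) -> Foo` built by `type_for_struct_constructor` below.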
/// Build the declared type of a primitive.
fn type_for_primitive(def: PrimitiveType) -> Ty {
match def {
PrimitiveType::Float(f) => TyKind::Float(f.into()),
PrimitiveType::Int(i) => TyKind::Int(i.into()),
PrimitiveType::Bool => TyKind::Bool,
}
.intern()
}
/// Build the declared type of a function. This should not need to look at the
/// function body.
fn type_for_fn(_db: &dyn HirDatabase, def: Function) -> Ty {
TyKind::FnDef(def.into(), Substitution::empty()).intern()
}
pub(crate) fn callable_item_sig(db: &dyn HirDatabase, def: CallableDef) -> FnSig {
match def {
        CallableDef::Function(f) => fn_sig_for_fn(db, f),
        CallableDef::Struct(s) => fn_sig_for_struct_constructor(db, s),
}
}
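/// Builds the `FnSig` of a function from its declared parameter and return types.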
pub(crate) fn fn_sig_for_fn(db: &dyn HirDatabase, def: Function) -> FnSig {
let data = def.data(db.upcast());
let resolver = def.id.resolver(db.upcast());
let params = data
.params()
.iter()
.map(|tr| Ty::from_hir(db, &resolver, data.type_ref_map(), *tr).0)
.collect::<Vec<_>>();
let ret = Ty::from_hir(db, &resolver, data.type_ref_map(), *data.ret_type()).0;
FnSig::from_params_and_return(params, ret)
}
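/// Builds the `FnSig` of a struct constructor: one parameter per field, returning the struct type.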
pub(crate) fn fn_sig_for_struct_constructor(db: &dyn HirDatabase, def: Struct) -> FnSig {
let data = def.data(db.upcast());
let resolver = def.id.resolver(db.upcast());
let params = data
.fields
.iter()
.map(|(_, field)| Ty::from_hir(db, &resolver, data.type_ref_map(), field.type_ref).0)
.collect::<Vec<_>>();
let ret = type_for_struct(db, def);
FnSig::from_params_and_return(params, ret)
}
/// Build the type of a struct constructor.
fn type_for_struct_constructor(db: &dyn HirDatabase, def: Struct) -> Ty {
let struct_data = db.struct_data(def.id);
if struct_data.kind == StructKind::Tuple {
TyKind::FnDef(def.into(), Substitution::empty()).intern()
} else {
type_for_struct(db, def)
}
}
fn type_for_struct(_db: &dyn HirDatabase, def: Struct) -> Ty {
TyKind::Struct(def).intern()
}
fn type_for_type_alias(_db: &dyn HirDatabase, def: TypeAlias) -> Ty {
TyKind::TypeAlias(def).intern()
}
pub mod diagnostics {
use crate::diagnostics::{PrivateAccess, UnresolvedType};
use crate::{
diagnostics::DiagnosticSink,
type_ref::{LocalTypeRefId, TypeRefSourceMap},
FileId, HirDatabase,
};
#[derive(Debug, PartialEq, Eq, Clone)]
pub(crate) enum LowerDiagnostic {
UnresolvedType { id: LocalTypeRefId },
TypeIsPrivate { id: LocalTypeRefId },
}
impl LowerDiagnostic {
pub(crate) fn add_to(
&self,
_db: &dyn HirDatabase,
file_id: FileId,
source_map: &TypeRefSourceMap,
sink: &mut DiagnosticSink,
) {
match self {
LowerDiagnostic::UnresolvedType { id } => sink.push(UnresolvedType {
file: file_id,
type_ref: source_map.type_ref_syntax(*id).unwrap(),
}),
LowerDiagnostic::TypeIsPrivate { id } => sink.push(PrivateAccess {
file: file_id,
expr: source_map.type_ref_syntax(*id).unwrap().syntax_node_ptr(),
}),
}
}
}
}
movegen.rs | use nodrop::NoDrop;
use std::iter::ExactSizeIterator;
use std::mem;
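/// A source square together with a bitboard of its legal destination squares,
/// plus a flag marking pawn promotions: the compact per-piece move
/// representation stored by `MoveGen`.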
#[derive(Copy, Clone, PartialEq, PartialOrd)]
pub struct SquareAndBitBoard {
square: Square,
bitboard: BitBoard,
promotion: bool,
}
impl SquareAndBitBoard {
pub fn new(sq: Square, bb: BitBoard, promotion: bool) -> SquareAndBitBoard {
SquareAndBitBoard {
square: sq,
bitboard: bb,
promotion: promotion,
}
}
}
pub type MoveList = NoDrop<ArrayVec<SquareAndBitBoard, 18>>;
/// An incremental move generator
///
/// This structure enumerates moves slightly slower than board.enumerate_moves(...),
/// but has some extra features, such as:
///
/// * Being an iterator
/// * Not requiring you to create a buffer
/// * Only iterating moves that match a certain pattern
/// * Being iterable multiple times (such as, iterating once for all captures, then iterating again
/// for all quiets)
/// * Doing as little work early on as possible, so that if you are not going to look at every move, the
/// structure moves faster
/// * Being able to iterate pseudo legal moves, while keeping the (nearly) free legality checks in
/// place
///
/// # Examples
///
/// ```
/// use chess::MoveGen;
/// use chess::Board;
/// use chess::EMPTY;
/// use chess::construct;
///
/// // create a board with the initial position
/// let board = Board::default();
///
/// // create an iterable
/// let mut iterable = MoveGen::new_legal(&board);
///
/// // make sure .len() works.
/// assert_eq!(iterable.len(), 20); // the .len() function does *not* consume the iterator
///
/// // lets iterate over targets.
/// let targets = board.color_combined(!board.side_to_move());
/// iterable.set_iterator_mask(*targets);
///
/// // count the number of targets
/// let mut count = 0;
/// for _ in &mut iterable {
/// count += 1;
/// // This move captures one of my opponent's pieces (with the exception of en passant)
/// }
///
/// // now, iterate over the rest of the moves
/// iterable.set_iterator_mask(!EMPTY);
/// for _ in &mut iterable {
/// count += 1;
/// // This move does not capture anything
/// }
///
/// // make sure it works
/// assert_eq!(count, 20);
///
/// ```
pub struct MoveGen {
moves: MoveList,
promotion_index: usize,
iterator_mask: BitBoard,
index: usize,
}
impl MoveGen {
#[inline(always)]
fn enumerate_moves(board: &Board) -> MoveList {
let checkers = *board.checkers();
        let mask = !board.color_combined(board.side_to_move());
let mut movelist = NoDrop::new(ArrayVec::<SquareAndBitBoard, 18>::new());
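        // No checkers: every piece type may have legal moves. Exactly one
        // checker: a move must capture the checker or block the check, which
        // the `InCheckType` legality handles. Two or more checkers: only king
        // moves can be legal.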
if checkers == EMPTY {
PawnType::legals::<NotInCheckType>(&mut movelist, &board, mask);
KnightType::legals::<NotInCheckType>(&mut movelist, &board, mask);
BishopType::legals::<NotInCheckType>(&mut movelist, &board, mask);
RookType::legals::<NotInCheckType>(&mut movelist, &board, mask);
QueenType::legals::<NotInCheckType>(&mut movelist, &board, mask);
KingType::legals::<NotInCheckType>(&mut movelist, &board, mask);
} else if checkers.popcnt() == 1 {
PawnType::legals::<InCheckType>(&mut movelist, &board, mask);
KnightType::legals::<InCheckType>(&mut movelist, &board, mask);
BishopType::legals::<InCheckType>(&mut movelist, &board, mask);
RookType::legals::<InCheckType>(&mut movelist, &board, mask);
QueenType::legals::<InCheckType>(&mut movelist, &board, mask);
KingType::legals::<InCheckType>(&mut movelist, &board, mask);
} else {
KingType::legals::<InCheckType>(&mut movelist, &board, mask);
}
movelist
}
/// Create a new `MoveGen` structure, only generating legal moves
#[inline(always)]
pub fn new_legal(board: &Board) -> MoveGen {
MoveGen {
moves: MoveGen::enumerate_moves(board),
promotion_index: 0,
            iterator_mask: !EMPTY,
index: 0,
}
}
/// Never, ever, iterate any moves that land on the following squares
pub fn remove_mask(&mut self, mask: BitBoard) {
for x in 0..self.moves.len() {
            self.moves[x].bitboard &= !mask;
}
}
/// Never, ever, iterate this move
pub fn remove_move(&mut self, chess_move: ChessMove) -> bool {
for x in 0..self.moves.len() {
if self.moves[x].square == chess_move.get_source() {
                self.moves[x].bitboard &= !BitBoard::from_square(chess_move.get_dest());
return true;
}
}
false
}
/// For now, Only iterate moves that land on the following squares
/// Note: Once iteration is completed, you can pass in a mask of! `EMPTY`
/// to get the remaining moves, or another mask
pub fn set_iterator_mask(&mut self, mask: BitBoard) {
self.iterator_mask = mask;
self.index = 0;
// the iterator portion of this struct relies on the invariant that
// the bitboards at the beginning of the moves[] array are the only
// ones used. As a result, we must partition the list such that the
// assumption is true.
// first, find the first non-used moves index, and store that in i
let mut i = 0;
        while i < self.moves.len() && self.moves[i].bitboard & self.iterator_mask != EMPTY {
i += 1;
}
// next, find each element past i where the moves are used, and store
// that in i. Then, increment i to point to a new unused slot.
for j in (i + 1)..self.moves.len() {
            if self.moves[j].bitboard & self.iterator_mask != EMPTY {
let backup = self.moves[i];
self.moves[i] = self.moves[j];
self.moves[j] = backup;
i += 1;
}
}
}
/// This function checks the legality *only for moves generated by `MoveGen`*.
///
/// Calling this function for moves not generated by `MoveGen` will result in possibly
/// incorrect results, and making that move on the `Board` will result in undefined behavior.
/// This function may panic! if these rules are not followed.
///
    /// If you are validating a move from a user, you should call the .legal() function.
pub fn legal_quick(board: &Board, chess_move: ChessMove) -> bool {
let piece = board.piece_on(chess_move.get_source()).unwrap();
match piece {
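            // Knight, bishop, rook and queen moves generated by `MoveGen` are
            // already fully legal; only pawn en-passant captures and king
            // moves need extra validation here.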
Piece::Rook => true,
Piece::Bishop => true,
Piece::Knight => true,
Piece::Queen => true,
Piece::Pawn => {
                if chess_move.get_source().get_file() != chess_move.get_dest().get_file()
&& board.piece_on(chess_move.get_dest()).is_none()
{
// en-passant
PawnType::legal_ep_move(board, chess_move.get_source(), chess_move.get_dest())
} else {
true
}
}
Piece::King => {
let bb = between(chess_move.get_source(), chess_move.get_dest());
if bb.popcnt() == 1 {
// castles
                    if !KingType::legal_king_move(board, bb.to_square()) {
false
} else {
KingType::legal_king_move(board, chess_move.get_dest())
}
} else {
KingType::legal_king_move(board, chess_move.get_dest())
}
}
}
}
/// Fastest perft test with this structure
pub fn movegen_perft_test(board: &Board, depth: usize) -> usize {
let iterable = MoveGen::new_legal(board);
let mut result: usize = 0;
if depth == 1 {
iterable.len()
} else {
for m in iterable {
let bresult = board.make_move_new(m);
result += MoveGen::movegen_perft_test(&bresult, depth - 1);
}
result
}
}
#[cfg(test)]
/// Do a perft test after splitting the moves up into two groups
pub fn movegen_perft_test_piecewise(board: &Board, depth: usize) -> usize {
let mut iterable = MoveGen::new_legal(board);
let targets = board.color_combined(!board.side_to_move());
let mut result: usize = 0;
if depth == 1 {
iterable.set_iterator_mask(*targets);
result += iterable.len();
iterable.set_iterator_mask(!targets);
result += iterable.len();
result
} else {
iterable.set_iterator_mask(*targets);
for x in &mut iterable {
let mut bresult = mem::MaybeUninit::<Board>::uninit();
unsafe {
board.make_move(x, &mut *bresult.as_mut_ptr());
result += MoveGen::movegen_perft_test(&*bresult.as_ptr(), depth - 1);
}
}
iterable.set_iterator_mask(!EMPTY);
for x in &mut iterable {
let mut bresult = mem::MaybeUninit::<Board>::uninit();
unsafe {
board.make_move(x, &mut *bresult.as_mut_ptr());
result += MoveGen::movegen_perft_test(&*bresult.as_ptr(), depth - 1);
}
}
result
}
}
}
impl ExactSizeIterator for MoveGen {
/// Give the exact length of this iterator
fn len(&self) -> usize {
let mut result = 0;
for i in 0..self.moves.len() {
if self.moves[i].bitboard & self.iterator_mask == EMPTY {
break;
}
if self.moves[i].promotion {
result += ((self.moves[i].bitboard & self.iterator_mask).popcnt() as usize)
* NUM_PROMOTION_PIECES;
} else {
result += (self.moves[i].bitboard & self.iterator_mask).popcnt() as usize;
}
}
result
}
}
impl Iterator for MoveGen {
type Item = ChessMove;
/// Give a size_hint to some functions that need it
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
/// Find the next chess move.
fn next(&mut self) -> Option<ChessMove> {
if self.index >= self.moves.len()
|| self.moves[self.index].bitboard & self.iterator_mask == EMPTY
{
// are we done?
None
} else if self.moves[self.index].promotion {
let moves = &mut self.moves[self.index];
let dest = (moves.bitboard & self.iterator_mask).to_square();
// deal with potential promotions for this pawn
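// Each promotion square is yielded once per promotion piece; the
// destination bit is cleared from the bitboard only after the final
// promotion piece has been returned.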
let result = ChessMove::new(
moves.square,
dest,
Some(PROMOTION_PIECES[self.promotion_index]),
);
self.promotion_index += 1;
if self.promotion_index >= NUM_PROMOTION_PIECES {
moves.bitboard ^= BitBoard::from_square(dest);
self.promotion_index = 0;
if moves.bitboard & self.iterator_mask == EMPTY {
self.index += 1;
}
}
Some(result)
} else {
// not a promotion move, so it's a 'normal' move as far as this function is concerned
let moves = &mut self.moves[self.index];
let dest = (moves.bitboard & self.iterator_mask).to_square();
moves.bitboard ^= BitBoard::from_square(dest);
if moves.bitboard & self.iterator_mask == EMPTY {
self.index += 1;
}
Some(ChessMove::new(moves.square, dest, None))
}
}
}
#[cfg(test)]
use crate::board_builder::BoardBuilder;
#[cfg(test)]
use std::collections::HashSet;
#[cfg(test)]
use std::convert::TryInto;
#[cfg(test)]
use std::str::FromStr;
#[cfg(test)]
fn movegen_perft_test(fen: String, depth: usize, result: usize) {
let board: Board = BoardBuilder::from_str(&fen).unwrap().try_into().unwrap();
assert_eq!(MoveGen::movegen_perft_test(&board, depth), result);
assert_eq!(MoveGen::movegen_perft_test_piecewise(&board, depth), result);
}
#[test]
fn movegen_perft_kiwipete() {
movegen_perft_test(
"r3k2r/p1ppqpb1/bn2pnp1/3PN3/1p2P3/2N2Q1p/PPPBBPPP/R3K2R w KQkq - 0 1".to_owned(),
5,
193690690,
);
}
#[test]
fn movegen_perft_1() {
movegen_perft_test("8/5bk1/8/2Pp4/8/1K6/8/8 w - d6 0 1".to_owned(), 6, 824064);
// Invalid FEN
}
#[test]
fn movegen_perft_2() {
movegen_perft_test("8/8/1k6/8/2pP4/8/5BK1/8 b - d3 0 1".to_owned(), 6, 824064);
// Invalid FEN
}
#[test]
fn movegen_perft_3() {
movegen_perft_test("8/8/1k6/2b5/2pP4/8/5K2/8 b - d3 0 1".to_owned(), 6, 1440467);
}
#[test]
fn movegen_perft_4() {
movegen_perft_test("8/5k2/8/2Pp4/2B5/1K6/8/8 w - d6 0 1".to_owned(), 6, 1440467);
}
#[test]
fn movegen_perft_5() {
movegen_perft_test("5k2/8/8/8/8/8/8/4K2R w K - 0 1".to_owned(), 6, 661072);
}
#[test]
fn movegen_perft_6() {
movegen_perft_test("4k2r/8/8/8/8/8/8/5K2 b k - 0 1".to_owned(), 6, 661072);
}
#[test]
fn movegen_perft_7() {
movegen_perft_test("3k4/8/8/8/8/8/8/R3K3 w Q - 0 1".to_owned(), 6, 803711);
}
#[test]
fn movegen_perft_8() {
movegen_perft_test("r3k3/8/8/8/8/8/8/3K4 b q - 0 1".to_owned(), 6, 803711);
}
#[test]
fn movegen_perft_9() {
movegen_perft_test(
"r3k2r/1b4bq/8/8/8/8/7B/R3K2R w KQkq - 0 1".to_owned(),
4,
1274206,
);
}
#[test]
fn movegen_perft_10() {
movegen_perft_test(
"r3k2r/7b/8/8/8/8/1B4BQ/R3K2R b KQkq - 0 1".to_owned(),
4,
1274206,
);
}
#[test]
fn movegen_perft_11() {
movegen_perft_test(
"r3k2r/8/3Q4/8/8/5q2/8/R3K2R b KQkq - 0 1".to_owned(),
4,
1720476,
);
}
#[test]
fn movegen_perft_12() {
movegen_perft_test(
"r3k2r/8/5Q2/8/8/3q4/8/R3K2R w KQkq - 0 1".to_owned(),
4,
1720476,
);
}
#[test]
fn movegen_perft_13() {
movegen_perft_test("2K2r2/4P3/8/8/8/8/8/3k4 w - - 0 1".to_owned(), 6, 3821001);
}
#[test]
fn movegen_perft_14() {
movegen_perft_test("3K4/8/8/8/8/8/4p3/2k2R2 b - - 0 1".to_owned(), 6, 3821001);
}
#[test]
fn movegen_perft_15() {
movegen_perft_test("8/8/1P2K3/8/2n5/1q6/8/5k2 b - - 0 1".to_owned(), 5, 1004658);
}
#[test]
fn movegen_perft_16() {
movegen_perft_test("5K2/8/1Q6/2N5/8/1p2k3/8/8 w - - 0 1".to_owned(), 5, 1004658);
}
#[test]
fn movegen_perft_17() {
movegen_perft_test("4k3/1P6/8/8/8/8/K7/8 w - - 0 1".to_owned(), 6, 217342);
}
#[test]
fn movegen_perft_18() {
movegen_perft_test("8/k7/8/8/8/8/1p6/4K3 b - - 0 1".to_owned(), 6, 217342);
}
#[test]
fn movegen_perft_19() {
movegen_perft_test("8/P1k5/K7/8/8/8/8/8 w - - 0 1".to_owned(), 6, 92683);
}
#[test]
fn movegen_perft_20() {
movegen_perft_test("8/8/8/8/8/k7/p1K5/8 b - - 0 1".to_owned(), 6, 92683);
}
#[test]
fn movegen_perft_21() {
movegen_perft_test("K1k5/8/P7/8/8/8/8/8 w - - 0 1".to_owned(), 6, 2217);
}
#[test]
fn | () {
movegen_perft_test("8/8/8/8/8/p7/8/k1K5 b - - 0 1".to_owned(), 6, 2217);
}
#[test]
fn movegen_perft_23() {
movegen_perft_test("8/k1P5/8/1K6/8/8/8/8 w - - 0 1".to_owned(), 7, 567584);
}
#[test]
fn movegen_perft_24() {
movegen_perft_test("8/8/8/8/1k6/8/K1p5/8 b - - 0 1".to_owned(), 7, 567584);
}
#[test]
fn movegen_perft_25() {
movegen_perft_test("8/8/2k5/5q2/5n2/8/5K2/8 b - - 0 1".to_owned(), 4, 23527);
}
#[test]
fn movegen_perft_26() {
movegen_perft_test("8/5k2/8/5N2/5Q2/2K5/8/8 w - - 0 1".to_owned(), 4, 23527);
}
#[test]
fn movegen_issue_15() {
let board =
BoardBuilder::from_str("rnbqkbnr/ppp2pp1/4p3/3N4/3PpPp1/8/PPP3PP/R1B1KBNR b KQkq f3 0 1")
.unwrap()
.try_into()
.unwrap();
let _ = MoveGen::new_legal(&board);
}
#[cfg(test)]
fn move_of(m: &str) -> ChessMove {
let promo = if m.len() > 4 {
Some(match m.as_bytes()[4] {
b'q' => Piece::Queen,
b'r' => Piece::Rook,
b'b' => Piece::Bishop,
b'n' => Piece::Knight,
_ => panic!("unrecognized uci move: {}", m),
})
} else {
None
};
ChessMove::new(
Square::from_str(&m[..2]).unwrap(),
Square::from_str(&m[2..4]).unwrap(),
promo,
)
}
#[test]
fn test_masked_move_gen() {
let board =
Board::from_str("r1bqkb1r/pp3ppp/5n2/2ppn1N1/4pP2/1BN1P3/PPPP2PP/R1BQ1RK1 w kq - 0 9")
.unwrap();
let mut capture_moves = Move | movegen_perft_22 | identifier_name |
movegen.rs | ;
use std::iter::ExactSizeIterator;
use std::mem;
#[derive(Copy, Clone, PartialEq, PartialOrd)]
pub struct SquareAndBitBoard {
square: Square,
bitboard: BitBoard,
promotion: bool,
}
impl SquareAndBitBoard {
pub fn new(sq: Square, bb: BitBoard, promotion: bool) -> SquareAndBitBoard {
SquareAndBitBoard {
square: sq,
bitboard: bb,
promotion: promotion,
}
}
}
pub type MoveList = NoDrop<ArrayVec<SquareAndBitBoard, 18>>;
/// An incremental move generator
///
/// This structure enumerates moves slightly slower than board.enumerate_moves(...),
/// but has some extra features, such as:
///
/// * Being an iterator
/// * Not requiring you to create a buffer
/// * Only iterating moves that match a certain pattern
/// * Being iterable multiple times (such as, iterating once for all captures, then iterating again
/// for all quiets)
/// * Doing as little work early on as possible, so that if you are not going to look at every move, the
/// structure moves faster
/// * Being able to iterate pseudo legal moves, while keeping the (nearly) free legality checks in
/// place
///
/// # Examples
///
/// ```
/// use chess::MoveGen;
/// use chess::Board;
/// use chess::EMPTY;
/// use chess::construct;
///
/// // create a board with the initial position
/// let board = Board::default();
///
/// // create an iterable
/// let mut iterable = MoveGen::new_legal(&board);
///
/// // make sure .len() works.
/// assert_eq!(iterable.len(), 20); // the .len() function does *not* consume the iterator
///
/// // lets iterate over targets.
/// let targets = board.color_combined(!board.side_to_move());
/// iterable.set_iterator_mask(*targets);
///
/// // count the number of targets
/// let mut count = 0;
/// for _ in &mut iterable {
/// count += 1;
/// // This move captures one of my opponent's pieces (with the exception of en passant)
/// }
///
/// // now, iterate over the rest of the moves
/// iterable.set_iterator_mask(!EMPTY);
/// for _ in &mut iterable {
/// count += 1;
/// // This move does not capture anything
/// }
///
/// // make sure it works
/// assert_eq!(count, 20);
///
/// ```
pub struct MoveGen {
moves: MoveList,
promotion_index: usize,
iterator_mask: BitBoard,
index: usize,
}
impl MoveGen {
#[inline(always)]
fn enumerate_moves(board: &Board) -> MoveList {
let checkers = *board.checkers();
let mask = !board.color_combined(board.side_to_move());
let mut movelist = NoDrop::new(ArrayVec::<SquareAndBitBoard, 18>::new());
if checkers == EMPTY {
PawnType::legals::<NotInCheckType>(&mut movelist, &board, mask);
KnightType::legals::<NotInCheckType>(&mut movelist, &board, mask);
BishopType::legals::<NotInCheckType>(&mut movelist, &board, mask);
RookType::legals::<NotInCheckType>(&mut movelist, &board, mask);
QueenType::legals::<NotInCheckType>(&mut movelist, &board, mask);
KingType::legals::<NotInCheckType>(&mut movelist, &board, mask);
} else if checkers.popcnt() == 1 {
PawnType::legals::<InCheckType>(&mut movelist, &board, mask);
KnightType::legals::<InCheckType>(&mut movelist, &board, mask);
BishopType::legals::<InCheckType>(&mut movelist, &board, mask);
RookType::legals::<InCheckType>(&mut movelist, &board, mask);
QueenType::legals::<InCheckType>(&mut movelist, &board, mask);
KingType::legals::<InCheckType>(&mut movelist, &board, mask);
} else {
KingType::legals::<InCheckType>(&mut movelist, &board, mask);
}
movelist
}
/// Create a new `MoveGen` structure, only generating legal moves
#[inline(always)]
pub fn new_legal(board: &Board) -> MoveGen {
MoveGen {
moves: MoveGen::enumerate_moves(board),
promotion_index: 0,
iterator_mask: !EMPTY,
index: 0,
}
}
/// Never, ever, iterate any moves that land on the following squares
pub fn remove_mask(&mut self, mask: BitBoard) {
for x in 0..self.moves.len() {
self.moves[x].bitboard &= !mask;
}
}
/// Never, ever, iterate this move
pub fn remove_move(&mut self, chess_move: ChessMove) -> bool {
for x in 0..self.moves.len() {
if self.moves[x].square == chess_move.get_source() {
self.moves[x].bitboard &= !BitBoard::from_square(chess_move.get_dest());
return true;
}
}
false
}
/// For now, only iterate moves that land on the following squares
/// Note: Once iteration is completed, you can pass in a mask of `!EMPTY`
/// to get the remaining moves, or another mask
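/// An illustrative doc sketch (not in the original source): iterate captures
/// first, then the remaining quiet moves.
///
/// ```
/// use chess::{Board, MoveGen, EMPTY};
///
/// let board = Board::default();
/// let mut movegen = MoveGen::new_legal(&board);
/// // First pass: only moves that land on an enemy piece.
/// movegen.set_iterator_mask(*board.color_combined(!board.side_to_move()));
/// let captures = (&mut movegen).count();
/// // Second pass: every remaining (quiet) move.
/// movegen.set_iterator_mask(!EMPTY);
/// let quiets = (&mut movegen).count();
/// assert_eq!(captures + quiets, 20);
/// ```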
pub fn set_iterator_mask(&mut self, mask: BitBoard) {
self.iterator_mask = mask;
self.index = 0;
// the iterator portion of this struct relies on the invariant that
// the bitboards at the beginning of the moves[] array are the only
// ones used. As a result, we must partition the list such that the
// assumption is true.
// first, find the first non-used moves index, and store that in i
let mut i = 0;
while i < self.moves.len() && self.moves[i].bitboard & self.iterator_mask != EMPTY {
i += 1;
}
// next, find each element past i where the moves are used, and store
// that in i. Then, increment i to point to a new unused slot.
for j in (i + 1)..self.moves.len() {
if self.moves[j].bitboard & self.iterator_mask != EMPTY {
let backup = self.moves[i];
self.moves[i] = self.moves[j];
self.moves[j] = backup;
i += 1;
}
}
}
/// This function checks the legality *only for moves generated by `MoveGen`*.
///
/// Calling this function for moves not generated by `MoveGen` will result in possibly
/// incorrect results, and making that move on the `Board` will result in undefined behavior.
/// This function may panic! if these rules are not followed.
///
/// If you are validating a move from a user, you should call the .legal() function.
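/// A hedged doc sketch of the intended contract, assuming the moves come from
/// `MoveGen` itself:
///
/// ```
/// use chess::{Board, MoveGen};
///
/// let board = Board::default();
/// for m in MoveGen::new_legal(&board) {
///     // Every move produced by the generator passes the quick check.
///     assert!(MoveGen::legal_quick(&board, m));
/// }
/// ```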
pub fn legal_quick(board: &Board, chess_move: ChessMove) -> bool {
let piece = board.piece_on(chess_move.get_source()).unwrap();
match piece {
Piece::Rook => true,
Piece::Bishop => true,
Piece::Knight => true,
Piece::Queen => true,
Piece::Pawn => {
if chess_move.get_source().get_file() != chess_move.get_dest().get_file()
&& board.piece_on(chess_move.get_dest()).is_none()
{
// en-passant
PawnType::legal_ep_move(board, chess_move.get_source(), chess_move.get_dest())
} else {
true
}
}
Piece::King => {
let bb = between(chess_move.get_source(), chess_move.get_dest());
if bb.popcnt() == 1 {
// castles
if !KingType::legal_king_move(board, bb.to_square()) {
false
} else {
KingType::legal_king_move(board, chess_move.get_dest())
}
} else {
KingType::legal_king_move(board, chess_move.get_dest())
}
}
}
}
/// Fastest perft test with this structure
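/// An illustrative check against well-known perft values (a doc sketch, not
/// part of the original comments):
///
/// ```
/// use chess::{Board, MoveGen};
///
/// assert_eq!(MoveGen::movegen_perft_test(&Board::default(), 1), 20);
/// assert_eq!(MoveGen::movegen_perft_test(&Board::default(), 2), 400);
/// assert_eq!(MoveGen::movegen_perft_test(&Board::default(), 3), 8902);
/// ```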
pub fn movegen_perft_test(board: &Board, depth: usize) -> usize {
let iterable = MoveGen::new_legal(board);
let mut result: usize = 0;
if depth == 1 {
iterable.len()
} else {
for m in iterable {
let bresult = board.make_move_new(m);
result += MoveGen::movegen_perft_test(&bresult, depth - 1);
}
result
}
}
#[cfg(test)]
/// Do a perft test after splitting the moves up into two groups
pub fn movegen_perft_test_piecewise(board: &Board, depth: usize) -> usize {
let mut iterable = MoveGen::new_legal(board);
let targets = board.color_combined(!board.side_to_move());
let mut result: usize = 0;
if depth == 1 {
iterable.set_iterator_mask(*targets);
result += iterable.len();
iterable.set_iterator_mask(!targets);
result += iterable.len();
result
} else {
iterable.set_iterator_mask(*targets);
for x in &mut iterable {
let mut bresult = mem::MaybeUninit::<Board>::uninit();
unsafe {
board.make_move(x, &mut *bresult.as_mut_ptr());
result += MoveGen::movegen_perft_test(&*bresult.as_ptr(), depth - 1);
}
}
iterable.set_iterator_mask(!EMPTY);
for x in &mut iterable {
let mut bresult = mem::MaybeUninit::<Board>::uninit();
unsafe {
board.make_move(x, &mut *bresult.as_mut_ptr());
result += MoveGen::movegen_perft_test(&*bresult.as_ptr(), depth - 1);
}
}
result
}
}
}
impl ExactSizeIterator for MoveGen {
/// Give the exact length of this iterator
fn len(&self) -> usize {
let mut result = 0;
for i in 0..self.moves.len() {
if self.moves[i].bitboard & self.iterator_mask == EMPTY {
break;
}
if self.moves[i].promotion {
result += ((self.moves[i].bitboard & self.iterator_mask).popcnt() as usize)
* NUM_PROMOTION_PIECES;
} else {
result += (self.moves[i].bitboard & self.iterator_mask).popcnt() as usize;
}
}
result
}
}
impl Iterator for MoveGen {
type Item = ChessMove;
/// Give a size_hint to some functions that need it
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
/// Find the next chess move.
fn next(&mut self) -> Option<ChessMove> {
if self.index >= self.moves.len()
|| self.moves[self.index].bitboard & self.iterator_mask == EMPTY
{
// are we done?
None
} else if self.moves[self.index].promotion {
let moves = &mut self.moves[self.index];
let dest = (moves.bitboard & self.iterator_mask).to_square();
// deal with potential promotions for this pawn
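// Each promotion square is yielded once per promotion piece; the
// destination bit is cleared from the bitboard only after the final
// promotion piece has been returned.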
let result = ChessMove::new(
moves.square,
dest,
Some(PROMOTION_PIECES[self.promotion_index]),
);
self.promotion_index += 1;
if self.promotion_index >= NUM_PROMOTION_PIECES {
moves.bitboard ^= BitBoard::from_square(dest);
self.promotion_index = 0;
if moves.bitboard & self.iterator_mask == EMPTY {
self.index += 1;
}
}
Some(result)
} else {
// not a promotion move, so it's a 'normal' move as far as this function is concerned
let moves = &mut self.moves[self.index];
let dest = (moves.bitboard & self.iterator_mask).to_square();
moves.bitboard ^= BitBoard::from_square(dest);
if moves.bitboard & self.iterator_mask == EMPTY {
self.index += 1;
}
Some(ChessMove::new(moves.square, dest, None))
}
}
}
#[cfg(test)]
use crate::board_builder::BoardBuilder;
#[cfg(test)]
use std::collections::HashSet;
#[cfg(test)]
use std::convert::TryInto;
#[cfg(test)]
use std::str::FromStr;
#[cfg(test)]
fn movegen_perft_test(fen: String, depth: usize, result: usize) {
let board: Board = BoardBuilder::from_str(&fen).unwrap().try_into().unwrap();
assert_eq!(MoveGen::movegen_perft_test(&board, depth), result);
assert_eq!(MoveGen::movegen_perft_test_piecewise(&board, depth), result);
}
#[test]
fn movegen_perft_kiwipete() {
movegen_perft_test(
"r3k2r/p1ppqpb1/bn2pnp1/3PN3/1p2P3/2N2Q1p/PPPBBPPP/R3K2R w KQkq - 0 1".to_owned(),
5,
193690690,
);
}
#[test]
fn movegen_perft_1() |
#[test]
fn movegen_perft_2() {
movegen_perft_test("8/8/1k6/8/2pP4/8/5BK1/8 b - d3 0 1".to_owned(), 6, 824064);
// Invalid FEN
}
#[test]
fn movegen_perft_3() {
movegen_perft_test("8/8/1k6/2b5/2pP4/8/5K2/8 b - d3 0 1".to_owned(), 6, 1440467);
}
#[test]
fn movegen_perft_4() {
movegen_perft_test("8/5k2/8/2Pp4/2B5/1K6/8/8 w - d6 0 1".to_owned(), 6, 1440467);
}
#[test]
fn movegen_perft_5() {
movegen_perft_test("5k2/8/8/8/8/8/8/4K2R w K - 0 1".to_owned(), 6, 661072);
}
#[test]
fn movegen_perft_6() {
movegen_perft_test("4k2r/8/8/8/8/8/8/5K2 b k - 0 1".to_owned(), 6, 661072);
}
#[test]
fn movegen_perft_7() {
movegen_perft_test("3k4/8/8/8/8/8/8/R3K3 w Q - 0 1".to_owned(), 6, 803711);
}
#[test]
fn movegen_perft_8() {
movegen_perft_test("r3k3/8/8/8/8/8/8/3K4 b q - 0 1".to_owned(), 6, 803711);
}
#[test]
fn movegen_perft_9() {
movegen_perft_test(
"r3k2r/1b4bq/8/8/8/8/7B/R3K2R w KQkq - 0 1".to_owned(),
4,
1274206,
);
}
#[test]
fn movegen_perft_10() {
movegen_perft_test(
"r3k2r/7b/8/8/8/8/1B4BQ/R3K2R b KQkq - 0 1".to_owned(),
4,
1274206,
);
}
#[test]
fn movegen_perft_11() {
movegen_perft_test(
"r3k2r/8/3Q4/8/8/5q2/8/R3K2R b KQkq - 0 1".to_owned(),
4,
1720476,
);
}
#[test]
fn movegen_perft_12() {
movegen_perft_test(
"r3k2r/8/5Q2/8/8/3q4/8/R3K2R w KQkq - 0 1".to_owned(),
4,
1720476,
);
}
#[test]
fn movegen_perft_13() {
movegen_perft_test("2K2r2/4P3/8/8/8/8/8/3k4 w - - 0 1".to_owned(), 6, 3821001);
}
#[test]
fn movegen_perft_14() {
movegen_perft_test("3K4/8/8/8/8/8/4p3/2k2R2 b - - 0 1".to_owned(), 6, 3821001);
}
#[test]
fn movegen_perft_15() {
movegen_perft_test("8/8/1P2K3/8/2n5/1q6/8/5k2 b - - 0 1".to_owned(), 5, 1004658);
}
#[test]
fn movegen_perft_16() {
movegen_perft_test("5K2/8/1Q6/2N5/8/1p2k3/8/8 w - - 0 1".to_owned(), 5, 1004658);
}
#[test]
fn movegen_perft_17() {
movegen_perft_test("4k3/1P6/8/8/8/8/K7/8 w - - 0 1".to_owned(), 6, 217342);
}
#[test]
fn movegen_perft_18() {
movegen_perft_test("8/k7/8/8/8/8/1p6/4K3 b - - 0 1".to_owned(), 6, 217342);
}
#[test]
fn movegen_perft_19() {
movegen_perft_test("8/P1k5/K7/8/8/8/8/8 w - - 0 1".to_owned(), 6, 92683);
}
#[test]
fn movegen_perft_20() {
movegen_perft_test("8/8/8/8/8/k7/p1K5/8 b - - 0 1".to_owned(), 6, 92683);
}
#[test]
fn movegen_perft_21() {
movegen_perft_test("K1k5/8/P7/8/8/8/8/8 w - - 0 1".to_owned(), 6, 2217);
}
#[test]
fn movegen_perft_22() {
movegen_perft_test("8/8/8/8/8/p7/8/k1K5 b - - 0 1".to_owned(), 6, 2217);
}
#[test]
fn movegen_perft_23() {
movegen_perft_test("8/k1P5/8/1K6/8/8/8/8 w - - 0 1".to_owned(), 7, 567584);
}
#[test]
fn movegen_perft_24() {
movegen_perft_test("8/8/8/8/1k6/8/K1p5/8 b - - 0 1".to_owned(), 7, 567584);
}
#[test]
fn movegen_perft_25() {
movegen_perft_test("8/8/2k5/5q2/5n2/8/5K2/8 b - - 0 1".to_owned(), 4, 23527);
}
#[test]
fn movegen_perft_26() {
movegen_perft_test("8/5k2/8/5N2/5Q2/2K5/8/8 w - - 0 1".to_owned(), 4, 23527);
}
#[test]
fn movegen_issue_15() {
let board =
BoardBuilder::from_str("rnbqkbnr/ppp2pp1/4p3/3N4/3PpPp1/8/PPP3PP/R1B1KBNR b KQkq f3 0 1")
.unwrap()
.try_into()
.unwrap();
let _ = MoveGen::new_legal(&board);
}
#[cfg(test)]
fn move_of(m: &str) -> ChessMove {
let promo = if m.len() > 4 {
Some(match m.as_bytes()[4] {
b'q' => Piece::Queen,
b'r' => Piece::Rook,
b'b' => Piece::Bishop,
b'n' => Piece::Knight,
_ => panic!("unrecognized uci move: {}", m),
})
} else {
None
};
ChessMove::new(
Square::from_str(&m[..2]).unwrap(),
Square::from_str(&m[2..4]).unwrap(),
promo,
)
}
#[test]
fn test_masked_move_gen() {
let board =
Board::from_str("r1bqkb1r/pp3ppp/5n2/2ppn1N1/4pP2/1BN1P3/PPPP2PP/R1BQ1RK1 w kq - 0 9")
.unwrap();
let mut capture_moves = Move | {
movegen_perft_test("8/5bk1/8/2Pp4/8/1K6/8/8 w - d6 0 1".to_owned(), 6, 824064);
// Invalid FEN
} | identifier_body |
update_menu.rs | //! Buttons and Dropdowns.
use plotly_derive::FieldSetter;
use serde::Serialize;
use serde_json::{Map, Value};
use crate::{
color::Color,
common::{Anchor, Font, Pad},
Relayout, Restyle,
};
/// Sets the Plotly method to be called on click. If the `skip` method is used,
/// the API updatemenu will function as normal but will perform no API calls and
/// will not bind automatically to state updates. This may be used to create a
/// component interface and attach to updatemenu events manually via JavaScript.
#[derive(Serialize, Debug, Copy, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ButtonMethod {
/// The restyle method should be used when modifying the data and data
/// attributes of the graph
Restyle,
/// The relayout method should be used when modifying the layout attributes
/// of the graph.
Relayout,
Animate,
/// The update method should be used when modifying the data and layout
/// sections of the graph.
Update,
Skip,
}
#[serde_with::skip_serializing_none]
#[derive(Serialize, Clone, Debug, FieldSetter)]
pub struct Button {
/// Sets the argument values to be passed to the Plotly method set in
/// `method` on click.
args: Option<Value>,
/// Sets a 2nd set of `args`; these argument values are passed to the
/// Plotly method set in `method` when clicking this button while in the
/// active state. Use this to create toggle buttons.
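/// A hedged sketch of a toggle button (the field setters come from the
/// `FieldSetter` derive, as exercised in the tests below; `json!` is from
/// `serde_json`):
///
/// ```ignore
/// let toggle = Button::new()
///     .label("Show/Hide")
///     .method(ButtonMethod::Restyle)
///     .args(json!([{ "visible": [true] }]))    // applied on click
///     .args2(json!([{ "visible": [false] }])); // applied when already active
/// ```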
args2: Option<Value>,
/// When true, the API method is executed. When false, all other behaviors
/// are the same and command execution is skipped. This may be useful
/// when hooking into, for example, the `plotly_buttonclicked` method
/// and executing the API command manually without losing the benefit of
/// the updatemenu automatically binding to the state of the plot through
/// the specification of `method` and `args`.
///
/// Default: true
execute: Option<bool>,
/// Sets the text label to appear on the button.
label: Option<String>,
/// Sets the Plotly method to be called on click. If the `skip` method is
/// used, the API updatemenu will function as normal but will perform no
/// API calls and will not bind automatically to state updates. This may
/// be used to create a component interface and attach to updatemenu
/// events manually via JavaScript.
method: Option<ButtonMethod>,
/// When used in a template, named items are created in the output figure in
/// addition to any items the figure already has in this array. You can
/// modify these items in the output figure by making your own item with
/// `templateitemname` matching this `name` alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). Has no
/// effect outside of a template.
name: Option<String>,
/// Used to refer to a named item in this array in the template. Named items
/// from the template will be created even without a matching item in
/// the input figure, but you can modify one by making an item with
/// `templateitemname` matching its `name`, alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). If there is
/// no template or no matching item, this item will be hidden unless you
/// explicitly show it with `visible: true`.
#[serde(rename = "templateitemname")]
template_item_name: Option<String>,
/// Determines whether or not this button is visible.
visible: Option<bool>,
}
impl Button {
pub fn new() -> Self {
Default::default()
}
}
/// Builder struct to create buttons which can do restyles and/or relayouts
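/// A hedged usage sketch, mirroring the `test_button_builder` unit test below
/// (`modify_visible`, `modify_title`, and `modify_width` are the setters used
/// there):
///
/// ```ignore
/// let button = ButtonBuilder::new()
///     .label("Label")
///     .push_restyle(Bar::<i32, i32>::modify_visible(vec![Visible::True, Visible::False]))
///     .push_relayout(Layout::modify_title(Title::new("Hello")))
///     .push_relayout(Layout::modify_width(20))
///     .build();
/// // Both restyles and relayouts were pushed, so `method` becomes "update".
/// ```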
#[derive(FieldSetter)]
pub struct ButtonBuilder {
label: Option<String>,
name: Option<String>,
template_item_name: Option<String>,
visible: Option<bool>,
#[field_setter(default = "Map::new()")]
restyles: Map<String, Value>,
#[field_setter(default = "Map::new()")]
relayouts: Map<String, Value>,
}
impl ButtonBuilder {
pub fn new() -> Self {
Default::default()
}
pub fn push_restyle(mut self, restyle: impl Restyle + Serialize) -> Self {
let restyle = serde_json::to_value(&restyle).unwrap();
for (k, v) in restyle.as_object().unwrap() {
self.restyles.insert(k.clone(), v.clone());
}
self
}
pub fn push_relayout(mut self, relayout: impl Relayout + Serialize) -> Self {
let relayout = serde_json::to_value(&relayout).unwrap();
for (k, v) in relayout.as_object().unwrap() {
self.relayouts.insert(k.clone(), v.clone());
}
self
}
fn method_and_args(
restyles: Map<String, Value>,
relayouts: Map<String, Value>,
) -> (ButtonMethod, Value) {
match (restyles.is_empty(), relayouts.is_empty()) {
(true, true) => (ButtonMethod::Skip, Value::Null),
(false, true) => (ButtonMethod::Restyle, vec![restyles].into()),
(true, false) => (ButtonMethod::Relayout, vec![relayouts].into()),
(false, false) => (ButtonMethod::Update, vec![restyles, relayouts].into()),
}
}
pub fn build(self) -> Button {
let (method, args) = Self::method_and_args(self.restyles, self.relayouts);
Button {
label: self.label,
args: Some(args),
method: Some(method),
name: self.name,
template_item_name: self.template_item_name,
visible: self.visible,
..Default::default()
}
}
}
/// Determines whether the buttons are accessible via a dropdown menu or whether
/// the buttons are stacked horizontally or vertically
///
/// Default: "dropdown"
#[derive(Serialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum UpdateMenuType {
Dropdown,
Buttons,
}
/// Determines the direction in which the buttons are laid out, whether in a
/// dropdown menu or a row/column of buttons. For `left` and `up`, the buttons
/// will still appear in left-to-right or top-to-bottom order respectively.
///
/// Default: "down"
#[derive(Serialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum UpdateMenuDirection {
Left,
Right,
Up,
Down,
}
#[serde_with::skip_serializing_none]
#[derive(Serialize, Debug, FieldSetter, Clone)]
pub struct UpdateMenu {
/// Determines which button (by index starting from 0) is considered active.
active: Option<i32>,
/// Sets the background color of the update menu buttons.
#[serde(rename = "bgcolor")]
background_color: Option<Box<dyn Color>>,
/// Sets the color of the border enclosing the update menu.
#[serde(rename = "bordercolor")]
border_color: Option<Box<dyn Color>>,
/// Sets the width (in px) of the border enclosing the update menu.
#[serde(rename = "borderwidth")]
border_width: Option<usize>,
buttons: Option<Vec<Button>>,
/// Determines the direction in which the buttons are laid out, whether in
/// a dropdown menu or a row/column of buttons. For `left` and `up`,
/// the buttons will still appear in left-to-right or top-to-bottom order
/// respectively.
direction: Option<UpdateMenuDirection>,
/// Sets the font of the update menu button text.
font: Option<Font>,
/// When used in a template, named items are created in the output figure in
/// addition to any items the figure already has in this array. You can
/// modify these items in the output figure by making your own item with
/// `templateitemname` matching this `name` alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). Has no
/// effect outside of a template.
name: Option<String>,
/// Sets the padding around the buttons or dropdown menu.
pad: Option<Pad>,
/// Highlights active dropdown item or active button if true.
#[serde(rename = "showactive")]
show_active: Option<bool>,
/// Used to refer to a named item in this array in the template. Named items
/// from the template will be created even without a matching item in
/// the input figure, but you can modify one by making an item with
/// `templateitemname` matching its `name`, alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). If there is
/// no template or no matching item, this item will be hidden unless you
/// explicitly show it with `visible: true`.
template_item_name: Option<String>,
/// Determines whether the buttons are accessible via a dropdown menu or
/// whether the buttons are stacked horizontally or vertically
#[serde(rename = "type")]
ty: Option<UpdateMenuType>,
/// Determines whether or not the update menu is visible.
visible: Option<bool>,
/// Type: number between or equal to -2 and 3
/// Default: -0.05
/// Sets the x position (in normalized coordinates) of the update menu.
x: Option<f64>,
/// Sets the update menu's horizontal position anchor. This anchor binds the
/// `x` position to the "left", "center" or "right" of the range
/// selector. Default: "right"
#[serde(rename = "xanchor")]
x_anchor: Option<Anchor>,
/// Type: number between or equal to -2 and 3
/// Default: 1
/// Sets the y position (in normalized coordinates) of the update menu.
y: Option<f64>,
/// Sets the update menu's vertical position anchor. This anchor binds the
/// `y` position to the "top", "middle" or "bottom" of the range
/// selector. Default: "top"
#[serde(rename = "yanchor")]
y_anchor: Option<Anchor>,
}
impl UpdateMenu {
pub fn new() -> Self {
Default::default()
}
}
#[cfg(test)]
mod tests {
use serde_json::{json, to_value};
use super::*;
use crate::{
common::{Title, Visible},
Layout,
};
#[test]
fn test_serialize_button_method() {
assert_eq!(to_value(ButtonMethod::Restyle).unwrap(), json!("restyle"));
assert_eq!(to_value(ButtonMethod::Relayout).unwrap(), json!("relayout"));
assert_eq!(to_value(ButtonMethod::Animate).unwrap(), json!("animate"));
assert_eq!(to_value(ButtonMethod::Update).unwrap(), json!("update"));
assert_eq!(to_value(ButtonMethod::Skip).unwrap(), json!("skip"));
}
#[test]
fn | () {
let button = Button::new()
.args(json!([
{ "visible": [true, false] },
{ "width": 20},
]))
.args2(json!([]))
.execute(true)
.label("Label")
.method(ButtonMethod::Update)
.name("Name")
.template_item_name("Template")
.visible(true);
let expected = json!({
"args": [
{ "visible": [true, false] },
{ "width": 20},
],
"args2": [],
"execute": true,
"label": "Label",
"method": "update",
"name": "Name",
"templateitemname": "Template",
"visible": true,
});
assert_eq!(to_value(button).unwrap(), expected);
}
#[test]
fn test_button_builder() {
let expected = json!({
"args": [
{ "visible": [true, false] },
{ "title": {"text": "Hello"}, "width": 20},
],
"label": "Label",
"method": "update",
"name": "Name",
"templateitemname": "Template",
"visible": true,
});
let button = ButtonBuilder::new()
.label("Label")
.name("Name")
.template_item_name("Template")
.visible(true)
.push_restyle(crate::Bar::<i32, i32>::modify_visible(vec![
Visible::True,
Visible::False,
]))
.push_relayout(Layout::modify_title(Title::new("Hello")))
.push_relayout(Layout::modify_width(20))
.build();
assert_eq!(to_value(button).unwrap(), expected);
}
}
| test_serialize_button | identifier_name |
update_menu.rs | use plotly_derive::FieldSetter;
use serde::Serialize;
use serde_json::{Map, Value};
use crate::{
color::Color,
common::{Anchor, Font, Pad},
Relayout, Restyle,
};
/// Sets the Plotly method to be called on click. If the `skip` method is used,
/// the API updatemenu will function as normal but will perform no API calls and
/// will not bind automatically to state updates. This may be used to create a
/// component interface and attach to updatemenu events manually via JavaScript.
#[derive(Serialize, Debug, Copy, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ButtonMethod {
/// The restyle method should be used when modifying the data and data
/// attributes of the graph
Restyle,
/// The relayout method should be used when modifying the layout attributes
/// of the graph.
Relayout,
Animate,
/// The update method should be used when modifying the data and layout
/// sections of the graph.
Update,
Skip,
}
#[serde_with::skip_serializing_none]
#[derive(Serialize, Clone, Debug, FieldSetter)]
pub struct Button {
/// Sets the argument values to be passed to the Plotly method set in
/// `method` on click.
args: Option<Value>,
/// Sets a 2nd set of `args`; these argument values are passed to the
/// Plotly method set in `method` when clicking this button while in the
/// active state. Use this to create toggle buttons.
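/// A hedged sketch of a toggle button (the field setters come from the
/// `FieldSetter` derive, as exercised in the tests below; `json!` is from
/// `serde_json`):
///
/// ```ignore
/// let toggle = Button::new()
///     .label("Show/Hide")
///     .method(ButtonMethod::Restyle)
///     .args(json!([{ "visible": [true] }]))    // applied on click
///     .args2(json!([{ "visible": [false] }])); // applied when already active
/// ```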
args2: Option<Value>,
/// When true, the API method is executed. When false, all other behaviors
/// are the same and command execution is skipped. This may be useful
/// when hooking into, for example, the `plotly_buttonclicked` method
/// and executing the API command manually without losing the benefit of
/// the updatemenu automatically binding to the state of the plot through
/// the specification of `method` and `args`.
///
/// Default: true
execute: Option<bool>,
/// Sets the text label to appear on the button.
label: Option<String>,
/// Sets the Plotly method to be called on click. If the `skip` method is
/// used, the API updatemenu will function as normal but will perform no
/// API calls and will not bind automatically to state updates. This may
/// be used to create a component interface and attach to updatemenu
/// events manually via JavaScript.
method: Option<ButtonMethod>,
/// When used in a template, named items are created in the output figure in
/// addition to any items the figure already has in this array. You can
/// modify these items in the output figure by making your own item with
/// `templateitemname` matching this `name` alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). Has no
/// effect outside of a template.
name: Option<String>,
/// Used to refer to a named item in this array in the template. Named items
/// from the template will be created even without a matching item in
/// the input figure, but you can modify one by making an item with
/// `templateitemname` matching its `name`, alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). If there is
/// no template or no matching item, this item will be hidden unless you
/// explicitly show it with `visible: true`
#[serde(rename = "templateitemname")]
template_item_name: Option<String>,
/// Determines whether or not this button is visible.
visible: Option<bool>,
}
impl Button {
pub fn new() -> Self {
Default::default()
}
}
/// Builder struct to create buttons which can do restyles and/or relayouts
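/// A hedged usage sketch, mirroring the `test_button_builder` unit test below
/// (`modify_visible`, `modify_title`, and `modify_width` are the setters used
/// there):
///
/// ```ignore
/// let button = ButtonBuilder::new()
///     .label("Label")
///     .push_restyle(Bar::<i32, i32>::modify_visible(vec![Visible::True, Visible::False]))
///     .push_relayout(Layout::modify_title(Title::new("Hello")))
///     .push_relayout(Layout::modify_width(20))
///     .build();
/// // Both restyles and relayouts were pushed, so `method` becomes "update".
/// ```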
#[derive(FieldSetter)]
pub struct ButtonBuilder {
label: Option<String>,
name: Option<String>,
template_item_name: Option<String>,
visible: Option<bool>,
#[field_setter(default = "Map::new()")]
restyles: Map<String, Value>,
#[field_setter(default = "Map::new()")]
relayouts: Map<String, Value>,
}
impl ButtonBuilder {
pub fn new() -> Self {
Default::default()
}
pub fn push_restyle(mut self, restyle: impl Restyle + Serialize) -> Self {
let restyle = serde_json::to_value(&restyle).unwrap();
for (k, v) in restyle.as_object().unwrap() {
self.restyles.insert(k.clone(), v.clone());
}
self
}
pub fn push_relayout(mut self, relayout: impl Relayout + Serialize) -> Self {
let relayout = serde_json::to_value(&relayout).unwrap();
for (k, v) in relayout.as_object().unwrap() {
self.relayouts.insert(k.clone(), v.clone());
}
self
}
fn method_and_args(
restyles: Map<String, Value>,
relayouts: Map<String, Value>,
) -> (ButtonMethod, Value) {
match (restyles.is_empty(), relayouts.is_empty()) {
(true, true) => (ButtonMethod::Skip, Value::Null),
(false, true) => (ButtonMethod::Restyle, vec![restyles].into()),
(true, false) => (ButtonMethod::Relayout, vec![relayouts].into()),
(false, false) => (ButtonMethod::Update, vec![restyles, relayouts].into()),
}
}
pub fn build(self) -> Button {
let (method, args) = Self::method_and_args(self.restyles, self.relayouts);
Button {
label: self.label,
args: Some(args),
method: Some(method),
name: self.name,
template_item_name: self.template_item_name,
visible: self.visible,
..Default::default()
}
}
}
/// Determines whether the buttons are accessible via a dropdown menu or whether
/// the buttons are stacked horizontally or vertically
///
/// Default: "dropdown"
#[derive(Serialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum UpdateMenuType {
Dropdown,
Buttons,
}
/// Determines the direction in which the buttons are laid out, whether in a
/// dropdown menu or a row/column of buttons. For `left` and `up`, the buttons
/// will still appear in left-to-right or top-to-bottom order respectively.
///
/// Default: "down"
#[derive(Serialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum UpdateMenuDirection {
Left,
Right,
Up,
Down,
}
#[serde_with::skip_serializing_none]
#[derive(Serialize, Debug, FieldSetter, Clone)]
pub struct UpdateMenu {
/// Determines which button (by index starting from 0) is considered active.
active: Option<i32>,
/// Sets the background color of the update menu buttons.
#[serde(rename = "bgcolor")]
background_color: Option<Box<dyn Color>>,
/// Sets the color of the border enclosing the update menu.
#[serde(rename = "bordercolor")]
border_color: Option<Box<dyn Color>>,
/// Sets the width (in px) of the border enclosing the update menu.
#[serde(rename = "borderwidth")]
border_width: Option<usize>,
buttons: Option<Vec<Button>>,
/// Determines the direction in which the buttons are laid out, whether in
/// a dropdown menu or a row/column of buttons. For `left` and `up`,
/// the buttons will still appear in left-to-right or top-to-bottom order
/// respectively.
direction: Option<UpdateMenuDirection>,
/// Sets the font of the update menu button text.
font: Option<Font>,
/// When used in a template, named items are created in the output figure in
/// addition to any items the figure already has in this array. You can
/// modify these items in the output figure by making your own item with
/// `templateitemname` matching this `name` alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). Has no
/// effect outside of a template.
name: Option<String>,
/// Sets the padding around the buttons or dropdown menu.
pad: Option<Pad>,
/// Highlights active dropdown item or active button if true.
#[serde(rename = "showactive")]
show_active: Option<bool>,
/// Used to refer to a named item in this array in the template. Named items
/// from the template will be created even without a matching item in
/// the input figure, but you can modify one by making an item with
/// `templateitemname` matching its `name`, alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). If there is
/// no template or no matching item, this item will be hidden unless you
/// explicitly show it with `visible: true`.
template_item_name: Option<String>,
/// Determines whether the buttons are accessible via a dropdown menu or
/// whether the buttons are stacked horizontally or vertically
#[serde(rename = "type")]
ty: Option<UpdateMenuType>,
/// Determines whether or not the update menu is visible.
visible: Option<bool>,
/// Type: number between or equal to -2 and 3
/// Default: -0.05
/// Sets the x position (in normalized coordinates) of the update menu.
x: Option<f64>,
/// Sets the update menu's horizontal position anchor. This anchor binds the
/// `x` position to the "left", "center" or "right" of the range
/// selector. Default: "right"
#[serde(rename = "xanchor")]
x_anchor: Option<Anchor>,
/// Type: number between or equal to -2 and 3
/// Default: 1
/// Sets the y position (in normalized coordinates) of the update menu.
y: Option<f64>,
/// Sets the update menu's vertical position anchor. This anchor binds the
/// `y` position to the "top", "middle" or "bottom" of the range
/// selector. Default: "top"
#[serde(rename = "yanchor")]
y_anchor: Option<Anchor>,
}
impl UpdateMenu {
pub fn new() -> Self {
Default::default()
}
}
#[cfg(test)]
mod tests {
use serde_json::{json, to_value};
use super::*;
use crate::{
common::{Title, Visible},
Layout,
};
#[test]
fn test_serialize_button_method() {
assert_eq!(to_value(ButtonMethod::Restyle).unwrap(), json!("restyle"));
assert_eq!(to_value(ButtonMethod::Relayout).unwrap(), json!("relayout"));
assert_eq!(to_value(ButtonMethod::Animate).unwrap(), json!("animate"));
assert_eq!(to_value(ButtonMethod::Update).unwrap(), json!("update"));
assert_eq!(to_value(ButtonMethod::Skip).unwrap(), json!("skip"));
}
#[test]
fn test_serialize_button() {
let button = Button::new()
.args(json!([
{ "visible": [true, false] },
{ "width": 20},
]))
.args2(json!([]))
.execute(true)
.label("Label")
.method(ButtonMethod::Update)
.name("Name")
.template_item_name("Template")
.visible(true);
let expected = json!({
"args": [
{ "visible": [true, false] },
{ "width": 20},
],
"args2": [],
"execute": true,
"label": "Label",
"method": "update",
"name": "Name",
"templateitemname": "Template",
"visible": true,
});
assert_eq!(to_value(button).unwrap(), expected);
}
#[test]
fn test_button_builder() {
let expected = json!({
"args": [
{ "visible": [true, false] },
{ "title": {"text": "Hello"}, "width": 20},
],
"label": "Label",
"method": "update",
"name": "Name",
"templateitemname": "Template",
"visible": true,
});
let button = ButtonBuilder::new()
.label("Label")
.name("Name")
.template_item_name("Template")
.visible(true)
.push_restyle(crate::Bar::<i32, i32>::modify_visible(vec![
Visible::True,
Visible::False,
]))
.push_relayout(Layout::modify_title(Title::new("Hello")))
.push_relayout(Layout::modify_width(20))
.build();
assert_eq!(to_value(button).unwrap(), expected);
}
} | //! Buttons and Dropdowns.
| random_line_split |
|
update_menu.rs | //! Buttons and Dropdowns.
use plotly_derive::FieldSetter;
use serde::Serialize;
use serde_json::{Map, Value};
use crate::{
color::Color,
common::{Anchor, Font, Pad},
Relayout, Restyle,
};
/// Sets the Plotly method to be called on click. If the `skip` method is used,
/// the API updatemenu will function as normal but will perform no API calls and
/// will not bind automatically to state updates. This may be used to create a
/// component interface and attach to updatemenu events manually via JavaScript.
#[derive(Serialize, Debug, Copy, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ButtonMethod {
/// The restyle method should be used when modifying the data and data
/// attributes of the graph
Restyle,
/// The relayout method should be used when modifying the layout attributes
/// of the graph.
Relayout,
Animate,
/// The update method should be used when modifying the data and layout
/// sections of the graph.
Update,
Skip,
}
#[serde_with::skip_serializing_none]
#[derive(Serialize, Clone, Debug, FieldSetter)]
pub struct Button {
/// Sets the argument values to be passed to the Plotly method set in
/// `method` on click.
args: Option<Value>,
/// Sets a 2nd set of `args`; these argument values are passed to the
/// Plotly method set in `method` when clicking this button while in the
/// active state. Use this to create toggle buttons.
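/// A hedged sketch of a toggle button (the field setters come from the
/// `FieldSetter` derive, as exercised in the tests below; `json!` is from
/// `serde_json`):
///
/// ```ignore
/// let toggle = Button::new()
///     .label("Show/Hide")
///     .method(ButtonMethod::Restyle)
///     .args(json!([{ "visible": [true] }]))    // applied on click
///     .args2(json!([{ "visible": [false] }])); // applied when already active
/// ```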
args2: Option<Value>,
/// When true, the API method is executed. When false, all other behaviors
/// are the same and command execution is skipped. This may be useful
/// when hooking into, for example, the `plotly_buttonclicked` method
/// and executing the API command manually without losing the benefit of
/// the updatemenu automatically binding to the state of the plot through
/// the specification of `method` and `args`.
///
/// Default: true
execute: Option<bool>,
/// Sets the text label to appear on the button.
label: Option<String>,
/// Sets the Plotly method to be called on click. If the `skip` method is
/// used, the API updatemenu will function as normal but will perform no
/// API calls and will not bind automatically to state updates. This may
/// be used to create a component interface and attach to updatemenu
/// events manually via JavaScript.
method: Option<ButtonMethod>,
/// When used in a template, named items are created in the output figure in
/// addition to any items the figure already has in this array. You can
/// modify these items in the output figure by making your own item with
/// `templateitemname` matching this `name` alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). Has no
/// effect outside of a template.
name: Option<String>,
/// Used to refer to a named item in this array in the template. Named items
/// from the template will be created even without a matching item in
/// the input figure, but you can modify one by making an item with
/// `templateitemname` matching its `name`, alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). If there is
/// no template or no matching item, this item will be hidden unless you
/// explicitly show it with `visible: true`.
#[serde(rename = "templateitemname")]
template_item_name: Option<String>,
/// Determines whether or not this button is visible.
visible: Option<bool>,
}
impl Button {
pub fn new() -> Self {
Default::default()
}
}
/// Builder struct to create buttons which can do restyles and/or relayouts
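/// A hedged usage sketch, mirroring the `test_button_builder` unit test below
/// (`modify_visible`, `modify_title`, and `modify_width` are the setters used
/// there):
///
/// ```ignore
/// let button = ButtonBuilder::new()
///     .label("Label")
///     .push_restyle(Bar::<i32, i32>::modify_visible(vec![Visible::True, Visible::False]))
///     .push_relayout(Layout::modify_title(Title::new("Hello")))
///     .push_relayout(Layout::modify_width(20))
///     .build();
/// // Both restyles and relayouts were pushed, so `method` becomes "update".
/// ```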
#[derive(FieldSetter)]
pub struct ButtonBuilder {
label: Option<String>,
name: Option<String>,
template_item_name: Option<String>,
visible: Option<bool>,
#[field_setter(default = "Map::new()")]
restyles: Map<String, Value>,
#[field_setter(default = "Map::new()")]
relayouts: Map<String, Value>,
}
impl ButtonBuilder {
pub fn new() -> Self {
Default::default()
}
pub fn push_restyle(mut self, restyle: impl Restyle + Serialize) -> Self {
let restyle = serde_json::to_value(&restyle).unwrap();
for (k, v) in restyle.as_object().unwrap() {
self.restyles.insert(k.clone(), v.clone());
}
self
}
pub fn push_relayout(mut self, relayout: impl Relayout + Serialize) -> Self {
let relayout = serde_json::to_value(&relayout).unwrap();
for (k, v) in relayout.as_object().unwrap() {
self.relayouts.insert(k.clone(), v.clone());
}
self
}
fn method_and_args(
restyles: Map<String, Value>,
relayouts: Map<String, Value>,
) -> (ButtonMethod, Value) {
match (restyles.is_empty(), relayouts.is_empty()) {
(true, true) => (ButtonMethod::Skip, Value::Null),
(false, true) => (ButtonMethod::Restyle, vec![restyles].into()),
(true, false) => (ButtonMethod::Relayout, vec![relayouts].into()),
(false, false) => (ButtonMethod::Update, vec![restyles, relayouts].into()),
}
}
pub fn build(self) -> Button {
let (method, args) = Self::method_and_args(self.restyles, self.relayouts);
Button {
label: self.label,
args: Some(args),
method: Some(method),
name: self.name,
template_item_name: self.template_item_name,
visible: self.visible,
..Default::default()
}
}
}
/// Determines whether the buttons are accessible via a dropdown menu or whether
/// the buttons are stacked horizontally or vertically
///
/// Default: "dropdown"
#[derive(Serialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum UpdateMenuType {
Dropdown,
Buttons,
}
/// Determines the direction in which the buttons are laid out, whether in a
/// dropdown menu or a row/column of buttons. For `left` and `up`, the buttons
/// will still appear in left-to-right or top-to-bottom order respectively.
///
/// Default: "down"
#[derive(Serialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum UpdateMenuDirection {
Left,
Right,
Up,
Down,
}
#[serde_with::skip_serializing_none]
#[derive(Serialize, Debug, FieldSetter, Clone)]
pub struct UpdateMenu {
/// Determines which button (by index starting from 0) is considered active.
active: Option<i32>,
/// Sets the background color of the update menu buttons.
#[serde(rename = "bgcolor")]
background_color: Option<Box<dyn Color>>,
/// Sets the color of the border enclosing the update menu.
#[serde(rename = "bordercolor")]
border_color: Option<Box<dyn Color>>,
/// Sets the width (in px) of the border enclosing the update menu.
#[serde(rename = "borderwidth")]
border_width: Option<usize>,
buttons: Option<Vec<Button>>,
/// Determines the direction in which the buttons are laid out, whether in
/// a dropdown menu or a row/column of buttons. For `left` and `up`,
/// the buttons will still appear in left-to-right or top-to-bottom order
/// respectively.
direction: Option<UpdateMenuDirection>,
/// Sets the font of the update menu button text.
font: Option<Font>,
/// When used in a template, named items are created in the output figure in
/// addition to any items the figure already has in this array. You can
/// modify these items in the output figure by making your own item with
/// `templateitemname` matching this `name` alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). Has no
/// effect outside of a template.
name: Option<String>,
/// Sets the padding around the buttons or dropdown menu.
pad: Option<Pad>,
/// Highlights active dropdown item or active button if true.
#[serde(rename = "showactive")]
show_active: Option<bool>,
/// Used to refer to a named item in this array in the template. Named items
/// from the template will be created even without a matching item in
/// the input figure, but you can modify one by making an item with
/// `templateitemname` matching its `name`, alongside your modifications
/// (including `visible: false` or `enabled: false` to hide it). If there is
/// no template or no matching item, this item will be hidden unless you
/// explicitly show it with `visible: true`.
template_item_name: Option<String>,
/// Determines whether the buttons are accessible via a dropdown menu or
/// whether the buttons are stacked horizontally or vertically
#[serde(rename = "type")]
ty: Option<UpdateMenuType>,
/// Determines whether or not the update menu is visible.
visible: Option<bool>,
/// Type: number between or equal to -2 and 3
/// Default: -0.05
/// Sets the x position (in normalized coordinates) of the update menu.
x: Option<f64>,
/// Sets the update menu's horizontal position anchor. This anchor binds the
/// `x` position to the "left", "center" or "right" of the range
/// selector. Default: "right"
#[serde(rename = "xanchor")]
x_anchor: Option<Anchor>,
/// Type: number between or equal to -2 and 3
/// Default: 1
/// Sets the y position (in normalized coordinates) of the update menu.
y: Option<f64>,
/// Sets the update menu's vertical position anchor. This anchor binds the
/// `y` position to the "top", "middle" or "bottom" of the range
/// selector. Default: "top"
#[serde(rename = "yanchor")]
y_anchor: Option<Anchor>,
}
impl UpdateMenu {
pub fn new() -> Self {
Default::default()
}
}
#[cfg(test)]
mod tests {
use serde_json::{json, to_value};
use super::*;
use crate::{
common::{Title, Visible},
Layout,
};
#[test]
fn test_serialize_button_method() {
assert_eq!(to_value(ButtonMethod::Restyle).unwrap(), json!("restyle"));
assert_eq!(to_value(ButtonMethod::Relayout).unwrap(), json!("relayout"));
assert_eq!(to_value(ButtonMethod::Animate).unwrap(), json!("animate"));
assert_eq!(to_value(ButtonMethod::Update).unwrap(), json!("update"));
assert_eq!(to_value(ButtonMethod::Skip).unwrap(), json!("skip"));
}
#[test]
fn test_serialize_button() | "execute": true,
"label": "Label",
"method": "update",
"name": "Name",
"templateitemname": "Template",
"visible": true,
});
assert_eq!(to_value(button).unwrap(), expected);
}
#[test]
fn test_button_builder() {
let expected = json!({
"args": [
{ "visible": [true, false] },
{ "title": {"text": "Hello"}, "width": 20},
],
"label": "Label",
"method": "update",
"name": "Name",
"templateitemname": "Template",
"visible": true,
});
let button = ButtonBuilder::new()
.label("Label")
.name("Name")
.template_item_name("Template")
.visible(true)
.push_restyle(crate::Bar::<i32, i32>::modify_visible(vec![
Visible::True,
Visible::False,
]))
.push_relayout(Layout::modify_title(Title::new("Hello")))
.push_relayout(Layout::modify_width(20))
.build();
assert_eq!(to_value(button).unwrap(), expected);
}
}
| {
let button = Button::new()
.args(json!([
{ "visible": [true, false] },
{ "width": 20},
]))
.args2(json!([]))
.execute(true)
.label("Label")
.method(ButtonMethod::Update)
.name("Name")
.template_item_name("Template")
.visible(true);
let expected = json!({
"args": [
{ "visible": [true, false] },
{ "width": 20},
],
"args2": [], | identifier_body |
index_lookup.rs | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::codec::{Fields, TermIterator, Terms};
use core::codec::{PostingIterator, PostingIteratorFlags};
use core::doc::Term;
use core::search::{Payload, NO_MORE_DOCS};
use core::util::DocId;
use error::{ErrorKind::IllegalState, Result};
use std::collections::hash_map::HashMap;
#[derive(Debug, Serialize, Deserialize)]
pub struct TermPosition {
pub position: i32,
pub start_offset: i32,
pub end_offset: i32,
pub payload: Payload,
}
impl Default for TermPosition {
fn default() -> Self {
TermPosition::new()
}
}
impl TermPosition {
pub fn new() -> TermPosition {
TermPosition {
position: -1,
start_offset: -1,
end_offset: -1,
payload: Payload::with_capacity(0),
}
}
pub fn payload_as_string(&mut self) -> String {
if self.payload.is_empty() {
unimplemented!()
} else {
unimplemented!()
}
}
pub fn payload_as_float(&mut self, default: f32) -> f32 {
if self.payload.is_empty() {
default
} else {
unimplemented!()
}
}
pub fn payload_as_int(&mut self, default: i32) -> i32 {
if self.payload.is_empty() {
default
} else {
unimplemented!()
}
}
}
/// Holds all information on a particular term in a field.
pub struct LeafIndexFieldTerm<T: PostingIterator> {
postings: Option<T>,
flags: u16,
iterator: LeafPositionIterator,
#[allow(dead_code)]
identifier: Term,
freq: i32,
}
impl<T: PostingIterator> LeafIndexFieldTerm<T> {
pub fn new<TI: TermIterator<Postings = T>, Tm: Terms<Iterator = TI>, F: Fields<Terms = Tm>>(
term: &str,
field_name: &str,
flags: u16,
doc_id: DocId,
fields: &F,
) -> Result<Self> {
let identifier = Term::new(field_name.to_string(), term.as_bytes().to_vec());
if let Some(terms) = fields.terms(identifier.field())? {
let mut terms_iterator = terms.iterator()?;
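// Seek the exact term bytes in the field's term dictionary; if found,
// open a postings list with the requested flags and advance it to
// `doc_id` so the term frequency for this document can be read.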
let (postings, freq) = if terms_iterator.seek_exact(identifier.bytes.as_slice())? {
let mut posting = terms_iterator.postings_with_flags(flags)?;
let mut current_doc_pos = posting.doc_id();
if current_doc_pos < doc_id {
current_doc_pos = posting.advance(doc_id)?;
}
let freq = if current_doc_pos == doc_id {
posting.freq()?
} else {
0
};
(Some(posting), freq)
} else {
(None, 0)
};
let mut iterator = LeafPositionIterator::new();
iterator.resetted = false;
iterator.current_pos = 0;
iterator.freq = freq;
Ok(LeafIndexFieldTerm {
postings,
flags,
iterator,
identifier,
freq,
})
} else {
bail!(IllegalState(format!(
"Terms {} for doc {} - field '{}' must not be none!",
term, doc_id, field_name
)));
}
}
pub fn tf(&self) -> i32 {
self.freq
}
fn current_doc(&self) -> DocId {
if let Some(ref postings) = self.postings {
postings.doc_id()
} else |
}
pub fn set_document(&mut self, doc_id: i32) -> Result<()> {
let mut current_doc_pos = self.current_doc();
if current_doc_pos < doc_id {
current_doc_pos = self.postings.as_mut().unwrap().advance(doc_id)?;
}
if current_doc_pos == doc_id && doc_id < NO_MORE_DOCS {
self.freq = self.postings.as_ref().unwrap().freq()?;
} else {
self.freq = 0;
}
self.next_doc();
Ok(())
}
pub fn validate_flags(&self, flags2: u16) -> Result<()> {
if (self.flags & flags2) < flags2 {
panic!(
"You must call get with all required flags! Instead of {} call {} once",
flags2,
flags2 | self.flags
)
} else {
Ok(())
}
}
pub fn has_next(&self) -> bool {
self.iterator.current_pos < self.iterator.freq
}
pub fn next_pos(&mut self) -> Result<TermPosition> {
let term_pos = if let Some(ref mut postings) = self.postings {
TermPosition {
position: postings.next_position()?,
start_offset: postings.start_offset()?,
end_offset: postings.end_offset()?,
payload: postings.payload()?,
}
} else {
TermPosition {
position: -1,
start_offset: -1,
end_offset: -1,
payload: Vec::with_capacity(0),
}
};
self.iterator.current_pos += 1;
Ok(term_pos)
}
pub fn next_doc(&mut self) {
self.iterator.resetted = false;
self.iterator.current_pos = 0;
self.iterator.freq = self.tf();
}
pub fn reset(&mut self) -> Result<()> {
if self.iterator.resetted {
panic!(
"Cannot iterate twice! If you want to iterate more that once, add _CACHE \
explicitly."
)
}
self.iterator.resetted = true;
Ok(())
}
}
pub struct LeafPositionIterator {
resetted: bool,
freq: i32,
current_pos: i32,
}
impl Default for LeafPositionIterator {
fn default() -> Self {
LeafPositionIterator::new()
}
}
impl LeafPositionIterator {
pub fn new() -> LeafPositionIterator {
LeafPositionIterator {
resetted: false,
freq: -1,
current_pos: 0,
}
}
}
pub struct LeafIndexField<T: Fields> {
terms: HashMap<
String,
LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>,
>,
field_name: String,
doc_id: DocId,
fields: T,
}
// Script interface to all information regarding a field.
impl<T: Fields + Clone> LeafIndexField<T> {
pub fn new(field_name: &str, doc_id: DocId, fields: T) -> Self {
LeafIndexField {
terms: HashMap::new(),
field_name: String::from(field_name),
doc_id,
fields,
}
}
pub fn get(
&mut self,
key: &str,
) -> Result<&mut LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>>
{
self.get_with_flags(key, PostingIteratorFlags::FREQS)
}
// TODO: might be good to get the field lengths here somewhere?
// Returns a TermInfo object that can be used to access information on
// specific terms. flags can be set as described in TermInfo.
// TODO: here might be potential for running time improvement? If we knew in
// advance which terms are requested, we could provide an array which the
// user could then iterate over.
//
pub fn get_with_flags(
&mut self,
key: &str,
flags: u16,
) -> Result<&mut LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>>
{
if !self.terms.contains_key(key) {
let index_field_term =
LeafIndexFieldTerm::new(key, &self.field_name, flags, self.doc_id, &self.fields)?;
index_field_term.validate_flags(flags)?;
self.terms.insert(String::from(key), index_field_term);
}
let index_field_term_ref = self.terms.get_mut(key).unwrap();
index_field_term_ref.validate_flags(flags)?;
Ok(index_field_term_ref)
}
pub fn set_doc_id_in_terms(&mut self, doc_id: DocId) -> Result<()> {
for ti in self.terms.values_mut() {
ti.set_document(doc_id)?;
}
Ok(())
}
}
pub struct LeafIndexLookup<T: Fields> {
pub fields: T,
pub doc_id: DocId,
index_fields: HashMap<String, LeafIndexField<T>>,
#[allow(dead_code)]
num_docs: i32,
#[allow(dead_code)]
max_doc: i32,
#[allow(dead_code)]
num_deleted_docs: i32,
}
impl<T: Fields + Clone> LeafIndexLookup<T> {
pub fn new(fields: T) -> LeafIndexLookup<T> {
LeafIndexLookup {
fields,
doc_id: -1,
index_fields: HashMap::new(),
num_docs: -1,
max_doc: -1,
num_deleted_docs: -1,
}
}
pub fn set_document(&mut self, doc_id: DocId) -> Result<()> {
if self.doc_id == doc_id {
return Ok(());
}
// We assume that docs are processed in ascending order of id. If this
// is not the case, we would have to re-initialize all posting lists in
// IndexFieldTerm. TODO: Instead of assert we could also call
// setReaderInFields(); here?
if self.doc_id > doc_id {
// This might happen if the same SearchLookup is used in different
// phases, such as score and fetch phase.
// In this case we do not want to re-initialize posting lists etc.
// because we do not even know if term and field statistics will be
// needed in this new phase.
// Therefore we just remove all IndexFieldTerms.
self.index_fields.clear();
}
self.doc_id = doc_id;
self.set_next_doc_id_in_fields()
}
fn set_next_doc_id_in_fields(&mut self) -> Result<()> {
for stat in self.index_fields.values_mut() {
stat.set_doc_id_in_terms(self.doc_id)?;
}
Ok(())
}
// TODO: here might be potential for running time improvement? If we knew in
// advance which terms are requested, we could provide an array which the
// user could then iterate over.
//
pub fn get(&mut self, key: &str) -> &mut LeafIndexField<T> {
if !self.index_fields.contains_key(key) {
let index_field = LeafIndexField::new(key, self.doc_id, self.fields.clone());
self.index_fields.insert(String::from(key), index_field);
}
self.index_fields.get_mut(key).unwrap()
}
}
| {
NO_MORE_DOCS
} | conditional_block |
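// The row above fills the else-branch of `current_doc` with the NO_MORE_DOCS
// sentinel: a term with no postings behaves like an iterator that is already
// exhausted. A minimal self-contained sketch of that pattern follows; the
// `Postings` stand-in and the sentinel value are illustrative assumptions,
// not the real codec types.
const NO_MORE_DOCS: i32 = i32::MAX;

struct Postings {
    doc: i32,
}

fn current_doc(postings: &Option<Postings>) -> i32 {
    match postings {
        Some(p) => p.doc,
        // A term absent from the segment is treated as exhausted.
        None => NO_MORE_DOCS,
    }
}

fn main() {
    assert_eq!(current_doc(&Some(Postings { doc: 7 })), 7);
    assert_eq!(current_doc(&None), NO_MORE_DOCS);
}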
index_lookup.rs | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::codec::{Fields, TermIterator, Terms};
use core::codec::{PostingIterator, PostingIteratorFlags};
use core::doc::Term;
use core::search::{Payload, NO_MORE_DOCS};
use core::util::DocId;
use error::{ErrorKind::IllegalState, Result};
use std::collections::hash_map::HashMap;
#[derive(Debug, Serialize, Deserialize)]
pub struct TermPosition {
pub position: i32,
pub start_offset: i32,
pub end_offset: i32,
pub payload: Payload,
}
impl Default for TermPosition {
fn default() -> Self {
TermPosition::new()
}
}
impl TermPosition {
pub fn new() -> TermPosition {
TermPosition {
position: -1,
start_offset: -1,
end_offset: -1,
payload: Payload::with_capacity(0),
}
}
pub fn payload_as_string(&mut self) -> String {
if self.payload.is_empty() {
unimplemented!()
} else {
unimplemented!()
}
}
pub fn payload_as_float(&mut self, default: f32) -> f32 {
if self.payload.is_empty() {
default
} else {
unimplemented!()
}
}
pub fn payload_as_int(&mut self, default: i32) -> i32 {
if self.payload.is_empty() {
default
} else {
unimplemented!()
}
}
}
/// Holds all information on a particular term in a field.
pub struct LeafIndexFieldTerm<T: PostingIterator> {
postings: Option<T>,
flags: u16,
iterator: LeafPositionIterator,
#[allow(dead_code)]
identifier: Term,
freq: i32,
}
impl<T: PostingIterator> LeafIndexFieldTerm<T> {
pub fn new<TI: TermIterator<Postings = T>, Tm: Terms<Iterator = TI>, F: Fields<Terms = Tm>>(
term: &str,
field_name: &str,
flags: u16,
doc_id: DocId,
fields: &F,
) -> Result<Self> {
let identifier = Term::new(field_name.to_string(), term.as_bytes().to_vec());
if let Some(terms) = fields.terms(identifier.field())? {
let mut terms_iterator = terms.iterator()?;
let (postings, freq) = if terms_iterator.seek_exact(identifier.bytes.as_slice())? {
let mut posting = terms_iterator.postings_with_flags(flags)?;
let mut current_doc_pos = posting.doc_id();
if current_doc_pos < doc_id {
current_doc_pos = posting.advance(doc_id)?;
}
let freq = if current_doc_pos == doc_id {
posting.freq()?
} else {
0
};
(Some(posting), freq)
} else {
(None, 0)
};
let mut iterator = LeafPositionIterator::new();
iterator.resetted = false;
iterator.current_pos = 0;
iterator.freq = freq;
Ok(LeafIndexFieldTerm {
postings,
flags,
iterator,
identifier,
freq,
})
} else {
bail!(IllegalState(format!(
"Terms {} for doc {} - field '{}' must not be none!",
term, doc_id, field_name
)));
}
}
pub fn tf(&self) -> i32 |
fn current_doc(&self) -> DocId {
if let Some(ref postings) = self.postings {
postings.doc_id()
} else {
NO_MORE_DOCS
}
}
pub fn set_document(&mut self, doc_id: i32) -> Result<()> {
let mut current_doc_pos = self.current_doc();
if current_doc_pos < doc_id {
current_doc_pos = self.postings.as_mut().unwrap().advance(doc_id)?;
}
if current_doc_pos == doc_id && doc_id < NO_MORE_DOCS {
self.freq = self.postings.as_ref().unwrap().freq()?;
} else {
self.freq = 0;
}
self.next_doc();
Ok(())
}
pub fn validate_flags(&self, flags2: u16) -> Result<()> {
if (self.flags & flags2) < flags2 {
panic!(
"You must call get with all required flags! Instead of {} call {} once",
flags2,
flags2 | self.flags
)
} else {
Ok(())
}
}
pub fn has_next(&self) -> bool {
self.iterator.current_pos < self.iterator.freq
}
pub fn next_pos(&mut self) -> Result<TermPosition> {
let term_pos = if let Some(ref mut postings) = self.postings {
TermPosition {
position: postings.next_position()?,
start_offset: postings.start_offset()?,
end_offset: postings.end_offset()?,
payload: postings.payload()?,
}
} else {
TermPosition {
position: -1,
start_offset: -1,
end_offset: -1,
payload: Vec::with_capacity(0),
}
};
self.iterator.current_pos += 1;
Ok(term_pos)
}
pub fn next_doc(&mut self) {
self.iterator.resetted = false;
self.iterator.current_pos = 0;
self.iterator.freq = self.tf();
}
pub fn reset(&mut self) -> Result<()> {
if self.iterator.resetted {
panic!(
"Cannot iterate twice! If you want to iterate more that once, add _CACHE \
explicitly."
)
}
self.iterator.resetted = true;
Ok(())
}
}
pub struct LeafPositionIterator {
resetted: bool,
freq: i32,
current_pos: i32,
}
impl Default for LeafPositionIterator {
fn default() -> Self {
LeafPositionIterator::new()
}
}
impl LeafPositionIterator {
pub fn new() -> LeafPositionIterator {
LeafPositionIterator {
resetted: false,
freq: -1,
current_pos: 0,
}
}
}
pub struct LeafIndexField<T: Fields> {
terms: HashMap<
String,
LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>,
>,
field_name: String,
doc_id: DocId,
fields: T,
}
// Script interface to all information regarding a field.
impl<T: Fields + Clone> LeafIndexField<T> {
pub fn new(field_name: &str, doc_id: DocId, fields: T) -> Self {
LeafIndexField {
terms: HashMap::new(),
field_name: String::from(field_name),
doc_id,
fields,
}
}
pub fn get(
&mut self,
key: &str,
) -> Result<&mut LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>>
{
self.get_with_flags(key, PostingIteratorFlags::FREQS)
}
// TODO: might be good to get the field lengths here somewhere?
// Returns a TermInfo object that can be used to access information on
// specific terms. flags can be set as described in TermInfo.
// TODO: here might be potential for running time improvement? If we knew in
// advance which terms are requested, we could provide an array which the
// user could then iterate over.
//
pub fn get_with_flags(
&mut self,
key: &str,
flags: u16,
) -> Result<&mut LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>>
{
if !self.terms.contains_key(key) {
let index_field_term =
LeafIndexFieldTerm::new(key, &self.field_name, flags, self.doc_id, &self.fields)?;
index_field_term.validate_flags(flags)?;
self.terms.insert(String::from(key), index_field_term);
}
let index_field_term_ref = self.terms.get_mut(key).unwrap();
index_field_term_ref.validate_flags(flags)?;
Ok(index_field_term_ref)
}
pub fn set_doc_id_in_terms(&mut self, doc_id: DocId) -> Result<()> {
for ti in self.terms.values_mut() {
ti.set_document(doc_id)?;
}
Ok(())
}
}
pub struct LeafIndexLookup<T: Fields> {
pub fields: T,
pub doc_id: DocId,
index_fields: HashMap<String, LeafIndexField<T>>,
#[allow(dead_code)]
num_docs: i32,
#[allow(dead_code)]
max_doc: i32,
#[allow(dead_code)]
num_deleted_docs: i32,
}
impl<T: Fields + Clone> LeafIndexLookup<T> {
pub fn new(fields: T) -> LeafIndexLookup<T> {
LeafIndexLookup {
fields,
doc_id: -1,
index_fields: HashMap::new(),
num_docs: -1,
max_doc: -1,
num_deleted_docs: -1,
}
}
pub fn set_document(&mut self, doc_id: DocId) -> Result<()> {
if self.doc_id == doc_id {
return Ok(());
}
// We assume that docs are processed in ascending order of id. If this
// is not the case, we would have to re-initialize all posting lists in
// IndexFieldTerm. TODO: Instead of assert we could also call
// setReaderInFields(); here?
if self.doc_id > doc_id {
// This might happen if the same SearchLookup is used in different
// phases, such as score and fetch phase.
// In this case we do not want to re-initialize posting lists etc.
// because we do not even know if term and field statistics will be
// needed in this new phase.
// Therefore we just remove all IndexFieldTerms.
self.index_fields.clear();
}
self.doc_id = doc_id;
self.set_next_doc_id_in_fields()
}
fn set_next_doc_id_in_fields(&mut self) -> Result<()> {
for stat in self.index_fields.values_mut() {
stat.set_doc_id_in_terms(self.doc_id)?;
}
Ok(())
}
// TODO: here might be potential for running time improvement? If we knew in
// advance which terms are requested, we could provide an array which the
// user could then iterate over.
//
pub fn get(&mut self, key: &str) -> &mut LeafIndexField<T> {
if !self.index_fields.contains_key(key) {
let index_field = LeafIndexField::new(key, self.doc_id, self.fields.clone());
self.index_fields.insert(String::from(key), index_field);
}
self.index_fields.get_mut(key).unwrap()
}
}
| {
self.freq
} | identifier_body |
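// `set_document` above advances the postings iterator and falls back to a
// frequency of 0 when the target document is not present. A runnable toy
// model of that advance/freq flow, with a sorted doc list standing in for
// real postings (the types and names are illustrative only):
struct ToyPostings {
    docs: Vec<(i32, i32)>, // (doc_id, freq), sorted by doc_id
    pos: usize,
}

impl ToyPostings {
    fn doc_id(&self) -> i32 {
        self.docs.get(self.pos).map_or(i32::MAX, |d| d.0)
    }
    // Move to the first document with id >= target, like `advance` above.
    fn advance(&mut self, target: i32) -> i32 {
        while self.pos < self.docs.len() && self.docs[self.pos].0 < target {
            self.pos += 1;
        }
        self.doc_id()
    }
    fn freq(&self) -> i32 {
        self.docs.get(self.pos).map_or(0, |d| d.1)
    }
}

fn freq_for(postings: &mut ToyPostings, doc_id: i32) -> i32 {
    let mut current = postings.doc_id();
    if current < doc_id {
        current = postings.advance(doc_id);
    }
    if current == doc_id {
        postings.freq()
    } else {
        0
    }
}

fn main() {
    let mut p = ToyPostings { docs: vec![(2, 1), (5, 3), (9, 2)], pos: 0 };
    assert_eq!(freq_for(&mut p, 5), 3); // term occurs 3 times in doc 5
    assert_eq!(freq_for(&mut p, 6), 0); // doc 6 is skipped: freq falls back to 0
}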
index_lookup.rs | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::codec::{Fields, TermIterator, Terms};
use core::codec::{PostingIterator, PostingIteratorFlags};
use core::doc::Term;
use core::search::{Payload, NO_MORE_DOCS};
use core::util::DocId;
use error::{ErrorKind::IllegalState, Result};
use std::collections::hash_map::HashMap;
#[derive(Debug, Serialize, Deserialize)]
pub struct TermPosition {
pub position: i32,
pub start_offset: i32,
pub end_offset: i32,
pub payload: Payload,
}
impl Default for TermPosition {
fn default() -> Self {
TermPosition::new()
}
}
impl TermPosition {
pub fn new() -> TermPosition {
TermPosition {
position: -1,
start_offset: -1,
end_offset: -1,
payload: Payload::with_capacity(0),
}
}
pub fn payload_as_string(&mut self) -> String {
if self.payload.is_empty() {
unimplemented!()
} else {
unimplemented!()
}
}
pub fn payload_as_float(&mut self, default: f32) -> f32 {
if self.payload.is_empty() {
default
} else {
unimplemented!()
}
}
pub fn payload_as_int(&mut self, default: i32) -> i32 {
if self.payload.is_empty() {
default
} else {
unimplemented!()
}
}
}
/// Holds all information on a particular term in a field.
pub struct | <T: PostingIterator> {
postings: Option<T>,
flags: u16,
iterator: LeafPositionIterator,
#[allow(dead_code)]
identifier: Term,
freq: i32,
}
impl<T: PostingIterator> LeafIndexFieldTerm<T> {
pub fn new<TI: TermIterator<Postings = T>, Tm: Terms<Iterator = TI>, F: Fields<Terms = Tm>>(
term: &str,
field_name: &str,
flags: u16,
doc_id: DocId,
fields: &F,
) -> Result<Self> {
let identifier = Term::new(field_name.to_string(), term.as_bytes().to_vec());
if let Some(terms) = fields.terms(identifier.field())? {
let mut terms_iterator = terms.iterator()?;
let (postings, freq) = if terms_iterator.seek_exact(identifier.bytes.as_slice())? {
let mut posting = terms_iterator.postings_with_flags(flags)?;
let mut current_doc_pos = posting.doc_id();
if current_doc_pos < doc_id {
current_doc_pos = posting.advance(doc_id)?;
}
let freq = if current_doc_pos == doc_id {
posting.freq()?
} else {
0
};
(Some(posting), freq)
} else {
(None, 0)
};
let mut iterator = LeafPositionIterator::new();
iterator.resetted = false;
iterator.current_pos = 0;
iterator.freq = freq;
Ok(LeafIndexFieldTerm {
postings,
flags,
iterator,
identifier,
freq,
})
} else {
bail!(IllegalState(format!(
"Terms {} for doc {} - field '{}' must not be none!",
term, doc_id, field_name
)));
}
}
pub fn tf(&self) -> i32 {
self.freq
}
fn current_doc(&self) -> DocId {
if let Some(ref postings) = self.postings {
postings.doc_id()
} else {
NO_MORE_DOCS
}
}
pub fn set_document(&mut self, doc_id: i32) -> Result<()> {
let mut current_doc_pos = self.current_doc();
if current_doc_pos < doc_id {
current_doc_pos = self.postings.as_mut().unwrap().advance(doc_id)?;
}
if current_doc_pos == doc_id && doc_id < NO_MORE_DOCS {
self.freq = self.postings.as_ref().unwrap().freq()?;
} else {
self.freq = 0;
}
self.next_doc();
Ok(())
}
pub fn validate_flags(&self, flags2: u16) -> Result<()> {
if (self.flags & flags2) < flags2 {
panic!(
"You must call get with all required flags! Instead of {} call {} once",
flags2,
flags2 | self.flags
)
} else {
Ok(())
}
}
pub fn has_next(&self) -> bool {
self.iterator.current_pos < self.iterator.freq
}
pub fn next_pos(&mut self) -> Result<TermPosition> {
let term_pos = if let Some(ref mut postings) = self.postings {
TermPosition {
position: postings.next_position()?,
start_offset: postings.start_offset()?,
end_offset: postings.end_offset()?,
payload: postings.payload()?,
}
} else {
TermPosition {
position: -1,
start_offset: -1,
end_offset: -1,
payload: Vec::with_capacity(0),
}
};
self.iterator.current_pos += 1;
Ok(term_pos)
}
pub fn next_doc(&mut self) {
self.iterator.resetted = false;
self.iterator.current_pos = 0;
self.iterator.freq = self.tf();
}
pub fn reset(&mut self) -> Result<()> {
if self.iterator.resetted {
panic!(
"Cannot iterate twice! If you want to iterate more that once, add _CACHE \
explicitly."
)
}
self.iterator.resetted = true;
Ok(())
}
}
pub struct LeafPositionIterator {
resetted: bool,
freq: i32,
current_pos: i32,
}
impl Default for LeafPositionIterator {
fn default() -> Self {
LeafPositionIterator::new()
}
}
impl LeafPositionIterator {
pub fn new() -> LeafPositionIterator {
LeafPositionIterator {
resetted: false,
freq: -1,
current_pos: 0,
}
}
}
pub struct LeafIndexField<T: Fields> {
terms: HashMap<
String,
LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>,
>,
field_name: String,
doc_id: DocId,
fields: T,
}
// Script interface to all information regarding a field.
impl<T: Fields + Clone> LeafIndexField<T> {
pub fn new(field_name: &str, doc_id: DocId, fields: T) -> Self {
LeafIndexField {
terms: HashMap::new(),
field_name: String::from(field_name),
doc_id,
fields,
}
}
pub fn get(
&mut self,
key: &str,
) -> Result<&mut LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>>
{
self.get_with_flags(key, PostingIteratorFlags::FREQS)
}
// TODO: might be good to get the field lengths here somewhere?
// Returns a TermInfo object that can be used to access information on
// specific terms. flags can be set as described in TermInfo.
// TODO: here might be potential for running time improvement? If we knew in
// advance which terms are requested, we could provide an array which the
// user could then iterate over.
//
pub fn get_with_flags(
&mut self,
key: &str,
flags: u16,
) -> Result<&mut LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>>
{
if !self.terms.contains_key(key) {
let index_field_term =
LeafIndexFieldTerm::new(key, &self.field_name, flags, self.doc_id, &self.fields)?;
index_field_term.validate_flags(flags)?;
self.terms.insert(String::from(key), index_field_term);
}
let index_field_term_ref = self.terms.get_mut(key).unwrap();
index_field_term_ref.validate_flags(flags)?;
Ok(index_field_term_ref)
}
pub fn set_doc_id_in_terms(&mut self, doc_id: DocId) -> Result<()> {
for ti in self.terms.values_mut() {
ti.set_document(doc_id)?;
}
Ok(())
}
}
pub struct LeafIndexLookup<T: Fields> {
pub fields: T,
pub doc_id: DocId,
index_fields: HashMap<String, LeafIndexField<T>>,
#[allow(dead_code)]
num_docs: i32,
#[allow(dead_code)]
max_doc: i32,
#[allow(dead_code)]
num_deleted_docs: i32,
}
impl<T: Fields + Clone> LeafIndexLookup<T> {
pub fn new(fields: T) -> LeafIndexLookup<T> {
LeafIndexLookup {
fields,
doc_id: -1,
index_fields: HashMap::new(),
num_docs: -1,
max_doc: -1,
num_deleted_docs: -1,
}
}
pub fn set_document(&mut self, doc_id: DocId) -> Result<()> {
if self.doc_id == doc_id {
return Ok(());
}
// We assume that docs are processed in ascending order of id. If this
// is not the case, we would have to re-initialize all posting lists in
// IndexFieldTerm. TODO: Instead of assert we could also call
// setReaderInFields(); here?
if self.doc_id > doc_id {
// This might happen if the same SearchLookup is used in different
// phases, such as score and fetch phase.
// In this case we do not want to re-initialize posting lists etc.
// because we do not even know if term and field statistics will be
// needed in this new phase.
// Therefore we just remove all IndexFieldTerms.
self.index_fields.clear();
}
self.doc_id = doc_id;
self.set_next_doc_id_in_fields()
}
fn set_next_doc_id_in_fields(&mut self) -> Result<()> {
for stat in self.index_fields.values_mut() {
stat.set_doc_id_in_terms(self.doc_id)?;
}
Ok(())
}
// TODO: here might be potential for running time improvement? If we knew in
// advance which terms are requested, we could provide an array which the
// user could then iterate over.
//
pub fn get(&mut self, key: &str) -> &mut LeafIndexField<T> {
if !self.index_fields.contains_key(key) {
let index_field = LeafIndexField::new(key, self.doc_id, self.fields.clone());
self.index_fields.insert(String::from(key), index_field);
}
self.index_fields.get_mut(key).unwrap()
}
}
| LeafIndexFieldTerm | identifier_name |
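// `validate_flags` above rejects a request when `(held & required) < required`,
// i.e. when at least one required bit is missing. A standalone sketch of that
// bit test with made-up flag values (the real PostingIteratorFlags constants
// may differ):
const FREQS: u16 = 0b01;
const POSITIONS: u16 = 0b11; // positions imply freqs in this toy encoding

fn validate_flags(held: u16, required: u16) -> Result<(), String> {
    if (held & required) < required {
        // Masking with `required` keeps only the requested bits, so the result
        // equals `required` exactly when every requested bit is present.
        Err(format!("have {:#06b}, need {:#06b}", held, required))
    } else {
        Ok(())
    }
}

fn main() {
    assert!(validate_flags(POSITIONS, FREQS).is_ok());
    assert!(validate_flags(FREQS, POSITIONS).is_err());
}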
index_lookup.rs | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::codec::{Fields, TermIterator, Terms};
use core::codec::{PostingIterator, PostingIteratorFlags};
use core::doc::Term;
use core::search::{Payload, NO_MORE_DOCS};
use core::util::DocId;
use error::{ErrorKind::IllegalState, Result};
use std::collections::hash_map::HashMap;
#[derive(Debug, Serialize, Deserialize)]
pub struct TermPosition {
pub position: i32,
pub start_offset: i32,
pub end_offset: i32,
pub payload: Payload,
}
impl Default for TermPosition {
fn default() -> Self {
TermPosition::new()
}
}
impl TermPosition {
pub fn new() -> TermPosition {
TermPosition {
position: -1,
start_offset: -1,
end_offset: -1,
payload: Payload::with_capacity(0),
}
}
pub fn payload_as_string(&mut self) -> String {
if self.payload.is_empty() {
unimplemented!()
} else {
unimplemented!()
}
}
pub fn payload_as_float(&mut self, default: f32) -> f32 {
if self.payload.is_empty() {
default
} else {
unimplemented!()
}
}
pub fn payload_as_int(&mut self, default: i32) -> i32 {
if self.payload.is_empty() {
default
} else {
unimplemented!()
}
}
}
/// Holds all information on a particular term in a field.
pub struct LeafIndexFieldTerm<T: PostingIterator> {
postings: Option<T>,
flags: u16,
iterator: LeafPositionIterator,
#[allow(dead_code)]
identifier: Term,
freq: i32,
}
impl<T: PostingIterator> LeafIndexFieldTerm<T> {
pub fn new<TI: TermIterator<Postings = T>, Tm: Terms<Iterator = TI>, F: Fields<Terms = Tm>>(
term: &str,
field_name: &str,
flags: u16,
doc_id: DocId,
fields: &F,
) -> Result<Self> {
let identifier = Term::new(field_name.to_string(), term.as_bytes().to_vec());
if let Some(terms) = fields.terms(identifier.field())? {
let mut terms_iterator = terms.iterator()?;
let (postings, freq) = if terms_iterator.seek_exact(identifier.bytes.as_slice())? {
let mut posting = terms_iterator.postings_with_flags(flags)?;
let mut current_doc_pos = posting.doc_id();
if current_doc_pos < doc_id {
current_doc_pos = posting.advance(doc_id)?;
}
let freq = if current_doc_pos == doc_id {
posting.freq()?
} else {
0
};
(Some(posting), freq)
} else {
(None, 0)
};
let mut iterator = LeafPositionIterator::new();
iterator.resetted = false;
iterator.current_pos = 0;
iterator.freq = freq;
Ok(LeafIndexFieldTerm {
postings,
flags,
iterator,
identifier,
freq,
})
} else {
bail!(IllegalState(format!(
"Terms {} for doc {} - field '{}' must not be none!",
term, doc_id, field_name
)));
}
}
pub fn tf(&self) -> i32 {
self.freq
}
fn current_doc(&self) -> DocId {
if let Some(ref postings) = self.postings {
postings.doc_id()
} else {
NO_MORE_DOCS
}
}
pub fn set_document(&mut self, doc_id: i32) -> Result<()> {
let mut current_doc_pos = self.current_doc();
if current_doc_pos < doc_id {
current_doc_pos = self.postings.as_mut().unwrap().advance(doc_id)?;
}
if current_doc_pos == doc_id && doc_id < NO_MORE_DOCS {
self.freq = self.postings.as_ref().unwrap().freq()?;
} else {
self.freq = 0;
}
self.next_doc();
Ok(())
}
pub fn validate_flags(&self, flags2: u16) -> Result<()> {
if (self.flags & flags2) < flags2 {
panic!(
"You must call get with all required flags! Instead of {} call {} once",
flags2,
flags2 | self.flags
)
} else {
Ok(())
}
}
pub fn has_next(&self) -> bool {
self.iterator.current_pos < self.iterator.freq
}
pub fn next_pos(&mut self) -> Result<TermPosition> {
let term_pos = if let Some(ref mut postings) = self.postings {
TermPosition {
position: postings.next_position()?,
start_offset: postings.start_offset()?,
end_offset: postings.end_offset()?,
payload: postings.payload()?,
}
} else {
TermPosition {
position: -1,
start_offset: -1,
end_offset: -1,
payload: Vec::with_capacity(0),
}
};
self.iterator.current_pos += 1;
Ok(term_pos)
}
pub fn next_doc(&mut self) {
self.iterator.resetted = false;
self.iterator.current_pos = 0;
self.iterator.freq = self.tf();
}
pub fn reset(&mut self) -> Result<()> {
if self.iterator.resetted {
panic!(
"Cannot iterate twice! If you want to iterate more that once, add _CACHE \
explicitly."
)
}
self.iterator.resetted = true;
Ok(())
}
}
pub struct LeafPositionIterator {
resetted: bool,
freq: i32,
current_pos: i32,
}
impl Default for LeafPositionIterator {
fn default() -> Self {
LeafPositionIterator::new()
}
}
impl LeafPositionIterator {
pub fn new() -> LeafPositionIterator {
LeafPositionIterator {
resetted: false,
freq: -1,
current_pos: 0,
}
}
}
pub struct LeafIndexField<T: Fields> { | LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>,
>,
field_name: String,
doc_id: DocId,
fields: T,
}
// Script interface to all information regarding a field.
impl<T: Fields + Clone> LeafIndexField<T> {
pub fn new(field_name: &str, doc_id: DocId, fields: T) -> Self {
LeafIndexField {
terms: HashMap::new(),
field_name: String::from(field_name),
doc_id,
fields,
}
}
pub fn get(
&mut self,
key: &str,
) -> Result<&mut LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>>
{
self.get_with_flags(key, PostingIteratorFlags::FREQS)
}
// TODO: might be good to get the field lengths here somewhere?
// Returns a TermInfo object that can be used to access information on
// specific terms. flags can be set as described in TermInfo.
// TODO: here might be potential for running time improvement? If we knew in
// advance which terms are requested, we could provide an array which the
// user could then iterate over.
//
pub fn get_with_flags(
&mut self,
key: &str,
flags: u16,
) -> Result<&mut LeafIndexFieldTerm<<<T::Terms as Terms>::Iterator as TermIterator>::Postings>>
{
if !self.terms.contains_key(key) {
let index_field_term =
LeafIndexFieldTerm::new(key, &self.field_name, flags, self.doc_id, &self.fields)?;
index_field_term.validate_flags(flags)?;
self.terms.insert(String::from(key), index_field_term);
}
let index_field_term_ref = self.terms.get_mut(key).unwrap();
index_field_term_ref.validate_flags(flags)?;
Ok(index_field_term_ref)
}
pub fn set_doc_id_in_terms(&mut self, doc_id: DocId) -> Result<()> {
for ti in self.terms.values_mut() {
ti.set_document(doc_id)?;
}
Ok(())
}
}
pub struct LeafIndexLookup<T: Fields> {
pub fields: T,
pub doc_id: DocId,
index_fields: HashMap<String, LeafIndexField<T>>,
#[allow(dead_code)]
num_docs: i32,
#[allow(dead_code)]
max_doc: i32,
#[allow(dead_code)]
num_deleted_docs: i32,
}
impl<T: Fields + Clone> LeafIndexLookup<T> {
pub fn new(fields: T) -> LeafIndexLookup<T> {
LeafIndexLookup {
fields,
doc_id: -1,
index_fields: HashMap::new(),
num_docs: -1,
max_doc: -1,
num_deleted_docs: -1,
}
}
pub fn set_document(&mut self, doc_id: DocId) -> Result<()> {
if self.doc_id == doc_id {
return Ok(());
}
// We assume that docs are processed in ascending order of id. If this
// is not the case, we would have to re-initialize all posting lists in
// IndexFieldTerm. TODO: Instead of assert we could also call
// setReaderInFields(); here?
if self.doc_id > doc_id {
// This might happen if the same SearchLookup is used in different
// phases, such as score and fetch phase.
// In this case we do not want to re-initialize posting lists etc.
// because we do not even know if term and field statistics will be
// needed in this new phase.
// Therefore we just remove all IndexFieldTerms.
self.index_fields.clear();
}
self.doc_id = doc_id;
self.set_next_doc_id_in_fields()
}
fn set_next_doc_id_in_fields(&mut self) -> Result<()> {
for stat in self.index_fields.values_mut() {
stat.set_doc_id_in_terms(self.doc_id)?;
}
Ok(())
}
// TODO: here might be potential for running time improvement? If we knew in
// advance which terms are requested, we could provide an array which the
// user could then iterate over.
//
pub fn get(&mut self, key: &str) -> &mut LeafIndexField<T> {
if !self.index_fields.contains_key(key) {
let index_field = LeafIndexField::new(key, self.doc_id, self.fields.clone());
self.index_fields.insert(String::from(key), index_field);
}
self.index_fields.get_mut(key).unwrap()
}
} | terms: HashMap<
String, | random_line_split |
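// `get_with_flags` above caches per-term state via contains_key + insert +
// get_mut, because the cached value is built fallibly (`LeafIndexFieldTerm::new`
// returns a Result). A sketch of the same lazy-cache shape using the hash-map
// entry API instead; the value type and the `build` constructor are
// placeholders for illustration:
use std::collections::hash_map::{Entry, HashMap};

fn build(key: &str) -> Result<Vec<i32>, String> {
    if key.is_empty() {
        Err("empty key".to_string())
    } else {
        Ok(vec![key.len() as i32])
    }
}

fn get_or_try_insert<'a>(
    cache: &'a mut HashMap<String, Vec<i32>>,
    key: &str,
) -> Result<&'a mut Vec<i32>, String> {
    match cache.entry(key.to_string()) {
        Entry::Occupied(e) => Ok(e.into_mut()),
        // The fallible constructor runs only on a cache miss.
        Entry::Vacant(v) => Ok(v.insert(build(key)?)),
    }
}

fn main() -> Result<(), String> {
    let mut cache = HashMap::new();
    get_or_try_insert(&mut cache, "body")?.push(42);
    assert_eq!(cache["body"], vec![4, 42]);
    assert!(get_or_try_insert(&mut cache, "").is_err());
    Ok(())
}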
main.rs | use clap::*;
use gre::*;
use noise::*;
use rand::prelude::*;
use rayon::prelude::*;
use std::f64::consts::PI;
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "297.0")]
pub width: f64,
#[clap(short, long, default_value = "210.0")]
pub height: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
fn heart_function(t: f64) -> (f64, f64) {
let x = 16.0 * f64::sin(t).powi(3);
let y = -13.0 * f64::cos(t)
+ 5.0 * f64::cos(2.0 * t)
+ 2.0 * f64::cos(3.0 * t)
+ f64::cos(4.0 * t);
(x * 0.059, y * 0.059)
}
fn heart_spiral(ox: f64, oy: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let mut points = Vec::new();
let mut t = 0.0;
let mut r = 0.0;
let end_r = radius + 2.0 * PI * dr;
while r < end_r {
let da = 1.0 / (r + 8.0);
t += da;
r += 0.2 * dr * da;
let (x, y) = heart_function(t);
let v = r.min(radius);
let dy = 0.1 * radius * (1. - v / radius);
let p = (x * v + ox, y * v + oy + dy);
points.push(p);
}
// points.extend(circle_route((ox, oy), radius, 100));
points
}
fn heart_nested(
ox: f64,
oy: f64,
radius: f64,
dr: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut r = radius;
while r > 0.1 {
let mut route = vec![];
let count = (2.0 * PI * r / 0.5).floor() as usize;
if count > 3 {
for i in 0..count {
let a = i as f64 * 2.0 * PI / (count as f64);
let (x, y) = heart_function(a);
let p = (x * r + ox, y * r + oy);
route.push(p);
}
route.push(route[0]);
routes.push(route);
}
r -= dr;
}
routes
}
fn heart(ox: f64, oy: f64, r: f64, ang: f64) -> Vec<(f64, f64)> {
let mut route = Vec::new();
let count = (2.0 * PI * r / 0.5).floor() as usize;
for i in 0..count {
let a = i as f64 * 2.0 * PI / (count as f64);
let (x, y) = heart_function(a);
let (x, y) = p_r((x, y), ang);
let p = (x * r + ox, y * r + oy);
route.push(p);
}
route.push(route[0]);
route
}
fn heart_nested_rotating<R: Rng>(
rng: &mut R,
ox: f64,
oy: f64,
radius: f64,
extra_radius: f64,
dr: f64,
stopr: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut r = extra_radius;
let perlin = Perlin::new();
let seed = rng.gen_range(-555., 555.);
let f = rng.gen_range(0.05, 0.1) * rng.gen_range(0.2, 1.0);
let amp = rng.gen_range(0.03, 0.08) / f;
let basen = perlin.get([seed, f * r]);
while r > stopr {
let actualr = r.min(radius);
let count = (2.0 * PI * r / 0.5).floor() as usize;
if count > 3 {
let n = perlin.get([seed, f * r]) - basen;
let offr = n * amp;
let route = heart(ox, oy, actualr, offr);
routes.push(route);
}
r -= dr;
}
routes
}
fn cell(
seed: f64,
origin: (f64, f64),
width: f64,
height: f64,
pad: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut rng = rng_from_seed(seed);
let dr = rng.gen_range(0.6, 1.0);
let r = (width.min(height) / 2.0 - pad) * rng.gen_range(0.8, 1.0);
let r2 = r
* (1.0
+ rng.gen_range(0.0, 1.0)
* rng.gen_range(0.0, 1.0)
* rng.gen_range(-1.0f64, 1.0).max(0.0));
/*if rng.gen_bool(0.1) {
routes.extend(heart_nested(
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
dr,
));
} else */
if rng.gen_bool(0.1) {
routes.push(heart_spiral(
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
dr,
));
} else {
let stopr = if rng.gen_bool(0.5) {
rng.gen_range(0.1, 0.7) * r
} else {
0.1
};
routes.extend(heart_nested_rotating(
&mut rng,
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
r2,
dr,
stopr,
));
}
let mut mask = PaintMask::new(0.2, width, height);
let ppad = rng.gen_range(4.0, 6.0);
// TODO: use an inner heart step as a mask to carve out a white area?
// To protect the paper from too many pen passes, we cut some lines based on a grid lookup.
let prec = 0.5;
let passage_limit = 10;
let minlen = 3;
let mut passage = Passage2DCounter::new(prec, width, height);
let mut paths = vec![];
for r in routes {
let mut localpassage = Passage2DCounter::new(prec, width, height);
let mut path: Vec<(f64, f64)> = vec![];
for p in r {
let localp = (p.0 - origin.0, p.1 - origin.1);
if passage.get(localp) > passage_limit {
if path.len() >= minlen {
paths.push(path);
}
path = vec![];
} else {
path.push(p);
}
localpassage.count(localp);
mask.paint_circle(&VCircle::new(p.0 - origin.0, p.1 - origin.1, ppad));
}
if path.len() >= minlen {
paths.push(path);
}
passage.count_once_from(&localpassage);
}
routes = paths;
let bounds = (pad, pad, width - pad, height - pad);
let in_shape = |p: (f64, f64)| -> bool {
!mask.is_painted(p) && strictly_in_boundaries(p, bounds)
};
let does_overlap = |c: &VCircle| {
in_shape((c.x, c.y))
&& circle_route((c.x, c.y), c.r, 8)
.iter()
.all(|&p| in_shape(p))
};
let ppad = rng.gen_range(0.4, 0.8);
let min = rng.gen_range(1.5, 2.0);
let max = min + rng.gen_range(0.0, 5.0);
let optim = rng.gen_range(1, 10);
let count = 2000;
let circles = packing(
&mut rng,
vec![],
5000000,
count,
optim,
ppad,
bounds,
&does_overlap,
min,
max,
);
let aligned = rng.gen_bool(0.3);
for c in circles {
let x = c.x + origin.0;
let y = c.y + origin.1;
let r = c.r;
let ang = if aligned {
0.
} else {
PI + (c.x - width / 2.0).atan2(c.y - height / 2.0)
};
routes.push(heart(x, y, r, ang));
}
routes
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let cw = width / 2.;
let ch = height / 2.;
let pad = 5.;
let cols = (width / cw).floor() as usize;
let rows = (height / ch).floor() as usize;
let offsetx = 0.0;
let offsety = 0.0;
let routes = (0..rows)
.into_par_iter()
.flat_map(|j| {
(0..cols).into_par_iter().flat_map(move |i| {
cell(
opts.seed / 7.7 + (i + j * cols) as f64 / 0.3,
(offsetx + i as f64 * cw, offsety + j as f64 * ch),
cw,
ch,
pad,
)
})
})
.collect::<Vec<Vec<(f64, f64)>>>();
vec![(routes, "black")]
.iter()
.enumerate()
.map(|(i, (routes, color))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route_curve(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.35, data));
l
})
.collect()
}
fn | () {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("white", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
struct PaintMask {
mask: Vec<bool>,
precision: f64,
width: f64,
height: f64,
}
impl PaintMask {
fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision) as usize;
let hi = (height / precision) as usize;
Self {
mask: vec![false; wi * hi],
width,
height,
precision,
}
}
fn is_painted(&self, point: (f64, f64)) -> bool {
// check out of bounds
if point.0 <= 0.0
|| point.0 >= self.width
|| point.1 <= 0.0
|| point.1 >= self.height
{
return false;
}
let precision = self.precision;
let width = self.width;
let x = (point.0 / precision) as usize;
let y = (point.1 / precision) as usize;
let wi = (width / precision) as usize;
self.mask[x + y * wi]
}
fn paint_circle(&mut self, circle: &VCircle) {
let (minx, miny, maxx, maxy) = (
circle.x - circle.r,
circle.y - circle.r,
circle.x + circle.r,
circle.y + circle.r,
);
let precision = self.precision;
let width = self.width;
let minx = (minx / precision) as usize;
let miny = (miny / precision) as usize;
let maxx = (maxx / precision) as usize;
let maxy = (maxy / precision) as usize;
let wi = (width / precision) as usize;
let hi = (self.height / precision) as usize;
for x in minx..maxx {
if x >= wi {
continue;
}
for y in miny..maxy {
if y >= hi {
continue;
}
let point = (x as f64 * precision, y as f64 * precision);
if euclidian_dist(point, (circle.x, circle.y)) < circle.r {
self.mask[x + y * wi] = true;
}
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct VCircle {
x: f64,
y: f64,
r: f64,
}
impl VCircle {
fn new(x: f64, y: f64, r: f64) -> Self {
VCircle { x, y, r }
}
fn dist(self: &Self, c: &VCircle) -> f64 {
euclidian_dist((self.x, self.y), (c.x, c.y)) - c.r - self.r
}
fn collides(self: &Self, c: &VCircle) -> bool {
self.dist(c) <= 0.0
}
}
fn scaling_search<F: FnMut(f64) -> bool>(
mut f: F,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let mut from = min_scale;
let mut to = max_scale;
loop {
if !f(from) {
return None;
}
if to - from < 0.1 {
return Some(from);
}
let middle = (to + from) / 2.0;
if !f(middle) {
to = middle;
} else {
from = middle;
}
}
}
fn search_circle_radius(
does_overlap: &dyn Fn(&VCircle) -> bool,
circles: &Vec<VCircle>,
x: f64,
y: f64,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let overlaps = |size| {
let c = VCircle::new(x, y, size);
does_overlap(&c) &&!circles.iter().any(|other| c.collides(other))
};
scaling_search(overlaps, min_scale, max_scale)
}
fn packing<R: Rng>(
rng: &mut R,
initial_circles: Vec<VCircle>,
iterations: usize,
desired_count: usize,
optimize_size: usize,
pad: f64,
bound: (f64, f64, f64, f64),
does_overlap: &dyn Fn(&VCircle) -> bool,
min_scale: f64,
max_scale: f64,
) -> Vec<VCircle> {
let mut circles = initial_circles.clone();
let mut tries = Vec::new();
for _i in 0..iterations {
let x: f64 = rng.gen_range(bound.0, bound.2);
let y: f64 = rng.gen_range(bound.1, bound.3);
if let Some(size) =
search_circle_radius(&does_overlap, &circles, x, y, min_scale, max_scale)
{
let circle = VCircle::new(x, y, size - pad);
tries.push(circle);
if tries.len() > optimize_size {
tries.sort_by(|a, b| b.r.partial_cmp(&a.r).unwrap());
let c = tries[0];
circles.push(c.clone());
tries = Vec::new();
}
}
if circles.len() > desired_count {
break;
}
}
circles
}
pub struct Passage2DCounter {
granularity: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage2DCounter {
pub fn new(granularity: f64, width: f64, height: f64) -> Self {
let wi = (width / granularity).ceil() as usize;
let hi = (height / granularity).ceil() as usize;
let counters = vec![0; wi * hi];
Passage2DCounter {
granularity,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.granularity).ceil() as usize;
let hi = (self.height / self.granularity).ceil() as usize;
let xi = ((x / self.granularity).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.granularity).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
pub fn get(self: &Self, p: (f64, f64)) -> usize {
self.counters[self.index(p)]
}
pub fn count_once_from(self: &mut Self, other: &Self) {
for i in 0..self.counters.len() {
self.counters[i] += if other.counters[i] > 0 { 1 } else { 0 };
}
}
}
| main | identifier_name |
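// The 0.059 factor in `heart_function` above normalizes the classic heart
// curve (raw extent roughly 16 by 17) to an extent of about 1, so the `r`
// passed to `heart(...)` acts as an approximate radius. A quick standalone
// check of that claim:
use std::f64::consts::PI;

fn heart_function(t: f64) -> (f64, f64) {
    let x = 16.0 * t.sin().powi(3);
    let y = -13.0 * t.cos()
        + 5.0 * (2.0 * t).cos()
        + 2.0 * (3.0 * t).cos()
        + (4.0 * t).cos();
    (x * 0.059, y * 0.059)
}

fn main() {
    let (mut max_x, mut max_y) = (0.0f64, 0.0f64);
    for i in 0..=1000 {
        let t = i as f64 * 2.0 * PI / 1000.0;
        let (x, y) = heart_function(t);
        max_x = max_x.max(x.abs());
        max_y = max_y.max(y.abs());
    }
    // Both extents land close to 1.0 (about 0.94 and 1.00).
    println!("max |x| = {:.3}, max |y| = {:.3}", max_x, max_y);
    assert!(max_x < 1.1 && max_y < 1.1);
}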
main.rs | use clap::*;
use gre::*;
use noise::*;
use rand::prelude::*;
use rayon::prelude::*;
use std::f64::consts::PI;
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "297.0")]
pub width: f64,
#[clap(short, long, default_value = "210.0")]
pub height: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
fn heart_function(t: f64) -> (f64, f64) {
let x = 16.0 * f64::sin(t).powi(3);
let y = -13.0 * f64::cos(t)
+ 5.0 * f64::cos(2.0 * t)
+ 2.0 * f64::cos(3.0 * t)
+ f64::cos(4.0 * t);
(x * 0.059, y * 0.059)
}
fn heart_spiral(ox: f64, oy: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let mut points = Vec::new();
let mut t = 0.0;
let mut r = 0.0;
let end_r = radius + 2.0 * PI * dr;
while r < end_r {
let da = 1.0 / (r + 8.0);
t += da;
r += 0.2 * dr * da;
let (x, y) = heart_function(t);
let v = r.min(radius);
let dy = 0.1 * radius * (1. - v / radius);
let p = (x * v + ox, y * v + oy + dy);
points.push(p);
}
// points.extend(circle_route((ox, oy), radius, 100));
points
}
fn heart_nested(
ox: f64,
oy: f64,
radius: f64,
dr: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut r = radius;
while r > 0.1 {
let mut route = vec![];
let count = (2.0 * PI * r / 0.5).floor() as usize;
if count > 3 {
for i in 0..count {
let a = i as f64 * 2.0 * PI / (count as f64);
let (x, y) = heart_function(a);
let p = (x * r + ox, y * r + oy);
route.push(p);
}
route.push(route[0]);
routes.push(route);
}
r -= dr;
}
routes
}
fn heart(ox: f64, oy: f64, r: f64, ang: f64) -> Vec<(f64, f64)> {
let mut route = Vec::new();
let count = (2.0 * PI * r / 0.5).floor() as usize;
for i in 0..count {
let a = i as f64 * 2.0 * PI / (count as f64);
let (x, y) = heart_function(a);
let (x, y) = p_r((x, y), ang);
let p = (x * r + ox, y * r + oy);
route.push(p);
}
route.push(route[0]);
route
}
fn heart_nested_rotating<R: Rng>(
rng: &mut R,
ox: f64,
oy: f64,
radius: f64,
extra_radius: f64,
dr: f64,
stopr: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut r = extra_radius;
let perlin = Perlin::new();
let seed = rng.gen_range(-555., 555.);
let f = rng.gen_range(0.05, 0.1) * rng.gen_range(0.2, 1.0);
let amp = rng.gen_range(0.03, 0.08) / f;
let basen = perlin.get([seed, f * r]);
while r > stopr {
let actualr = r.min(radius);
let count = (2.0 * PI * r / 0.5).floor() as usize;
if count > 3 {
let n = perlin.get([seed, f * r]) - basen;
let offr = n * amp;
let route = heart(ox, oy, actualr, offr);
routes.push(route);
}
r -= dr;
}
routes
}
fn cell(
seed: f64,
origin: (f64, f64),
width: f64,
height: f64,
pad: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut rng = rng_from_seed(seed);
let dr = rng.gen_range(0.6, 1.0);
let r = (width.min(height) / 2.0 - pad) * rng.gen_range(0.8, 1.0);
let r2 = r
* (1.0
+ rng.gen_range(0.0, 1.0)
* rng.gen_range(0.0, 1.0)
* rng.gen_range(-1.0f64, 1.0).max(0.0));
/*if rng.gen_bool(0.1) {
routes.extend(heart_nested(
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
dr,
));
} else */
if rng.gen_bool(0.1) {
routes.push(heart_spiral(
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
dr,
));
} else {
let stopr = if rng.gen_bool(0.5) {
rng.gen_range(0.1, 0.7) * r
} else {
0.1
};
routes.extend(heart_nested_rotating(
&mut rng,
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
r2,
dr,
stopr,
));
}
let mut mask = PaintMask::new(0.2, width, height);
let ppad = rng.gen_range(4.0, 6.0);
// TODO: use an inner heart step as a mask to carve out a white area?
// To protect the paper from too many pen passes, we cut some lines based on a grid lookup.
let prec = 0.5;
let passage_limit = 10;
let minlen = 3;
let mut passage = Passage2DCounter::new(prec, width, height);
let mut paths = vec![];
for r in routes {
let mut localpassage = Passage2DCounter::new(prec, width, height);
let mut path: Vec<(f64, f64)> = vec![];
for p in r {
let localp = (p.0 - origin.0, p.1 - origin.1);
if passage.get(localp) > passage_limit {
if path.len() >= minlen {
paths.push(path);
}
path = vec![];
} else {
path.push(p);
}
localpassage.count(localp);
mask.paint_circle(&VCircle::new(p.0 - origin.0, p.1 - origin.1, ppad));
}
if path.len() >= minlen |
passage.count_once_from(&localpassage);
}
routes = paths;
let bounds = (pad, pad, width - pad, height - pad);
let in_shape = |p: (f64, f64)| -> bool {
!mask.is_painted(p) && strictly_in_boundaries(p, bounds)
};
let does_overlap = |c: &VCircle| {
in_shape((c.x, c.y))
&& circle_route((c.x, c.y), c.r, 8)
.iter()
.all(|&p| in_shape(p))
};
let ppad = rng.gen_range(0.4, 0.8);
let min = rng.gen_range(1.5, 2.0);
let max = min + rng.gen_range(0.0, 5.0);
let optim = rng.gen_range(1, 10);
let count = 2000;
let circles = packing(
&mut rng,
vec![],
5000000,
count,
optim,
ppad,
bounds,
&does_overlap,
min,
max,
);
let aligned = rng.gen_bool(0.3);
for c in circles {
let x = c.x + origin.0;
let y = c.y + origin.1;
let r = c.r;
let ang = if aligned {
0.
} else {
PI + (c.x - width / 2.0).atan2(c.y - height / 2.0)
};
routes.push(heart(x, y, r, ang));
}
routes
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let cw = width / 2.;
let ch = height / 2.;
let pad = 5.;
let cols = (width / cw).floor() as usize;
let rows = (height / ch).floor() as usize;
let offsetx = 0.0;
let offsety = 0.0;
let routes = (0..rows)
.into_par_iter()
.flat_map(|j| {
(0..cols).into_par_iter().flat_map(move |i| {
cell(
opts.seed / 7.7 + (i + j * cols) as f64 / 0.3,
(offsetx + i as f64 * cw, offsety + j as f64 * ch),
cw,
ch,
pad,
)
})
})
.collect::<Vec<Vec<(f64, f64)>>>();
vec![(routes, "black")]
.iter()
.enumerate()
.map(|(i, (routes, color))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route_curve(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.35, data));
l
})
.collect()
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("white", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
struct PaintMask {
mask: Vec<bool>,
precision: f64,
width: f64,
height: f64,
}
impl PaintMask {
fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision) as usize;
let hi = (height / precision) as usize;
Self {
mask: vec![false; wi * hi],
width,
height,
precision,
}
}
fn is_painted(&self, point: (f64, f64)) -> bool {
// check out of bounds
if point.0 <= 0.0
|| point.0 >= self.width
|| point.1 <= 0.0
|| point.1 >= self.height
{
return false;
}
let precision = self.precision;
let width = self.width;
let x = (point.0 / precision) as usize;
let y = (point.1 / precision) as usize;
let wi = (width / precision) as usize;
self.mask[x + y * wi]
}
fn paint_circle(&mut self, circle: &VCircle) {
let (minx, miny, maxx, maxy) = (
circle.x - circle.r,
circle.y - circle.r,
circle.x + circle.r,
circle.y + circle.r,
);
let precision = self.precision;
let width = self.width;
let minx = (minx / precision) as usize;
let miny = (miny / precision) as usize;
let maxx = (maxx / precision) as usize;
let maxy = (maxy / precision) as usize;
let wi = (width / precision) as usize;
let hi = (self.height / precision) as usize;
for x in minx..maxx {
if x >= wi {
continue;
}
for y in miny..maxy {
if y >= hi {
continue;
}
let point = (x as f64 * precision, y as f64 * precision);
if euclidian_dist(point, (circle.x, circle.y)) < circle.r {
self.mask[x + y * wi] = true;
}
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct VCircle {
x: f64,
y: f64,
r: f64,
}
impl VCircle {
fn new(x: f64, y: f64, r: f64) -> Self {
VCircle { x, y, r }
}
fn dist(self: &Self, c: &VCircle) -> f64 {
euclidian_dist((self.x, self.y), (c.x, c.y)) - c.r - self.r
}
fn collides(self: &Self, c: &VCircle) -> bool {
self.dist(c) <= 0.0
}
}
fn scaling_search<F: FnMut(f64) -> bool>(
mut f: F,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let mut from = min_scale;
let mut to = max_scale;
loop {
if !f(from) {
return None;
}
if to - from < 0.1 {
return Some(from);
}
let middle = (to + from) / 2.0;
if !f(middle) {
to = middle;
} else {
from = middle;
}
}
}
fn search_circle_radius(
does_overlap: &dyn Fn(&VCircle) -> bool,
circles: &Vec<VCircle>,
x: f64,
y: f64,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let overlaps = |size| {
let c = VCircle::new(x, y, size);
does_overlap(&c) &&!circles.iter().any(|other| c.collides(other))
};
scaling_search(overlaps, min_scale, max_scale)
}
fn packing<R: Rng>(
rng: &mut R,
initial_circles: Vec<VCircle>,
iterations: usize,
desired_count: usize,
optimize_size: usize,
pad: f64,
bound: (f64, f64, f64, f64),
does_overlap: &dyn Fn(&VCircle) -> bool,
min_scale: f64,
max_scale: f64,
) -> Vec<VCircle> {
let mut circles = initial_circles.clone();
let mut tries = Vec::new();
for _i in 0..iterations {
let x: f64 = rng.gen_range(bound.0, bound.2);
let y: f64 = rng.gen_range(bound.1, bound.3);
if let Some(size) =
search_circle_radius(&does_overlap, &circles, x, y, min_scale, max_scale)
{
let circle = VCircle::new(x, y, size - pad);
tries.push(circle);
if tries.len() > optimize_size {
tries.sort_by(|a, b| b.r.partial_cmp(&a.r).unwrap());
let c = tries[0];
circles.push(c.clone());
tries = Vec::new();
}
}
if circles.len() > desired_count {
break;
}
}
circles
}
pub struct Passage2DCounter {
granularity: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage2DCounter {
pub fn new(granularity: f64, width: f64, height: f64) -> Self {
let wi = (width / granularity).ceil() as usize;
let hi = (height / granularity).ceil() as usize;
let counters = vec![0; wi * hi];
Passage2DCounter {
granularity,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.granularity).ceil() as usize;
let hi = (self.height / self.granularity).ceil() as usize;
let xi = ((x / self.granularity).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.granularity).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
pub fn get(self: &Self, p: (f64, f64)) -> usize {
self.counters[self.index(p)]
}
pub fn count_once_from(self: &mut Self, other: &Self) {
for i in 0..self.counters.len() {
self.counters[i] += if other.counters[i] > 0 { 1 } else { 0 };
}
}
}
| {
paths.push(path);
} | conditional_block |
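// `scaling_search` above is a bisection over a monotone predicate: it returns
// (to within the hard-coded 0.1 tolerance) approximately the largest scale
// that still satisfies `f`, or None when even the minimum fails. A standalone
// demonstration with a toy predicate in place of the circle-overlap test:
fn scaling_search<F: FnMut(f64) -> bool>(
    mut f: F,
    min_scale: f64,
    max_scale: f64,
) -> Option<f64> {
    let mut from = min_scale;
    let mut to = max_scale;
    loop {
        if !f(from) {
            return None;
        }
        if to - from < 0.1 {
            return Some(from);
        }
        let middle = (to + from) / 2.0;
        if !f(middle) {
            to = middle;
        } else {
            from = middle;
        }
    }
}

fn main() {
    // Pretend circles with radius up to 3.7 fit without overlapping.
    let fits = |r: f64| r <= 3.7;
    let r = scaling_search(fits, 0.5, 10.0).expect("min scale fits");
    assert!(r <= 3.7 && 3.7 - r < 0.2);
    // Nothing fits: even the minimum scale fails, so the search returns None.
    assert!(scaling_search(|r: f64| r < 0.1, 0.5, 10.0).is_none());
}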
main.rs | use clap::*;
use gre::*;
use noise::*;
use rand::prelude::*;
use rayon::prelude::*;
use std::f64::consts::PI;
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "297.0")]
pub width: f64,
#[clap(short, long, default_value = "210.0")]
pub height: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
fn heart_function(t: f64) -> (f64, f64) {
let x = 16.0 * f64::sin(t).powi(3);
let y = -13.0 * f64::cos(t)
+ 5.0 * f64::cos(2.0 * t)
+ 2.0 * f64::cos(3.0 * t)
+ f64::cos(4.0 * t);
(x * 0.059, y * 0.059)
}
fn heart_spiral(ox: f64, oy: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let mut points = Vec::new();
let mut t = 0.0;
let mut r = 0.0;
let end_r = radius + 2.0 * PI * dr;
while r < end_r {
let da = 1.0 / (r + 8.0);
t += da;
r += 0.2 * dr * da;
let (x, y) = heart_function(t);
let v = r.min(radius);
let dy = 0.1 * radius * (1. - v / radius);
let p = (x * v + ox, y * v + oy + dy);
points.push(p);
}
// points.extend(circle_route((ox, oy), radius, 100));
points
}
fn heart_nested(
ox: f64,
oy: f64,
radius: f64,
dr: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut r = radius;
while r > 0.1 {
let mut route = vec![];
let count = (2.0 * PI * r / 0.5).floor() as usize;
if count > 3 {
for i in 0..count {
let a = i as f64 * 2.0 * PI / (count as f64);
let (x, y) = heart_function(a);
let p = (x * r + ox, y * r + oy);
route.push(p);
}
route.push(route[0]);
routes.push(route);
}
r -= dr;
}
routes
}
fn heart(ox: f64, oy: f64, r: f64, ang: f64) -> Vec<(f64, f64)> {
let mut route = Vec::new();
let count = (2.0 * PI * r / 0.5).floor() as usize;
for i in 0..count {
let a = i as f64 * 2.0 * PI / (count as f64);
let (x, y) = heart_function(a);
let (x, y) = p_r((x, y), ang);
let p = (x * r + ox, y * r + oy);
route.push(p);
}
route.push(route[0]);
route
}
fn heart_nested_rotating<R: Rng>(
rng: &mut R,
ox: f64,
oy: f64,
radius: f64,
extra_radius: f64,
dr: f64,
stopr: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut r = extra_radius;
let perlin = Perlin::new();
let seed = rng.gen_range(-555., 555.);
let f = rng.gen_range(0.05, 0.1) * rng.gen_range(0.2, 1.0);
let amp = rng.gen_range(0.03, 0.08) / f;
let basen = perlin.get([seed, f * r]);
while r > stopr {
let actualr = r.min(radius);
let count = (2.0 * PI * r / 0.5).floor() as usize;
if count > 3 {
let n = perlin.get([seed, f * r]) - basen;
let offr = n * amp;
let route = heart(ox, oy, actualr, offr);
routes.push(route);
}
r -= dr;
}
routes
}
fn cell(
seed: f64,
origin: (f64, f64),
width: f64,
height: f64,
pad: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut rng = rng_from_seed(seed);
let dr = rng.gen_range(0.6, 1.0);
let r = (width.min(height) / 2.0 - pad) * rng.gen_range(0.8, 1.0);
let r2 = r
* (1.0
+ rng.gen_range(0.0, 1.0)
* rng.gen_range(0.0, 1.0)
* rng.gen_range(-1.0f64, 1.0).max(0.0));
/*if rng.gen_bool(0.1) {
routes.extend(heart_nested(
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
dr,
));
} else */
if rng.gen_bool(0.1) {
routes.push(heart_spiral(
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
dr,
));
} else {
let stopr = if rng.gen_bool(0.5) {
rng.gen_range(0.1, 0.7) * r
} else {
0.1
};
routes.extend(heart_nested_rotating(
&mut rng,
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
r2,
dr,
stopr,
));
}
let mut mask = PaintMask::new(0.2, width, height);
let ppad = rng.gen_range(4.0, 6.0);
// TODO: use an inner heart step as a mask to carve out a white area?
// To protect the paper from too many pen passes, we cut some lines based on a grid lookup.
let prec = 0.5;
let passage_limit = 10;
let minlen = 3;
let mut passage = Passage2DCounter::new(prec, width, height);
let mut paths = vec![];
for r in routes {
let mut localpassage = Passage2DCounter::new(prec, width, height);
let mut path: Vec<(f64, f64)> = vec![];
for p in r {
let localp = (p.0 - origin.0, p.1 - origin.1);
if passage.get(localp) > passage_limit {
if path.len() >= minlen {
paths.push(path);
}
path = vec![];
} else {
path.push(p);
}
localpassage.count(localp);
mask.paint_circle(&VCircle::new(p.0 - origin.0, p.1 - origin.1, ppad));
}
if path.len() >= minlen {
paths.push(path);
}
passage.count_once_from(&localpassage);
}
routes = paths;
let bounds = (pad, pad, width - pad, height - pad);
let in_shape = |p: (f64, f64)| -> bool {
!mask.is_painted(p) && strictly_in_boundaries(p, bounds)
};
let does_overlap = |c: &VCircle| {
in_shape((c.x, c.y))
&& circle_route((c.x, c.y), c.r, 8)
.iter()
.all(|&p| in_shape(p))
};
let ppad = rng.gen_range(0.4, 0.8);
let min = rng.gen_range(1.5, 2.0);
let max = min + rng.gen_range(0.0, 5.0);
let optim = rng.gen_range(1, 10);
let count = 2000;
let circles = packing(
&mut rng,
vec![],
5000000,
count,
optim,
ppad,
bounds,
&does_overlap,
min,
max,
);
let aligned = rng.gen_bool(0.3);
for c in circles {
let x = c.x + origin.0;
let y = c.y + origin.1;
let r = c.r;
let ang = if aligned {
0.
} else {
PI + (c.x - width / 2.0).atan2(c.y - height / 2.0)
};
routes.push(heart(x, y, r, ang));
}
routes
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let cw = width / 2.;
let ch = height / 2.;
let pad = 5.;
let cols = (width / cw).floor() as usize;
let rows = (height / ch).floor() as usize;
let offsetx = 0.0;
let offsety = 0.0;
let routes = (0..rows)
.into_par_iter()
.flat_map(|j| {
(0..cols).into_par_iter().flat_map(move |i| {
cell(
opts.seed / 7.7 + (i + j * cols) as f64 / 0.3,
(offsetx + i as f64 * cw, offsety + j as f64 * ch),
cw,
ch,
pad,
)
})
})
.collect::<Vec<Vec<(f64, f64)>>>();
vec![(routes, "black")]
.iter()
.enumerate()
.map(|(i, (routes, color))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route_curve(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.35, data));
l
})
.collect()
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("white", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
struct PaintMask {
mask: Vec<bool>,
precision: f64,
width: f64,
height: f64,
}
impl PaintMask {
fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision) as usize;
let hi = (height / precision) as usize;
Self {
mask: vec![false; wi * hi],
width,
height,
precision,
}
}
fn is_painted(&self, point: (f64, f64)) -> bool {
// check out of bounds
if point.0 <= 0.0
|| point.0 >= self.width
|| point.1 <= 0.0
|| point.1 >= self.height
{
return false;
}
let precision = self.precision;
let width = self.width;
let x = (point.0 / precision) as usize;
let y = (point.1 / precision) as usize;
let wi = (width / precision) as usize;
self.mask[x + y * wi]
}
fn paint_circle(&mut self, circle: &VCircle) {
let (minx, miny, maxx, maxy) = (
circle.x - circle.r,
circle.y - circle.r,
circle.x + circle.r,
circle.y + circle.r,
);
let precision = self.precision;
let width = self.width;
let minx = (minx / precision) as usize;
let miny = (miny / precision) as usize;
let maxx = (maxx / precision) as usize;
let maxy = (maxy / precision) as usize;
let wi = (width / precision) as usize;
let hi = (self.height / precision) as usize;
for x in minx..maxx {
if x >= wi {
continue;
}
for y in miny..maxy {
if y >= hi {
continue;
}
let point = (x as f64 * precision, y as f64 * precision);
if euclidian_dist(point, (circle.x, circle.y)) < circle.r {
self.mask[x + y * wi] = true;
}
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct VCircle {
x: f64,
y: f64,
r: f64,
}
impl VCircle {
fn new(x: f64, y: f64, r: f64) -> Self {
VCircle { x, y, r }
}
fn dist(self: &Self, c: &VCircle) -> f64 {
euclidian_dist((self.x, self.y), (c.x, c.y)) - c.r - self.r
}
fn collides(self: &Self, c: &VCircle) -> bool {
self.dist(c) <= 0.0
}
}
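// `dist` is edge-to-edge, so two circles that exactly touch have dist == 0
// and count as colliding. A hedged check, assuming gre's euclidian_dist is
// the usual Euclidean metric:
fn vcircle_touch_demo() {
let a = VCircle::new(0.0, 0.0, 1.0);
let b = VCircle::new(2.0, 0.0, 1.0);
assert!(a.collides(&b));
assert!(a.dist(&b).abs() < 1e-9);
}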
fn scaling_search<F: FnMut(f64) -> bool>(
mut f: F,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let mut from = min_scale;
let mut to = max_scale;
loop {
if !f(from) {
return None;
}
if to - from < 0.1 {
return Some(from);
}
let middle = (to + from) / 2.0;
if !f(middle) {
to = middle;
} else {
from = middle;
}
}
}
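// scaling_search is a plain bisection over a predicate assumed monotone
// (true near min_scale, false past some threshold): it returns the largest
// passing value found, to within the hard-coded 0.1 tolerance, or None if
// even min_scale fails. A hedged illustration with a synthetic predicate:
fn scaling_search_demo() {
// largest x in [0, 10] with x <= 4.25, within the 0.1 tolerance
let found = scaling_search(|x| x <= 4.25, 0.0, 10.0).unwrap();
assert!(found <= 4.25 && found >= 4.25 - 0.1);
}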
fn search_circle_radius(
does_overlap: &dyn Fn(&VCircle) -> bool,
circles: &Vec<VCircle>,
x: f64,
y: f64,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let overlaps = |size| {
let c = VCircle::new(x, y, size);
does_overlap(&c) && !circles.iter().any(|other| c.collides(other))
};
scaling_search(overlaps, min_scale, max_scale)
}
fn packing<R: Rng>(
rng: &mut R,
initial_circles: Vec<VCircle>,
iterations: usize,
desired_count: usize,
optimize_size: usize,
pad: f64,
bound: (f64, f64, f64, f64),
does_overlap: &dyn Fn(&VCircle) -> bool,
min_scale: f64,
max_scale: f64,
) -> Vec<VCircle> {
let mut circles = initial_circles.clone();
let mut tries = Vec::new();
for _i in 0..iterations {
let x: f64 = rng.gen_range(bound.0, bound.2);
let y: f64 = rng.gen_range(bound.1, bound.3);
if let Some(size) =
search_circle_radius(&does_overlap, &circles, x, y, min_scale, max_scale)
{
let circle = VCircle::new(x, y, size - pad);
tries.push(circle);
if tries.len() > optimize_size {
tries.sort_by(|a, b| b.r.partial_cmp(&a.r).unwrap());
let c = tries[0];
circles.push(c.clone());
tries = Vec::new();
}
}
if circles.len() > desired_count {
break;
}
}
circles
}
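// Hedged sketch of driving `packing` directly: accept every candidate and
// pack circles into a 100x100 box; all parameter values are illustrative.
// Each kept circle is the largest of an `optimize_size`-sized batch of
// random candidates, which is what trades speed against packing density.
fn packing_demo<R: Rng>(rng: &mut R) -> Vec<VCircle> {
let accept_all = |_c: &VCircle| true;
packing(
rng,
vec![],
10000,
50,
1,
0.2,
(0.0, 0.0, 100.0, 100.0),
&accept_all,
1.0,
8.0,
)
}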
pub struct Passage2DCounter {
granularity: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage2DCounter {
pub fn new(granularity: f64, width: f64, height: f64) -> Self |
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.granularity).ceil() as usize;
let hi = (self.height / self.granularity).ceil() as usize;
let xi = ((x / self.granularity).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.granularity).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
pub fn get(self: &Self, p: (f64, f64)) -> usize {
self.counters[self.index(p)]
}
pub fn count_once_from(self: &mut Self, other: &Self) {
for i in 0..self.counters.len() {
self.counters[i] += if other.counters[i] > 0 { 1 } else { 0 };
}
}
}
| {
let wi = (width / granularity).ceil() as usize;
let hi = (height / granularity).ceil() as usize;
let counters = vec![0; wi * hi];
Passage2DCounter {
granularity,
width,
height,
counters,
}
} | identifier_body |
main.rs | use clap::*;
use gre::*;
use noise::*;
use rand::prelude::*;
use rayon::prelude::*;
use std::f64::consts::PI;
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "297.0")]
pub width: f64,
#[clap(short, long, default_value = "210.0")]
pub height: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
fn heart_function(t: f64) -> (f64, f64) {
let x = 16.0 * f64::sin(t).powi(3);
let y = -13.0 * f64::cos(t)
+ 5.0 * f64::cos(2.0 * t)
+ 2.0 * f64::cos(3.0 * t)
+ f64::cos(4.0 * t);
(x * 0.059, y * 0.059)
}
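// heart_function is the classic 16 sin^3 / 13 cos parametric heart; the
// 0.059 factor rescales it to roughly unit size so callers can multiply by
// a radius. A hedged sampler over one period (`n` is an arbitrary
// resolution):
fn heart_samples(n: usize) -> Vec<(f64, f64)> {
(0..n)
.map(|i| heart_function(i as f64 * 2.0 * PI / n as f64))
.collect()
}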
fn heart_spiral(ox: f64, oy: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let mut points = Vec::new();
let mut t = 0.0;
let mut r = 0.0;
let end_r = radius + 2.0 * PI * dr;
while r < end_r {
let da = 1.0 / (r + 8.0);
t += da;
r += 0.2 * dr * da;
let (x, y) = heart_function(t);
let v = r.min(radius);
let dy = 0.1 * radius * (1. - v / radius);
let p = (x * v + ox, y * v + oy + dy);
points.push(p);
}
// points.extend(circle_route((ox, oy), radius, 100));
points
}
fn heart_nested(
ox: f64,
oy: f64,
radius: f64,
dr: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut r = radius;
while r > 0.1 {
let mut route = vec![];
let count = (2.0 * PI * r / 0.5).floor() as usize;
if count > 3 {
for i in 0..count {
let a = i as f64 * 2.0 * PI / (count as f64);
let (x, y) = heart_function(a);
let p = (x * r + ox, y * r + oy);
route.push(p);
}
route.push(route[0]);
routes.push(route);
}
r -= dr;
}
routes
}
fn heart(ox: f64, oy: f64, r: f64, ang: f64) -> Vec<(f64, f64)> {
let mut route = Vec::new();
let count = (2.0 * PI * r / 0.5).floor() as usize;
for i in 0..count {
let a = i as f64 * 2.0 * PI / (count as f64);
let (x, y) = heart_function(a);
let (x, y) = p_r((x, y), ang);
let p = (x * r + ox, y * r + oy);
route.push(p);
}
route.push(route[0]);
route
}
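// The point count above aims for roughly 0.5 units between consecutive
// samples, using 2πr as a stand-in for the heart's perimeter (an
// approximation, since the curve is not a circle). Hedged restatement:
fn heart_sample_count(r: f64) -> usize {
(2.0 * PI * r / 0.5).floor() as usize
}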
fn heart_nested_rotating<R: Rng>(
rng: &mut R,
ox: f64,
oy: f64,
radius: f64,
extra_radius: f64,
dr: f64,
stopr: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut r = extra_radius;
let perlin = Perlin::new();
let seed = rng.gen_range(-555., 555.);
let f = rng.gen_range(0.05, 0.1) * rng.gen_range(0.2, 1.0);
let amp = rng.gen_range(0.03, 0.08) / f;
let basen = perlin.get([seed, f * r]);
while r > stopr {
let actualr = r.min(radius);
let count = (2.0 * PI * r / 0.5).floor() as usize;
if count > 3 {
let n = perlin.get([seed, f * r]) - basen;
let offr = n * amp;
let route = heart(ox, oy, actualr, offr);
routes.push(route);
}
r -= dr;
}
routes
}
fn cell(
seed: f64,
origin: (f64, f64),
width: f64,
height: f64,
pad: f64,
) -> Vec<Vec<(f64, f64)>> {
let mut routes = Vec::new();
let mut rng = rng_from_seed(seed);
let dr = rng.gen_range(0.6, 1.0);
let r = (width.min(height) / 2.0 - pad) * rng.gen_range(0.8, 1.0);
let r2 = r
* (1.0
+ rng.gen_range(0.0, 1.0)
* rng.gen_range(0.0, 1.0)
* rng.gen_range(-1.0f64, 1.0).max(0.0));
/*if rng.gen_bool(0.1) {
routes.extend(heart_nested(
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
dr,
));
} else */
if rng.gen_bool(0.1) {
routes.push(heart_spiral(
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
dr,
));
} else {
let stopr = if rng.gen_bool(0.5) {
rng.gen_range(0.1, 0.7) * r
} else {
0.1
};
routes.extend(heart_nested_rotating(
&mut rng,
origin.0 + width / 2.0,
origin.1 + height / 2.0,
r,
r2,
dr,
stopr,
));
}
let mut mask = PaintMask::new(0.2, width, height);
let ppad = rng.gen_range(4.0, 6.0);
// TODO use an inner heart step as a mask to make a white area?
// To protect the paper from too many pen passages, we cut some lines based on a grid lookup.
let prec = 0.5;
let passage_limit = 10;
let minlen = 3;
let mut passage = Passage2DCounter::new(prec, width, height);
let mut paths = vec![];
for r in routes {
let mut localpassage = Passage2DCounter::new(prec, width, height);
let mut path: Vec<(f64, f64)> = vec![];
for p in r {
let localp = (p.0 - origin.0, p.1 - origin.1);
if passage.get(localp) > passage_limit {
if path.len() >= minlen {
paths.push(path);
}
path = vec![];
} else {
path.push(p);
}
localpassage.count(localp);
mask.paint_circle(&VCircle::new(p.0 - origin.0, p.1 - origin.1, ppad));
}
if path.len() >= minlen {
paths.push(path);
}
passage.count_once_from(&localpassage);
}
routes = paths;
let bounds = (pad, pad, width - pad, height - pad);
let in_shape = |p: (f64, f64)| -> bool {
!mask.is_painted(p) && strictly_in_boundaries(p, bounds)
};
let does_overlap = |c: &VCircle| {
in_shape((c.x, c.y))
&& circle_route((c.x, c.y), c.r, 8)
.iter()
.all(|&p| in_shape(p))
};
let ppad = rng.gen_range(0.4, 0.8);
let min = rng.gen_range(1.5, 2.0);
let max = min + rng.gen_range(0.0, 5.0);
let optim = rng.gen_range(1, 10);
let count = 2000;
let circles = packing(
&mut rng,
vec![],
5000000,
count,
optim,
ppad,
bounds,
&does_overlap,
min,
max,
);
let aligned = rng.gen_bool(0.3);
for c in circles {
let x = c.x + origin.0;
let y = c.y + origin.1;
let r = c.r;
let ang = if aligned {
0.
} else { | routes.push(heart(x, y, r, ang));
}
routes
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let cw = width / 2.;
let ch = height / 2.;
let pad = 5.;
let cols = (width / cw).floor() as usize;
let rows = (height / ch).floor() as usize;
let offsetx = 0.0;
let offsety = 0.0;
let routes = (0..rows)
.into_par_iter()
.flat_map(|j| {
(0..cols).into_par_iter().flat_map(move |i| {
cell(
opts.seed / 7.7 + (i + j * cols) as f64 / 0.3,
(offsetx + i as f64 * cw, offsety + j as f64 * ch),
cw,
ch,
pad,
)
})
})
.collect::<Vec<Vec<(f64, f64)>>>();
vec![(routes, "black")]
.iter()
.enumerate()
.map(|(i, (routes, color))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route_curve(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.35, data));
l
})
.collect()
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("white", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
struct PaintMask {
mask: Vec<bool>,
precision: f64,
width: f64,
height: f64,
}
impl PaintMask {
fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision) as usize;
let hi = (height / precision) as usize;
Self {
mask: vec![false; wi * hi],
width,
height,
precision,
}
}
fn is_painted(&self, point: (f64, f64)) -> bool {
// check out of bounds
if point.0 <= 0.0
|| point.0 >= self.width
|| point.1 <= 0.0
|| point.1 >= self.height
{
return false;
}
let precision = self.precision;
let width = self.width;
let x = (point.0 / precision) as usize;
let y = (point.1 / precision) as usize;
let wi = (width / precision) as usize;
self.mask[x + y * wi]
}
fn paint_circle(&mut self, circle: &VCircle) {
let (minx, miny, maxx, maxy) = (
circle.x - circle.r,
circle.y - circle.r,
circle.x + circle.r,
circle.y + circle.r,
);
let precision = self.precision;
let width = self.width;
let minx = (minx / precision) as usize;
let miny = (miny / precision) as usize;
let maxx = (maxx / precision) as usize;
let maxy = (maxy / precision) as usize;
let wi = (width / precision) as usize;
let hi = (self.height / precision) as usize;
for x in minx..maxx {
if x >= wi {
continue;
}
for y in miny..maxy {
if y >= hi {
continue;
}
let point = (x as f64 * precision, y as f64 * precision);
if euclidian_dist(point, (circle.x, circle.y)) < circle.r {
self.mask[x + y * wi] = true;
}
}
}
}
}
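// Hedged usage sketch for PaintMask: paint one disc, then query a point
// inside it and one far away. Values are illustrative; note is_painted
// also returns false for out-of-bounds points.
fn paint_mask_demo() {
let mut mask = PaintMask::new(0.2, 100.0, 100.0);
mask.paint_circle(&VCircle::new(50.0, 50.0, 10.0));
assert!(mask.is_painted((50.0, 50.0)));
assert!(!mask.is_painted((5.0, 5.0)));
}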
#[derive(Clone, Copy, Debug)]
struct VCircle {
x: f64,
y: f64,
r: f64,
}
impl VCircle {
fn new(x: f64, y: f64, r: f64) -> Self {
VCircle { x, y, r }
}
fn dist(self: &Self, c: &VCircle) -> f64 {
euclidian_dist((self.x, self.y), (c.x, c.y)) - c.r - self.r
}
fn collides(self: &Self, c: &VCircle) -> bool {
self.dist(c) <= 0.0
}
}
fn scaling_search<F: FnMut(f64) -> bool>(
mut f: F,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let mut from = min_scale;
let mut to = max_scale;
loop {
if !f(from) {
return None;
}
if to - from < 0.1 {
return Some(from);
}
let middle = (to + from) / 2.0;
if !f(middle) {
to = middle;
} else {
from = middle;
}
}
}
fn search_circle_radius(
does_overlap: &dyn Fn(&VCircle) -> bool,
circles: &Vec<VCircle>,
x: f64,
y: f64,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let overlaps = |size| {
let c = VCircle::new(x, y, size);
does_overlap(&c) && !circles.iter().any(|other| c.collides(other))
};
scaling_search(overlaps, min_scale, max_scale)
}
fn packing<R: Rng>(
rng: &mut R,
initial_circles: Vec<VCircle>,
iterations: usize,
desired_count: usize,
optimize_size: usize,
pad: f64,
bound: (f64, f64, f64, f64),
does_overlap: &dyn Fn(&VCircle) -> bool,
min_scale: f64,
max_scale: f64,
) -> Vec<VCircle> {
let mut circles = initial_circles.clone();
let mut tries = Vec::new();
for _i in 0..iterations {
let x: f64 = rng.gen_range(bound.0, bound.2);
let y: f64 = rng.gen_range(bound.1, bound.3);
if let Some(size) =
search_circle_radius(&does_overlap, &circles, x, y, min_scale, max_scale)
{
let circle = VCircle::new(x, y, size - pad);
tries.push(circle);
if tries.len() > optimize_size {
tries.sort_by(|a, b| b.r.partial_cmp(&a.r).unwrap());
let c = tries[0];
circles.push(c.clone());
tries = Vec::new();
}
}
if circles.len() > desired_count {
break;
}
}
circles
}
pub struct Passage2DCounter {
granularity: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage2DCounter {
pub fn new(granularity: f64, width: f64, height: f64) -> Self {
let wi = (width / granularity).ceil() as usize;
let hi = (height / granularity).ceil() as usize;
let counters = vec![0; wi * hi];
Passage2DCounter {
granularity,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.granularity).ceil() as usize;
let hi = (self.height / self.granularity).ceil() as usize;
let xi = ((x / self.granularity).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.granularity).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
pub fn get(self: &Self, p: (f64, f64)) -> usize {
self.counters[self.index(p)]
}
pub fn count_once_from(self: &mut Self, other: &Self) {
for i in 0..self.counters.len() {
self.counters[i] += if other.counters[i] > 0 { 1 } else { 0 };
}
}
} | PI + (c.x - width / 2.0).atan2(c.y - height / 2.0)
}; | random_line_split |
runner.rs | comn::TickNum, ReceivedState>,
received_events: BTreeMap<comn::TickNum, Vec<comn::Event>>,
prediction: Option<Prediction>,
interp_game_time: comn::GameTime,
next_tick_num: Option<comn::TickNum>,
start_time: Instant,
recv_tick_time: GameTimeEstimation,
next_time_warp_factor: f32,
ping: PingEstimation,
stats: Stats,
}
impl Runner {
pub fn new(join: comn::JoinSuccess, webrtc_client: webrtc::Client) -> Self {
let prediction = Some(Prediction::new(join.your_player_id));
let recv_tick_time = GameTimeEstimation::new(join.game_settings.tick_period());
Self {
settings: Arc::new(join.game_settings),
my_token: join.your_token,
my_player_id: join.your_player_id,
webrtc_client,
disconnected: false,
last_inputs: VecDeque::new(),
received_states: BTreeMap::new(),
received_events: BTreeMap::new(),
prediction,
interp_game_time: 0.0,
next_tick_num: None,
start_time: Instant::now(),
recv_tick_time,
next_time_warp_factor: 1.0,
ping: PingEstimation::default(),
stats: Stats::default(),
}
}
pub fn my_player_id(&self) -> comn::PlayerId {
self.my_player_id
}
pub fn is_good(&self) -> bool {
self.webrtc_client.status() == webrtc::Status::Open
&& !self.disconnected
&& !self.ping.is_timeout(Instant::now())
}
pub fn settings(&self) -> &comn::Settings {
&self.settings
}
pub fn stats(&self) -> &Stats {
&self.stats
}
pub fn ping(&self) -> &PingEstimation {
&self.ping
}
pub fn interp_game_time(&self) -> comn::GameTime {
self.interp_game_time
}
fn target_time_lag(&self) -> comn::GameTime {
self.settings.tick_period() * 1.5
}
fn tick_num(&self) -> comn::TickNum {
comn::TickNum((self.interp_game_time / self.settings.tick_period()) as u32)
}
pub fn update(&mut self, now: Instant, dt: Duration, input: &comn::Input) -> Vec<comn::Event> {
assert!(self.is_good());
{
coarse_prof::profile!("webrtc");
self.webrtc_client.set_now((Instant::now(), now));
while let Some((recv_time, message)) = self.webrtc_client.take_message() {
self.handle_message(recv_time, message);
}
}
if let Some(sequence_num) = self.ping.next_ping_sequence_num(now) {
self.send(comn::ClientMessage::Ping(sequence_num));
}
// Determine new local game time, making sure to stay behind the receive
// stream by our desired lag time. We do this so that we have ticks
// between which we can interpolate.
//
// If we are off too far from our lag target, slow down or speed up
// playback time.
let time_since_start = now.duration_since(self.start_time).as_secs_f32();
let recv_game_time = self.recv_tick_time.estimate(time_since_start);
let new_interp_game_time = if let Some(recv_game_time) = recv_game_time {
let current_time_lag = recv_game_time - (self.interp_game_time + dt.as_secs_f32());
let time_lag_deviation = self.target_time_lag() - current_time_lag;
self.stats
.time_lag_deviation_ms
.record(time_lag_deviation * 1000.0);
if time_lag_deviation.abs() < MAX_TIME_LAG_DEVIATION {
/*let k = 0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.05).exp());
if time_lag_deviation > 0.0 {
1.0 / k
} else {
k
}*/
//0.5 * ((-time_lag_deviation).tanh() + 2.0)
self.next_time_warp_factor =
0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.005).exp());
self.interp_game_time + self.next_time_warp_factor * dt.as_secs_f32()
} else {
// Our playback time is too far off, just jump directly to the
// target time.
let target_time = recv_game_time - self.target_time_lag();
info!(
"Time is off by {}, jumping to {}",
time_lag_deviation, target_time
);
target_time
}
} else {
// We have no knowledge of the tick receive time, probably didn't
// receive the first tick packet yet.
self.interp_game_time
};
// Don't let time run further than the ticks that we have received.
// This is here so that we stop local time if the server drops or
// starts lagging heavily.
let max_tick_num = self
.received_states
.keys()
.rev()
.next()
.copied()
.unwrap_or(comn::TickNum(0))
.max(self.tick_num())
.max(self.next_tick_num.unwrap_or(comn::TickNum(0)));
// Advance our playback time.
let prev_tick_num = self.tick_num();
self.interp_game_time =
new_interp_game_time.min(self.settings.tick_game_time(max_tick_num));
let new_tick_num = self.tick_num();
// Look at all the intermediate ticks. We will have one of the
// following cases:
//
// 1. In this update call, the tick number did not change, so
// `prev_tick_num == new_tick_num`.
// 2. We crossed one tick, e.g. prev_tick_num is 7 and new_tick_num is
// 8.
// 3. We crossed more than one tick. This should happen only on lag
// spikes, be it local or in the network.
let mut crossed_tick_nums: Vec<comn::TickNum> = (prev_tick_num.0 + 1..=new_tick_num.0)
.map(comn::TickNum)
.collect();
if crossed_tick_nums.len() > MAX_TICKS_PER_UPDATE {
// It's possible that we have a large jump in ticks, e.g. due to a
// lag spike, or because we are running in a background tab. In this
// case, we don't want to overload ourselves by sending many input
// packets and performing prediction over many ticks. Instead, we
// just jump directly to the last couple of ticks.
info!("Crossed {} ticks, will skip", crossed_tick_nums.len());
// TODO: In order to nicely reinitialize prediction, we should take
// those crossed ticks for which we actually received a server
// state...
crossed_tick_nums.drain(0..crossed_tick_nums.len() - MAX_TICKS_PER_UPDATE);
assert!(crossed_tick_nums.len() == MAX_TICKS_PER_UPDATE);
}
// Iterate over all the ticks that we have crossed, also including
// those for which we did not receive anything from the server.
let mut events = Vec::new();
for tick_num in crossed_tick_nums.iter() {
coarse_prof::profile!("tick");
// For debugging, keep track of the ticks for which we did not
// receive server data on time.
if self.received_states.get(tick_num).is_some() {
self.stats.skip_loss.record_received(tick_num.0 as usize);
}
// Start server events of crossed ticks.
if let Some(tick_events) = self.received_events.get(tick_num) {
events.extend(tick_events.clone().into_iter());
self.received_events.remove(tick_num);
}
// Send inputs for server ticks we cross.
self.last_inputs.push_back((*tick_num, input.clone()));
while self.last_inputs.len() > comn::MAX_INPUTS_PER_MESSAGE {
self.last_inputs.pop_front();
}
self.send(comn::ClientMessage::Input(
self.last_inputs.iter().cloned().collect(),
));
// Predict effects of our own input locally.
if let Some(prediction) = self.prediction.as_mut() {
coarse_prof::profile!("predict");
prediction.record_tick_input(
*tick_num,
input.clone(),
self.received_states.get(tick_num),
);
}
}
coarse_prof::profile!("cleanup");
if self.next_tick_num <= Some(self.tick_num()) {
// We have reached the tick that we were interpolating into, so
// we'll need to look for the next interpolation target.
self.next_tick_num = None;
}
// Do we have a tick to interpolate into ready?
if self.next_tick_num.is_none() {
let min_ready_num = self.received_states.keys().find(|tick_num| {
**tick_num > self.tick_num() && tick_num.0 - self.tick_num().0 <= 3
});
if let Some(min_ready_num) = min_ready_num {
self.next_tick_num = Some(*min_ready_num);
}
}
// Remove events for older ticks; we will no longer need them. Note,
// however, that the same cannot be said about the received states,
// since we may still need them as the basis for delta decoding.
// Received states are only pruned when we receive new states.
{
let remove_tick_nums: Vec<comn::TickNum> = self
.received_events
.keys()
.copied()
.filter(|tick_num| *tick_num < self.tick_num())
.collect();
for tick_num in remove_tick_nums {
self.received_events.remove(&tick_num);
}
}
// Keep some statistics for debugging...
if let Some(recv_game_time) = recv_game_time {
self.stats
.time_lag_ms
.record((recv_game_time - self.interp_game_time) * 1000.0);
} else {
// We cannot estimate the server time, so we probably disconnected
// or just connected.
self.stats.time_lag_ms = stats::Var::default();
}
self.stats
.tick_interp
.record(self.next_tick_num.map_or(0.0, |next_tick_num| {
(next_tick_num.0 - self.tick_num().0) as f32
}));
self.stats
.time_warp_factor
.record(self.next_time_warp_factor);
self.stats.send_rate = self.webrtc_client.send_rate();
self.stats.recv_rate = self.webrtc_client.recv_rate();
self.stats.recv_delay_std_dev = self.recv_tick_time.recv_delay_std_dev().unwrap_or(-1.0);
events
}
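// The time-warp factor used above is a logistic curve confined to
// [0.5, 2.0]: zero deviation gives ~1.0x playback, a large positive
// deviation (we are lagging less than targeted) slows playback toward
// 0.5x, and a large negative one speeds it up toward 2.0x. A hedged
// standalone copy of the same expression:
fn warp_factor(time_lag_deviation: f32) -> f32 {
0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.005).exp())
}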
// TODO: Both `state` and `next_entities` need to be revised
pub fn state(&self) -> Option<comn::Game> {
// Due to loss, we might not always have an authoritative state for the
// current tick num. Take the closest one then.
let mut state = self
.received_states
.iter()
.filter(|(tick_num, _)| **tick_num <= self.tick_num())
.next_back()
.map(|(_, state)| state.game.clone());
// When using prediction, overwrite the predicted entities in the
// authoritative state.
if let Some(state) = state.as_mut() {
let predicted_entities = self
.prediction
.as_ref()
.and_then(|prediction| prediction.predicted_entities(self.tick_num()));
if let Some(predicted_entities) = predicted_entities {
state.entities.extend(
predicted_entities
.iter()
.map(|(entity_id, entity)| (*entity_id, entity.clone())),
);
}
}
state
}
pub fn next_entities(&self) -> BTreeMap<comn::EntityId, (comn::GameTime, comn::Entity)> {
let mut entities = BTreeMap::new();
// Add entities from authoritative state, if available.
let next_state = self
.next_tick_num
.and_then(|key| self.received_states.get(&key).map(|value| (key, value)));
if let Some((recv_tick_num, recv_state)) = next_state {
let recv_game_time = self.settings.tick_game_time(recv_tick_num);
entities.extend(
recv_state
.game
.entities
.clone()
.into_iter()
.map(|(entity_id, entity)| (entity_id, (recv_game_time, entity))),
);
}
// Add entities from predicted state, if available. Note that, due to
// loss in ticks received from the server, these entities might live in
// a different time from the authoritative entities.
let predicted_entities = self
.prediction
.as_ref()
.and_then(|prediction| prediction.predicted_entities(self.tick_num().next()));
if let Some(predicted_entities) = predicted_entities {
let pred_game_time = self.settings.tick_game_time(self.tick_num().next());
entities.extend(
predicted_entities
.clone()
.into_iter()
.map(|(entity_id, entity)| (entity_id, (pred_game_time, entity))),
);
}
entities
}
fn handle_message(&mut self, recv_time: Instant, message: comn::ServerMessage) {
coarse_prof::profile!("handle_message");
match message {
comn::ServerMessage::Ping(_) => {
// Handled in on_message callback to get better ping
// estimates.
}
comn::ServerMessage::Pong(sequence_num) => {
if self.ping.record_pong(recv_time, sequence_num).is_err() {
debug!(
"Ignoring pong with invalid sequence number {:?}",
sequence_num
);
}
}
comn::ServerMessage::Tick(tick) => {
self.record_server_tick(recv_time, tick);
}
comn::ServerMessage::Disconnect => {
self.disconnected = true;
}
}
}
pub fn disconnect(&mut self) {
// Send the unreliable message a few times to increase the chance of arrival.
for _ in 0..3 {
self.send(comn::ClientMessage::Disconnect);
}
self.disconnected = true;
}
fn send(&self, message: comn::ClientMessage) {
coarse_prof::profile!("send");
let signed_message = comn::SignedClientMessage(self.my_token, message);
let data = signed_message.serialize();
coarse_prof::profile!("webrtc");
if let Err(err) = self.webrtc_client.send(&data) {
warn!("Failed to send message: {:?}", err);
}
}
fn record_server_tick(&mut self, recv_time: Instant, tick: comn::Tick) | if!self.recv_tick_time.has_started() {
// If this is the first tick we have received from the server, reset
// to the correct time.
self.interp_game_time = recv_game_time;
info!("Starting tick stream at recv_game_time={}", recv_game_time);
}
let mut new_state = if let Some(diff_base_num) = tick.diff_base {
// This tick has been delta encoded w.r.t. some tick
// that we have acknowledged receiving.
let received_state = if let Some(received_state) =
self.received_states.get(&diff_base_num)
{
received_state.game.clone()
} else {
// This should only happen if packets are severely
// reordered and delayed.
warn!(
"Received state {:?} encoded w.r.t. tick num {:?}, which we do not have (our oldest is {:?})",
recv_tick_num,
diff_base_num,
self.received_states.keys().next(),
);
return;
};
// The fact that we received a tick encoded w.r.t. this base means
// that we can forget any older ticks -- the server will never
// again send a new tick encoded w.r.t | {
let recv_tick_num = tick.diff.tick_num;
let recv_game_time = self.settings.tick_game_time(recv_tick_num);
// Keep some statistics for debugging...
self.stats.loss.record_received(recv_tick_num.0 as usize);
if let Some(my_last_input_num) = tick.your_last_input_num.as_ref() {
self.stats
.input_delay
.record((recv_tick_num.0 - my_last_input_num.0) as f32 - 1.0);
}
if recv_game_time < self.interp_game_time {
debug!(
"Ignoring old tick of time {} vs our interp_game_time={}",
recv_game_time, self.interp_game_time,
);
return;
}
| identifier_body |
runner.rs | <comn::TickNum, ReceivedState>,
received_events: BTreeMap<comn::TickNum, Vec<comn::Event>>,
prediction: Option<Prediction>,
interp_game_time: comn::GameTime,
next_tick_num: Option<comn::TickNum>,
start_time: Instant,
recv_tick_time: GameTimeEstimation,
next_time_warp_factor: f32,
ping: PingEstimation,
stats: Stats,
}
impl Runner {
pub fn new(join: comn::JoinSuccess, webrtc_client: webrtc::Client) -> Self {
let prediction = Some(Prediction::new(join.your_player_id));
let recv_tick_time = GameTimeEstimation::new(join.game_settings.tick_period());
Self {
settings: Arc::new(join.game_settings),
my_token: join.your_token,
my_player_id: join.your_player_id,
webrtc_client,
disconnected: false,
last_inputs: VecDeque::new(),
received_states: BTreeMap::new(),
received_events: BTreeMap::new(),
prediction,
interp_game_time: 0.0,
next_tick_num: None,
start_time: Instant::now(),
recv_tick_time,
next_time_warp_factor: 1.0,
ping: PingEstimation::default(),
stats: Stats::default(),
}
}
pub fn my_player_id(&self) -> comn::PlayerId {
self.my_player_id
}
pub fn is_good(&self) -> bool {
self.webrtc_client.status() == webrtc::Status::Open
&& !self.disconnected
&& !self.ping.is_timeout(Instant::now())
}
pub fn settings(&self) -> &comn::Settings {
&self.settings
}
pub fn stats(&self) -> &Stats {
&self.stats
}
pub fn ping(&self) -> &PingEstimation {
&self.ping
}
pub fn interp_game_time(&self) -> comn::GameTime {
self.interp_game_time
}
fn target_time_lag(&self) -> comn::GameTime {
self.settings.tick_period() * 1.5
}
fn tick_num(&self) -> comn::TickNum {
comn::TickNum((self.interp_game_time / self.settings.tick_period()) as u32)
}
pub fn update(&mut self, now: Instant, dt: Duration, input: &comn::Input) -> Vec<comn::Event> {
assert!(self.is_good());
{
coarse_prof::profile!("webrtc");
self.webrtc_client.set_now((Instant::now(), now));
while let Some((recv_time, message)) = self.webrtc_client.take_message() {
self.handle_message(recv_time, message);
}
}
if let Some(sequence_num) = self.ping.next_ping_sequence_num(now) {
self.send(comn::ClientMessage::Ping(sequence_num));
}
// Determine new local game time, making sure to stay behind the receive
// stream by our desired lag time. We do this so that we have ticks
// between which we can interpolate.
//
// If we are off too far from our lag target, slow down or speed up
// playback time.
let time_since_start = now.duration_since(self.start_time).as_secs_f32();
let recv_game_time = self.recv_tick_time.estimate(time_since_start);
let new_interp_game_time = if let Some(recv_game_time) = recv_game_time {
let current_time_lag = recv_game_time - (self.interp_game_time + dt.as_secs_f32());
let time_lag_deviation = self.target_time_lag() - current_time_lag;
self.stats
.time_lag_deviation_ms
.record(time_lag_deviation * 1000.0);
if time_lag_deviation.abs() < MAX_TIME_LAG_DEVIATION {
/*let k = 0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.05).exp());
if time_lag_deviation > 0.0 {
1.0 / k
} else {
k
}*/
//0.5 * ((-time_lag_deviation).tanh() + 2.0)
self.next_time_warp_factor =
0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.005).exp());
self.interp_game_time + self.next_time_warp_factor * dt.as_secs_f32()
} else {
// Our playback time is too far off, just jump directly to the
// target time.
let target_time = recv_game_time - self.target_time_lag();
info!(
"Time is off by {}, jumping to {}",
time_lag_deviation, target_time
);
target_time
}
} else {
// We have no knowledge of the tick receive time, probably didn't
// receive the first tick packet yet.
self.interp_game_time
};
// Don't let time run further than the ticks that we have received.
// This is here so that we stop local time if the server drops or
// starts lagging heavily.
let max_tick_num = self
.received_states
.keys()
.rev()
.next()
.copied()
.unwrap_or(comn::TickNum(0))
.max(self.tick_num())
.max(self.next_tick_num.unwrap_or(comn::TickNum(0)));
// Advance our playback time.
let prev_tick_num = self.tick_num();
self.interp_game_time =
new_interp_game_time.min(self.settings.tick_game_time(max_tick_num));
let new_tick_num = self.tick_num();
// Look at all the intermediate ticks. We will have one of the
// following cases:
//
// 1. In this update call, the tick number did not change, so
// `prev_tick_num == new_tick_num`.
// 2. We crossed one tick, e.g. prev_tick_num is 7 and new_tick_num is
// 8.
// 3. We crossed more than one tick. This should happen only on lag
// spikes, be it local or in the network.
let mut crossed_tick_nums: Vec<comn::TickNum> = (prev_tick_num.0 + 1..=new_tick_num.0)
.map(comn::TickNum)
.collect();
if crossed_tick_nums.len() > MAX_TICKS_PER_UPDATE {
// It's possible that we have a large jump in ticks, e.g. due to a
// lag spike, or because we are running in a background tab. In this
// case, we don't want to overload ourselves by sending many input
// packets and performing prediction over many ticks. Instead, we
// just jump directly to the last couple of ticks.
info!("Crossed {} ticks, will skip", crossed_tick_nums.len());
// TODO: In order to nicely reinitialize prediction, we should take
// those crossed ticks for which we actually received a server
// state...
crossed_tick_nums.drain(0..crossed_tick_nums.len() - MAX_TICKS_PER_UPDATE);
assert!(crossed_tick_nums.len() == MAX_TICKS_PER_UPDATE);
}
// Iterate over all the ticks that we have crossed, also including
// those for which we did not receive anything from the server.
let mut events = Vec::new();
for tick_num in crossed_tick_nums.iter() {
coarse_prof::profile!("tick");
// For debugging, keep track of the ticks for which we did not
// receive server data on time.
if self.received_states.get(tick_num).is_some() {
self.stats.skip_loss.record_received(tick_num.0 as usize);
}
// Start server events of crossed ticks.
if let Some(tick_events) = self.received_events.get(tick_num) {
events.extend(tick_events.clone().into_iter());
self.received_events.remove(tick_num);
}
// Send inputs for server ticks we cross.
self.last_inputs.push_back((*tick_num, input.clone()));
while self.last_inputs.len() > comn::MAX_INPUTS_PER_MESSAGE {
self.last_inputs.pop_front();
}
self.send(comn::ClientMessage::Input(
self.last_inputs.iter().cloned().collect(),
));
// Predict effects of our own input locally.
if let Some(prediction) = self.prediction.as_mut() {
coarse_prof::profile!("predict");
prediction.record_tick_input(
*tick_num,
input.clone(),
self.received_states.get(tick_num),
);
}
}
coarse_prof::profile!("cleanup");
if self.next_tick_num <= Some(self.tick_num()) {
// We have reached the tick that we were interpolating into, so
// we'll need to look for the next interpolation target.
self.next_tick_num = None;
}
// Do we have a tick to interpolate into ready?
if self.next_tick_num.is_none() {
let min_ready_num = self.received_states.keys().find(|tick_num| {
**tick_num > self.tick_num() && tick_num.0 - self.tick_num().0 <= 3
});
if let Some(min_ready_num) = min_ready_num {
self.next_tick_num = Some(*min_ready_num);
}
}
// Remove events for older ticks; we will no longer need them. Note,
// however, that the same cannot be said about the received states,
// since we may still need them as the basis for delta decoding.
// Received states are only pruned when we receive new states.
{
let remove_tick_nums: Vec<comn::TickNum> = self
.received_events
.keys()
.copied()
.filter(|tick_num| *tick_num < self.tick_num())
.collect();
for tick_num in remove_tick_nums {
self.received_events.remove(&tick_num);
}
}
// Keep some statistics for debugging...
if let Some(recv_game_time) = recv_game_time {
self.stats
.time_lag_ms
.record((recv_game_time - self.interp_game_time) * 1000.0);
} else {
// We cannot estimate the server time, so we probably disconnected
// or just connected.
self.stats.time_lag_ms = stats::Var::default();
}
self.stats
.tick_interp
.record(self.next_tick_num.map_or(0.0, |next_tick_num| {
(next_tick_num.0 - self.tick_num().0) as f32
}));
self.stats
.time_warp_factor
.record(self.next_time_warp_factor);
self.stats.send_rate = self.webrtc_client.send_rate();
self.stats.recv_rate = self.webrtc_client.recv_rate();
self.stats.recv_delay_std_dev = self.recv_tick_time.recv_delay_std_dev().unwrap_or(-1.0);
events
}
// TODO: Both `state` and `next_entities` need to be revised
pub fn state(&self) -> Option<comn::Game> {
// Due to loss, we might not always have an authoritative state for the
// current tick num. Take the closest one then.
let mut state = self
.received_states
.iter()
.filter(|(tick_num, _)| **tick_num <= self.tick_num())
.next_back()
.map(|(_, state)| state.game.clone());
// When using prediction, overwrite the predicted entities in the
// authoritative state.
if let Some(state) = state.as_mut() {
let predicted_entities = self
.prediction
.as_ref()
.and_then(|prediction| prediction.predicted_entities(self.tick_num()));
if let Some(predicted_entities) = predicted_entities {
state.entities.extend(
predicted_entities
.iter() | }
state
}
pub fn next_entities(&self) -> BTreeMap<comn::EntityId, (comn::GameTime, comn::Entity)> {
let mut entities = BTreeMap::new();
// Add entities from authoritative state, if available.
let next_state = self
.next_tick_num
.and_then(|key| self.received_states.get(&key).map(|value| (key, value)));
if let Some((recv_tick_num, recv_state)) = next_state {
let recv_game_time = self.settings.tick_game_time(recv_tick_num);
entities.extend(
recv_state
.game
.entities
.clone()
.into_iter()
.map(|(entity_id, entity)| (entity_id, (recv_game_time, entity))),
);
}
// Add entities from predicted state, if available. Note that, due to
// loss in ticks received from the server, these entities might live in
// a different time from the authoritative entities.
let predicted_entities = self
.prediction
.as_ref()
.and_then(|prediction| prediction.predicted_entities(self.tick_num().next()));
if let Some(predicted_entities) = predicted_entities {
let pred_game_time = self.settings.tick_game_time(self.tick_num().next());
entities.extend(
predicted_entities
.clone()
.into_iter()
.map(|(entity_id, entity)| (entity_id, (pred_game_time, entity))),
);
}
entities
}
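// Consumers interpolate between the current state and these target pairs:
// with an entity at position a at time t0 and b at time t1, render at
// a + (b - a) * alpha with alpha = (now - t0) / (t1 - t0). A hedged scalar
// version of that blend:
fn lerp(a: f64, b: f64, alpha: f64) -> f64 {
a + (b - a) * alpha.clamp(0.0, 1.0)
}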
fn handle_message(&mut self, recv_time: Instant, message: comn::ServerMessage) {
coarse_prof::profile!("handle_message");
match message {
comn::ServerMessage::Ping(_) => {
// Handled in on_message callback to get better ping
// estimates.
}
comn::ServerMessage::Pong(sequence_num) => {
if self.ping.record_pong(recv_time, sequence_num).is_err() {
debug!(
"Ignoring pong with invalid sequence number {:?}",
sequence_num
);
}
}
comn::ServerMessage::Tick(tick) => {
self.record_server_tick(recv_time, tick);
}
comn::ServerMessage::Disconnect => {
self.disconnected = true;
}
}
}
pub fn disconnect(&mut self) {
// Send the unreliable message a few times to increase the chance of arrival.
for _ in 0..3 {
self.send(comn::ClientMessage::Disconnect);
}
self.disconnected = true;
}
fn send(&self, message: comn::ClientMessage) {
coarse_prof::profile!("send");
let signed_message = comn::SignedClientMessage(self.my_token, message);
let data = signed_message.serialize();
coarse_prof::profile!("webrtc");
if let Err(err) = self.webrtc_client.send(&data) {
warn!("Failed to send message: {:?}", err);
}
}
fn record_server_tick(&mut self, recv_time: Instant, tick: comn::Tick) {
let recv_tick_num = tick.diff.tick_num;
let recv_game_time = self.settings.tick_game_time(recv_tick_num);
// Keep some statistics for debugging...
self.stats.loss.record_received(recv_tick_num.0 as usize);
if let Some(my_last_input_num) = tick.your_last_input_num.as_ref() {
self.stats
.input_delay
.record((recv_tick_num.0 - my_last_input_num.0) as f32 - 1.0);
}
if recv_game_time < self.interp_game_time {
debug!(
"Ignoring old tick of time {} vs our interp_game_time={}",
recv_game_time, self.interp_game_time,
);
return;
}
if !self.recv_tick_time.has_started() {
// If this is the first tick we have received from the server, reset
// to the correct time.
self.interp_game_time = recv_game_time;
info!("Starting tick stream at recv_game_time={}", recv_game_time);
}
let mut new_state = if let Some(diff_base_num) = tick.diff_base {
// This tick has been delta encoded w.r.t. some tick
// that we have acknowledged receiving.
let received_state = if let Some(received_state) =
self.received_states.get(&diff_base_num)
{
received_state.game.clone()
} else {
// This should only happen if packets are severely
// reordered and delayed.
warn!(
"Received state {:?} encoded w.r.t. tick num {:?}, which we do not have (our oldest is {:?})",
recv_tick_num,
diff_base_num,
self.received_states.keys().next(),
);
return;
};
// The fact that we received a tick encoded w.r.t. this base means
// that we can forget any older ticks -- the server will never
// again send a new tick encoded w.r.t. | .map(|(entity_id, entity)| (*entity_id, entity.clone())),
);
} | random_line_split |
runner.rs | comn::TickNum, ReceivedState>,
received_events: BTreeMap<comn::TickNum, Vec<comn::Event>>,
prediction: Option<Prediction>,
interp_game_time: comn::GameTime,
next_tick_num: Option<comn::TickNum>,
start_time: Instant,
recv_tick_time: GameTimeEstimation,
next_time_warp_factor: f32,
ping: PingEstimation,
stats: Stats,
}
impl Runner {
pub fn new(join: comn::JoinSuccess, webrtc_client: webrtc::Client) -> Self {
let prediction = Some(Prediction::new(join.your_player_id));
let recv_tick_time = GameTimeEstimation::new(join.game_settings.tick_period());
Self {
settings: Arc::new(join.game_settings),
my_token: join.your_token,
my_player_id: join.your_player_id,
webrtc_client,
disconnected: false,
last_inputs: VecDeque::new(),
received_states: BTreeMap::new(),
received_events: BTreeMap::new(),
prediction,
interp_game_time: 0.0,
next_tick_num: None,
start_time: Instant::now(),
recv_tick_time,
next_time_warp_factor: 1.0,
ping: PingEstimation::default(),
stats: Stats::default(),
}
}
pub fn my_player_id(&self) -> comn::PlayerId {
self.my_player_id
}
pub fn is_good(&self) -> bool {
self.webrtc_client.status() == webrtc::Status::Open
&& !self.disconnected
&& !self.ping.is_timeout(Instant::now())
}
pub fn settings(&self) -> &comn::Settings {
&self.settings
}
pub fn | (&self) -> &Stats {
&self.stats
}
pub fn ping(&self) -> &PingEstimation {
&self.ping
}
pub fn interp_game_time(&self) -> comn::GameTime {
self.interp_game_time
}
fn target_time_lag(&self) -> comn::GameTime {
self.settings.tick_period() * 1.5
}
fn tick_num(&self) -> comn::TickNum {
comn::TickNum((self.interp_game_time / self.settings.tick_period()) as u32)
}
pub fn update(&mut self, now: Instant, dt: Duration, input: &comn::Input) -> Vec<comn::Event> {
assert!(self.is_good());
{
coarse_prof::profile!("webrtc");
self.webrtc_client.set_now((Instant::now(), now));
while let Some((recv_time, message)) = self.webrtc_client.take_message() {
self.handle_message(recv_time, message);
}
}
if let Some(sequence_num) = self.ping.next_ping_sequence_num(now) {
self.send(comn::ClientMessage::Ping(sequence_num));
}
// Determine new local game time, making sure to stay behind the receive
// stream by our desired lag time. We do this so that we have ticks
// between which we can interpolate.
//
// If we are off too far from our lag target, slow down or speed up
// playback time.
let time_since_start = now.duration_since(self.start_time).as_secs_f32();
let recv_game_time = self.recv_tick_time.estimate(time_since_start);
let new_interp_game_time = if let Some(recv_game_time) = recv_game_time {
let current_time_lag = recv_game_time - (self.interp_game_time + dt.as_secs_f32());
let time_lag_deviation = self.target_time_lag() - current_time_lag;
self.stats
.time_lag_deviation_ms
.record(time_lag_deviation * 1000.0);
if time_lag_deviation.abs() < MAX_TIME_LAG_DEVIATION {
/*let k = 0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.05).exp());
if time_lag_deviation > 0.0 {
1.0 / k
} else {
k
}*/
//0.5 * ((-time_lag_deviation).tanh() + 2.0)
self.next_time_warp_factor =
0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.005).exp());
self.interp_game_time + self.next_time_warp_factor * dt.as_secs_f32()
} else {
// Our playback time is too far off, just jump directly to the
// target time.
let target_time = recv_game_time - self.target_time_lag();
info!(
"Time is off by {}, jumping to {}",
time_lag_deviation, target_time
);
target_time
}
} else {
// We have no knowledge of the tick receive time, probably didn't
// receive the first tick packet yet.
self.interp_game_time
};
// Don't let time run further than the ticks that we have received.
// This is here so that we stop local time if the server drops or
// starts lagging heavily.
let max_tick_num = self
.received_states
.keys()
.rev()
.next()
.copied()
.unwrap_or(comn::TickNum(0))
.max(self.tick_num())
.max(self.next_tick_num.unwrap_or(comn::TickNum(0)));
// Advance our playback time.
let prev_tick_num = self.tick_num();
self.interp_game_time =
new_interp_game_time.min(self.settings.tick_game_time(max_tick_num));
let new_tick_num = self.tick_num();
// Look at all the intermediate ticks. We will have one of the
// following cases:
//
// 1. In this update call, the tick number did not change, so
// `prev_tick_num == new_tick_num`.
// 2. We crossed one tick, e.g. prev_tick_num is 7 and new_tick_num is
// 8.
// 3. We crossed more than one tick. This should happen only on lag
// spikes, be it local or in the network.
let mut crossed_tick_nums: Vec<comn::TickNum> = (prev_tick_num.0 + 1..=new_tick_num.0)
.map(comn::TickNum)
.collect();
if crossed_tick_nums.len() > MAX_TICKS_PER_UPDATE {
// It's possible that we have a large jump in ticks, e.g. due to a
// lag spike, or because we are running in a background tab. In this
// case, we don't want to overload ourselves by sending many input
// packets and performing prediction over many ticks. Instead, we
// just jump directly to the last couple of ticks.
info!("Crossed {} ticks, will skip", crossed_tick_nums.len());
// TODO: In order to nicely reinitialize prediction, we should take
// those crossed ticks for which we actually received a server
// state...
crossed_tick_nums.drain(0..crossed_tick_nums.len() - MAX_TICKS_PER_UPDATE);
assert!(crossed_tick_nums.len() == MAX_TICKS_PER_UPDATE);
}
// Iterate over all the ticks that we have crossed, also including
// those for which we did not receive anything from the server.
let mut events = Vec::new();
for tick_num in crossed_tick_nums.iter() {
coarse_prof::profile!("tick");
// For debugging, keep track of the ticks for which we did not
// receive server data on time.
if self.received_states.get(tick_num).is_some() {
self.stats.skip_loss.record_received(tick_num.0 as usize);
}
// Start server events of crossed ticks.
if let Some(tick_events) = self.received_events.get(tick_num) {
events.extend(tick_events.clone().into_iter());
self.received_events.remove(tick_num);
}
// Send inputs for server ticks we cross.
self.last_inputs.push_back((*tick_num, input.clone()));
while self.last_inputs.len() > comn::MAX_INPUTS_PER_MESSAGE {
self.last_inputs.pop_front();
}
self.send(comn::ClientMessage::Input(
self.last_inputs.iter().cloned().collect(),
));
// Predict effects of our own input locally.
if let Some(prediction) = self.prediction.as_mut() {
coarse_prof::profile!("predict");
prediction.record_tick_input(
*tick_num,
input.clone(),
self.received_states.get(tick_num),
);
}
}
coarse_prof::profile!("cleanup");
if self.next_tick_num <= Some(self.tick_num()) {
// We have reached the tick that we were interpolating into, so
// we'll need to look for the next interpolation target.
self.next_tick_num = None;
}
// Do we have a tick to interpolate into ready?
if self.next_tick_num.is_none() {
let min_ready_num = self.received_states.keys().find(|tick_num| {
**tick_num > self.tick_num() && tick_num.0 - self.tick_num().0 <= 3
});
if let Some(min_ready_num) = min_ready_num {
self.next_tick_num = Some(*min_ready_num);
}
}
// Remove events for older ticks; we will no longer need them. Note,
// however, that the same cannot be said about the received states,
// since we may still need them as the basis for delta decoding.
// Received states are only pruned when we receive new states.
{
let remove_tick_nums: Vec<comn::TickNum> = self
.received_events
.keys()
.copied()
.filter(|tick_num| *tick_num < self.tick_num())
.collect();
for tick_num in remove_tick_nums {
self.received_events.remove(&tick_num);
}
}
// Keep some statistics for debugging...
if let Some(recv_game_time) = recv_game_time {
self.stats
.time_lag_ms
.record((recv_game_time - self.interp_game_time) * 1000.0);
} else {
// We cannot estimate the server time, so we probably disconnected
// or just connected.
self.stats.time_lag_ms = stats::Var::default();
}
self.stats
.tick_interp
.record(self.next_tick_num.map_or(0.0, |next_tick_num| {
(next_tick_num.0 - self.tick_num().0) as f32
}));
self.stats
.time_warp_factor
.record(self.next_time_warp_factor);
self.stats.send_rate = self.webrtc_client.send_rate();
self.stats.recv_rate = self.webrtc_client.recv_rate();
self.stats.recv_delay_std_dev = self.recv_tick_time.recv_delay_std_dev().unwrap_or(-1.0);
events
}
// TODO: Both `state` and `next_entities` need to be revised
pub fn state(&self) -> Option<comn::Game> {
// Due to loss, we might not always have an authoritative state for the
// current tick num. Take the closest one then.
let mut state = self
.received_states
.iter()
.filter(|(tick_num, _)| **tick_num <= self.tick_num())
.next_back()
.map(|(_, state)| state.game.clone());
// When using prediction, overwrite the predicted entities in the
// authoritative state.
if let Some(state) = state.as_mut() {
let predicted_entities = self
.prediction
.as_ref()
.and_then(|prediction| prediction.predicted_entities(self.tick_num()));
if let Some(predicted_entities) = predicted_entities {
state.entities.extend(
predicted_entities
.iter()
.map(|(entity_id, entity)| (*entity_id, entity.clone())),
);
}
}
state
}
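// The prediction overlay relies on BTreeMap::extend: authoritative
// entities are inserted first, then predicted ones overwrite entries with
// the same id. A hedged illustration of that merge order:
fn overlay<K: Ord, V>(
mut authoritative: std::collections::BTreeMap<K, V>,
predicted: std::collections::BTreeMap<K, V>,
) -> std::collections::BTreeMap<K, V> {
authoritative.extend(predicted);
authoritative
}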
pub fn next_entities(&self) -> BTreeMap<comn::EntityId, (comn::GameTime, comn::Entity)> {
let mut entities = BTreeMap::new();
// Add entities from authoritative state, if available.
let next_state = self
.next_tick_num
.and_then(|key| self.received_states.get(&key).map(|value| (key, value)));
if let Some((recv_tick_num, recv_state)) = next_state {
let recv_game_time = self.settings.tick_game_time(recv_tick_num);
entities.extend(
recv_state
.game
.entities
.clone()
.into_iter()
.map(|(entity_id, entity)| (entity_id, (recv_game_time, entity))),
);
}
// Add entities from predicted state, if available. Note that, due to
// loss in ticks received from the server, these entities might live in
// a different time from the authoritative entities.
let predicted_entities = self
.prediction
.as_ref()
.and_then(|prediction| prediction.predicted_entities(self.tick_num().next()));
if let Some(predicted_entities) = predicted_entities {
let pred_game_time = self.settings.tick_game_time(self.tick_num().next());
entities.extend(
predicted_entities
.clone()
.into_iter()
.map(|(entity_id, entity)| (entity_id, (pred_game_time, entity))),
);
}
entities
}
fn handle_message(&mut self, recv_time: Instant, message: comn::ServerMessage) {
coarse_prof::profile!("handle_message");
match message {
comn::ServerMessage::Ping(_) => {
// Handled in on_message callback to get better ping
// estimates.
}
comn::ServerMessage::Pong(sequence_num) => {
if self.ping.record_pong(recv_time, sequence_num).is_err() {
debug!(
"Ignoring pong with invalid sequence number {:?}",
sequence_num
);
}
}
comn::ServerMessage::Tick(tick) => {
self.record_server_tick(recv_time, tick);
}
comn::ServerMessage::Disconnect => {
self.disconnected = true;
}
}
}
pub fn disconnect(&mut self) {
// Send the unreliable message a few times to increase the chance of arrival.
for _ in 0..3 {
self.send(comn::ClientMessage::Disconnect);
}
self.disconnected = true;
}
fn send(&self, message: comn::ClientMessage) {
coarse_prof::profile!("send");
let signed_message = comn::SignedClientMessage(self.my_token, message);
let data = signed_message.serialize();
coarse_prof::profile!("webrtc");
if let Err(err) = self.webrtc_client.send(&data) {
warn!("Failed to send message: {:?}", err);
}
}
fn record_server_tick(&mut self, recv_time: Instant, tick: comn::Tick) {
let recv_tick_num = tick.diff.tick_num;
let recv_game_time = self.settings.tick_game_time(recv_tick_num);
// Keep some statistics for debugging...
self.stats.loss.record_received(recv_tick_num.0 as usize);
if let Some(my_last_input_num) = tick.your_last_input_num.as_ref() {
self.stats
.input_delay
.record((recv_tick_num.0 - my_last_input_num.0) as f32 - 1.0);
}
if recv_game_time < self.interp_game_time {
debug!(
"Ignoring old tick of time {} vs our interp_game_time={}",
recv_game_time, self.interp_game_time,
);
return;
}
if !self.recv_tick_time.has_started() {
// If this is the first tick we have received from the server, reset
// to the correct time.
self.interp_game_time = recv_game_time;
info!("Starting tick stream at recv_game_time={}", recv_game_time);
}
let mut new_state = if let Some(diff_base_num) = tick.diff_base {
// This tick has been delta encoded w.r.t. some tick
// that we have acknowledged receiving.
let received_state = if let Some(received_state) =
self.received_states.get(&diff_base_num)
{
received_state.game.clone()
} else {
// This should only happen if packets are severely
// reordered and delayed.
warn!(
"Received state {:?} encoded w.r.t. tick num {:?}, which we do not have (our oldest is {:?})",
recv_tick_num,
diff_base_num,
self.received_states.keys().next(),
);
return;
};
// The fact that we received a tick encoded w.r.t. this base means
// that we can forget any older ticks -- the server will never
// again send a new tick encoded w.r. | stats | identifier_name |
runner.rs | comn::TickNum, ReceivedState>,
received_events: BTreeMap<comn::TickNum, Vec<comn::Event>>,
prediction: Option<Prediction>,
interp_game_time: comn::GameTime,
next_tick_num: Option<comn::TickNum>,
start_time: Instant,
recv_tick_time: GameTimeEstimation,
next_time_warp_factor: f32,
ping: PingEstimation,
stats: Stats,
}
impl Runner {
pub fn new(join: comn::JoinSuccess, webrtc_client: webrtc::Client) -> Self {
let prediction = Some(Prediction::new(join.your_player_id));
let recv_tick_time = GameTimeEstimation::new(join.game_settings.tick_period());
Self {
settings: Arc::new(join.game_settings),
my_token: join.your_token,
my_player_id: join.your_player_id,
webrtc_client,
disconnected: false,
last_inputs: VecDeque::new(),
received_states: BTreeMap::new(),
received_events: BTreeMap::new(),
prediction,
interp_game_time: 0.0,
next_tick_num: None,
start_time: Instant::now(),
recv_tick_time,
next_time_warp_factor: 1.0,
ping: PingEstimation::default(),
stats: Stats::default(),
}
}
pub fn my_player_id(&self) -> comn::PlayerId {
self.my_player_id
}
pub fn is_good(&self) -> bool {
self.webrtc_client.status() == webrtc::Status::Open
&& !self.disconnected
&& !self.ping.is_timeout(Instant::now())
}
pub fn settings(&self) -> &comn::Settings {
&self.settings
}
pub fn stats(&self) -> &Stats {
&self.stats
}
pub fn ping(&self) -> &PingEstimation {
&self.ping
}
pub fn interp_game_time(&self) -> comn::GameTime {
self.interp_game_time
}
fn target_time_lag(&self) -> comn::GameTime {
self.settings.tick_period() * 1.5
}
fn tick_num(&self) -> comn::TickNum {
comn::TickNum((self.interp_game_time / self.settings.tick_period()) as u32)
}
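// Game time and tick numbers relate by plain division plus truncation.
// Hedged example with an illustrative 60 Hz tick period: a game time of
// 0.034 s falls in tick 2, since 0.034 / (1.0 / 60.0) ~= 2.04.
fn tick_of(game_time: f32, tick_period: f32) -> u32 {
(game_time / tick_period) as u32
}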
pub fn update(&mut self, now: Instant, dt: Duration, input: &comn::Input) -> Vec<comn::Event> {
assert!(self.is_good());
{
coarse_prof::profile!("webrtc");
self.webrtc_client.set_now((Instant::now(), now));
while let Some((recv_time, message)) = self.webrtc_client.take_message() {
self.handle_message(recv_time, message);
}
}
if let Some(sequence_num) = self.ping.next_ping_sequence_num(now) {
self.send(comn::ClientMessage::Ping(sequence_num));
}
// Determine new local game time, making sure to stay behind the receive
// stream by our desired lag time. We do this so that we have ticks
// between which we can interpolate.
//
// If we are off too far from our lag target, slow down or speed up
// playback time.
let time_since_start = now.duration_since(self.start_time).as_secs_f32();
let recv_game_time = self.recv_tick_time.estimate(time_since_start);
let new_interp_game_time = if let Some(recv_game_time) = recv_game_time {
let current_time_lag = recv_game_time - (self.interp_game_time + dt.as_secs_f32());
let time_lag_deviation = self.target_time_lag() - current_time_lag;
self.stats
.time_lag_deviation_ms
.record(time_lag_deviation * 1000.0);
if time_lag_deviation.abs() < MAX_TIME_LAG_DEVIATION {
/*let k = 0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.05).exp());
if time_lag_deviation > 0.0 {
1.0 / k
} else {
k
}*/
//0.5 * ((-time_lag_deviation).tanh() + 2.0)
self.next_time_warp_factor =
0.5 + (2.0 - 0.5) / (1.0 + 2.0 * (time_lag_deviation / 0.005).exp());
self.interp_game_time + self.next_time_warp_factor * dt.as_secs_f32()
} else {
// Our playback time is too far off, just jump directly to the
// target time.
let target_time = recv_game_time - self.target_time_lag();
info!(
"Time is off by {}, jumping to {}",
time_lag_deviation, target_time
);
target_time
}
} else {
// We have no knowledge of the tick receive time, probably didn't
// receive the first tick packet yet.
self.interp_game_time
};
// Don't let time run further than the ticks that we have received.
// This is here so that we stop local time if the server drops or
// starts lagging heavily.
let max_tick_num = self
.received_states
.keys()
.rev()
.next()
.copied()
.unwrap_or(comn::TickNum(0))
.max(self.tick_num())
.max(self.next_tick_num.unwrap_or(comn::TickNum(0)));
// Advance our playback time.
let prev_tick_num = self.tick_num();
self.interp_game_time =
new_interp_game_time.min(self.settings.tick_game_time(max_tick_num));
let new_tick_num = self.tick_num();
// Look at all the intermediate ticks. We will have one of the
// following cases:
//
// 1. In this update call, the tick number did not change, so
// `prev_tick_num == new_tick_num`.
// 2. We crossed one tick, e.g. prev_tick_num is 7 and new_tick_num is
// 8.
// 3. We crossed more than one tick. This should happen only on lag
// spikes, be it local or in the network.
let mut crossed_tick_nums: Vec<comn::TickNum> = (prev_tick_num.0 + 1..=new_tick_num.0)
.map(comn::TickNum)
.collect();
if crossed_tick_nums.len() > MAX_TICKS_PER_UPDATE {
// It's possible that we have a large jump in ticks, e.g. due to a
// lag spike, or because we are running in a background tab. In this
// case, we don't want to overload ourselves by sending many input
// packets and performing prediction over many ticks. Instead, we
// just jump directly to the last couple of ticks.
info!("Crossed {} ticks, will skip", crossed_tick_nums.len());
// TODO: In order to nicely reinitialize prediction, we should take
// those crossed ticks for which we actually received a server
// state...
crossed_tick_nums.drain(0..crossed_tick_nums.len() - MAX_TICKS_PER_UPDATE);
assert!(crossed_tick_nums.len() == MAX_TICKS_PER_UPDATE);
}
// Iterate over all the ticks that we have crossed, also including
// those for which we did not receive anything from the server.
let mut events = Vec::new();
for tick_num in crossed_tick_nums.iter() {
coarse_prof::profile!("tick");
// For debugging, keep track of how many ticks we do not
// receive server data on time.
if self.received_states.get(tick_num).is_some() {
self.stats.skip_loss.record_received(tick_num.0 as usize);
}
// Collect the server events of crossed ticks.
if let Some(tick_events) = self.received_events.get(tick_num) {
events.extend(tick_events.clone().into_iter());
self.received_events.remove(tick_num);
}
// Send inputs for server ticks we cross.
self.last_inputs.push_back((*tick_num, input.clone()));
while self.last_inputs.len() > comn::MAX_INPUTS_PER_MESSAGE {
self.last_inputs.pop_front();
}
self.send(comn::ClientMessage::Input(
self.last_inputs.iter().cloned().collect(),
));
// Predict effects of our own input locally.
if let Some(prediction) = self.prediction.as_mut() {
coarse_prof::profile!("predict");
prediction.record_tick_input(
*tick_num,
input.clone(),
self.received_states.get(tick_num),
);
}
}
coarse_prof::profile!("cleanup");
if self.next_tick_num <= Some(self.tick_num()) {
// We have reached the tick that we were interpolating into, so
// we'll need to look for the next interpolation target.
self.next_tick_num = None;
}
// Do we have a tick to interpolate into ready?
if self.next_tick_num.is_none() {
let min_ready_num = self.received_states.keys().find(|tick_num| {
**tick_num > self.tick_num() && tick_num.0 - self.tick_num().0 <= 3
});
if let Some(min_ready_num) = min_ready_num |
}
// Remove events for older ticks, we will no longer need them. Note,
// however, that the same cannot be said about the received states,
// since we may still need them as the basis for delta decoding.
// Received states are only pruned when we receive new states.
{
let remove_tick_nums: Vec<comn::TickNum> = self
.received_events
.keys()
.copied()
.filter(|tick_num| *tick_num < self.tick_num())
.collect();
for tick_num in remove_tick_nums {
self.received_events.remove(&tick_num);
}
}
// Keep some statistics for debugging...
if let Some(recv_game_time) = recv_game_time {
self.stats
.time_lag_ms
.record((recv_game_time - self.interp_game_time) * 1000.0);
} else {
// We cannot estimate the server time, so we probably disconnected
// or just connected.
self.stats.time_lag_ms = stats::Var::default();
}
self.stats
.tick_interp
.record(self.next_tick_num.map_or(0.0, |next_tick_num| {
(next_tick_num.0 - self.tick_num().0) as f32
}));
self.stats
.time_warp_factor
.record(self.next_time_warp_factor);
self.stats.send_rate = self.webrtc_client.send_rate();
self.stats.recv_rate = self.webrtc_client.recv_rate();
self.stats.recv_delay_std_dev = self.recv_tick_time.recv_delay_std_dev().unwrap_or(-1.0);
events
}
// TODO: Both `state` and `next_entities` need to be revised
pub fn state(&self) -> Option<comn::Game> {
// Due to loss, we might not always have an authoritative state for the
// current tick num. If so, take the closest earlier one.
let mut state = self
.received_states
.iter()
.filter(|(tick_num, _)| **tick_num <= self.tick_num())
.next_back()
.map(|(_, state)| state.game.clone());
// When using prediction, overwrite entities in the authoritative
// state with their predicted versions.
if let Some(state) = state.as_mut() {
let predicted_entities = self
.prediction
.as_ref()
.and_then(|prediction| prediction.predicted_entities(self.tick_num()));
if let Some(predicted_entities) = predicted_entities {
state.entities.extend(
predicted_entities
.iter()
.map(|(entity_id, entity)| (*entity_id, entity.clone())),
);
}
}
state
}
pub fn next_entities(&self) -> BTreeMap<comn::EntityId, (comn::GameTime, comn::Entity)> {
let mut entities = BTreeMap::new();
// Add entities from the authoritative state, if available.
let next_state = self
.next_tick_num
.and_then(|key| self.received_states.get(&key).map(|value| (key, value)));
if let Some((recv_tick_num, recv_state)) = next_state {
let recv_game_time = self.settings.tick_game_time(recv_tick_num);
entities.extend(
recv_state
.game
.entities
.clone()
.into_iter()
.map(|(entity_id, entity)| (entity_id, (recv_game_time, entity))),
);
}
// Add entities from the predicted state, if available. Note that, due
// to loss in ticks received from the server, these entities might live
// at a different time from the authoritative entities.
let predicted_entities = self
.prediction
.as_ref()
.and_then(|prediction| prediction.predicted_entities(self.tick_num().next()));
if let Some(predicted_entities) = predicted_entities {
let pred_game_time = self.settings.tick_game_time(self.tick_num().next());
entities.extend(
predicted_entities
.clone()
.into_iter()
.map(|(entity_id, entity)| (entity_id, (pred_game_time, entity))),
);
}
entities
}
fn handle_message(&mut self, recv_time: Instant, message: comn::ServerMessage) {
coarse_prof::profile!("handle_message");
match message {
comn::ServerMessage::Ping(_) => {
// Handled in on_message callback to get better ping
// estimates.
}
comn::ServerMessage::Pong(sequence_num) => {
if self.ping.record_pong(recv_time, sequence_num).is_err() {
debug!(
"Ignoring pong with invalid sequence number {:?}",
sequence_num
);
}
}
comn::ServerMessage::Tick(tick) => {
self.record_server_tick(recv_time, tick);
}
comn::ServerMessage::Disconnect => {
self.disconnected = true;
}
}
}
pub fn disconnect(&mut self) {
// Send unreliable message a few times to increase chance of arrival.
for _ in 0..3 {
self.send(comn::ClientMessage::Disconnect);
}
self.disconnected = true;
}
fn send(&self, message: comn::ClientMessage) {
coarse_prof::profile!("send");
let signed_message = comn::SignedClientMessage(self.my_token, message);
let data = signed_message.serialize();
coarse_prof::profile!("webrtc");
if let Err(err) = self.webrtc_client.send(&data) {
warn!("Failed to send message: {:?}", err);
}
}
fn record_server_tick(&mut self, recv_time: Instant, tick: comn::Tick) {
let recv_tick_num = tick.diff.tick_num;
let recv_game_time = self.settings.tick_game_time(recv_tick_num);
// Keep some statistics for debugging...
self.stats.loss.record_received(recv_tick_num.0 as usize);
if let Some(my_last_input_num) = tick.your_last_input_num.as_ref() {
self.stats
.input_delay
.record((recv_tick_num.0 - my_last_input_num.0) as f32 - 1.0);
}
if recv_game_time < self.interp_game_time {
debug!(
"Ignoring old tick of time {} vs our interp_game_time={}",
recv_game_time, self.interp_game_time,
);
return;
}
if !self.recv_tick_time.has_started() {
// If this is the first tick we have received from the server, reset
// to the correct time.
self.interp_game_time = recv_game_time;
info!("Starting tick stream at recv_game_time={}", recv_game_time);
}
let mut new_state = if let Some(diff_base_num) = tick.diff_base {
// This tick has been delta encoded w.r.t. some tick
// that we have acknowledged receiving.
let received_state = if let Some(received_state) =
self.received_states.get(&diff_base_num)
{
received_state.game.clone()
} else {
// This should only happen if packets are severely
// reordered and delayed.
warn!(
"Received state {:?} encoded w.r.t. tick num {:?}, which we do not have (our oldest is {:?})",
recv_tick_num,
diff_base_num,
self.received_states.keys().next(),
);
return;
};
// The fact that we received a tick encoded w.r.t. this base means
// that we can forget any older ticks -- the server will never
// again send a new tick encoded w.r. | {
self.next_tick_num = Some(*min_ready_num);
} | conditional_block |
on_initialize.rs | #![allow(unused)]
use crate::process::ShellCommand;
use crate::stdio_server::job;
use crate::stdio_server::provider::{Context, ProviderSource};
use crate::tools::ctags::ProjectCtagsCommand;
use crate::tools::rg::{RgTokioCommand, RG_EXEC_CMD};
use anyhow::Result;
use filter::SourceItem;
use printer::{DisplayLines, Printer};
use serde_json::{json, Value};
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use types::ClapItem;
use utils::count_lines;
async fn execute_and_write_cache(
cmd: &str,
cache_file: std::path::PathBuf,
) -> std::io::Result<ProviderSource> {
// Cannot use subprocess::Exec::shell here.
//
// Must use TokioCommand otherwise the timeout may not work.
let mut tokio_cmd = crate::process::tokio::shell_command(cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let total = count_lines(std::fs::File::open(&cache_file)?)?;
Ok(ProviderSource::CachedFile {
total,
path: cache_file,
refreshed: true,
})
}
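/// Wraps already-collected lines into an in-memory ProviderSource, for
/// sources small enough to keep resident rather than cached on disk.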
fn to_small_provider_source(lines: Vec<String>) -> ProviderSource {
let total = lines.len();
let items = lines
.into_iter()
.map(|line| Arc::new(SourceItem::from(line)) as Arc<dyn ClapItem>)
.collect::<Vec<_>>();
ProviderSource::Small { total, items }
}
/// Performs the initialization, such as collecting the source and the total number of source items.
async fn initialize_provider_source(ctx: &Context) -> Result<ProviderSource> {
// Known providers.
match ctx.provider_id() {
"blines" => {
let total = count_lines(std::fs::File::open(&ctx.env.start_buffer_path)?)?;
let path = ctx.env.start_buffer_path.clone();
return Ok(ProviderSource::File { total, path });
}
"tags" => {
let items = crate::tools::ctags::buffer_tag_items(&ctx.env.start_buffer_path, false)?;
let total = items.len();
return Ok(ProviderSource::Small { total, items });
}
"proj_tags" => {
let ctags_cmd = ProjectCtagsCommand::with_cwd(ctx.cwd.to_path_buf());
let provider_source = if ctx.env.no_cache {
let lines = ctags_cmd.execute_and_write_cache().await?;
to_small_provider_source(lines)
} else {
match ctags_cmd.ctags_cache() {
Some((total, path)) => ProviderSource::CachedFile {
total,
path,
refreshed: false,
},
None => {
let lines = ctags_cmd.execute_and_write_cache().await?;
to_small_provider_source(lines)
}
}
};
return Ok(provider_source);
}
"help_tags" => {
let helplang: String = ctx.vim.eval("&helplang").await?;
let runtimepath: String = ctx.vim.eval("&runtimepath").await?;
let doc_tags = std::iter::once("/doc/tags".to_string()).chain(
helplang
.split(',')
.filter(|&lang| lang != "en")
.map(|lang| format!("/doc/tags-{lang}")),
);
let lines = crate::helptags::generate_tag_lines(doc_tags, &runtimepath);
return Ok(to_small_provider_source(lines));
}
_ => {}
}
let source_cmd: Vec<Value> = ctx.vim.bare_call("provider_source").await?;
if let Some(value) = source_cmd.into_iter().next() {
match value {
// Source is a String: g:__t_string, g:__t_func_string
Value::String(command) => {
let shell_cmd = ShellCommand::new(command, ctx.cwd.to_path_buf());
let cache_file = shell_cmd.cache_file_path()?;
const DIRECT_CREATE_NEW_SOURCE: &[&str] = &["files"];
let create_new_source_directly =
DIRECT_CREATE_NEW_SOURCE.contains(&ctx.provider_id());
let provider_source = if create_new_source_directly || ctx.env.no_cache {
execute_and_write_cache(&shell_cmd.command, cache_file).await?
} else {
match shell_cmd.cache_digest() {
Some(digest) => ProviderSource::CachedFile {
total: digest.total,
path: digest.cached_path,
refreshed: false,
},
None => execute_and_write_cache(&shell_cmd.command, cache_file).await?,
}
};
if let ProviderSource::CachedFile { path, .. } = &provider_source {
ctx.vim.set_var("g:__clap_forerunner_tempfile", path)?;
}
return Ok(provider_source);
}
// Source is a List: g:__t_list, g:__t_func_list
Value::Array(arr) => {
let lines = arr
.into_iter()
.filter_map(|v| {
if let Value::String(s) = v {
Some(s)
} else {
None
}
})
.collect::<Vec<_>>();
return Ok(to_small_provider_source(lines));
}
_ => {}
}
}
Ok(ProviderSource::Uninitialized)
}
fn on_initialized_source(
provider_source: ProviderSource,
ctx: &Context,
init_display: bool,
) -> Result<()> {
if let Some(total) = provider_source.total() {
ctx.vim.set_var("g:clap.display.initial_size", total)?;
}
if init_display {
if let Some(items) = provider_source.try_skim(ctx.provider_id(), 100) {
let printer = Printer::new(ctx.env.display_winwidth, ctx.env.icon);
let DisplayLines {
lines,
icon_added,
truncated_map,
..
} = printer.to_display_lines(items);
let using_cache = provider_source.using_cache();
ctx.vim.exec(
"clap#state#init_display",
json!([lines, truncated_map, icon_added, using_cache]),
)?;
}
if ctx.initializing_prompt_echoed.load(Ordering::SeqCst) {
ctx.vim.bare_exec("clap#helper#echo_clear")?;
}
}
ctx.set_provider_source(provider_source);
Ok(())
}
async fn initialize_list_source(ctx: Context, init_display: bool) -> Result<()> {
let source_cmd: Vec<Value> = ctx.vim.bare_call("provider_source").await?;
// Source must be initialized when it is a List: g:__t_list, g:__t_func_list
if let Some(Value::Array(arr)) = source_cmd.into_iter().next() {
let lines = arr
.into_iter()
.filter_map(|v| {
if let Value::String(s) = v {
Some(s)
} else {
None
}
})
.collect::<Vec<_>>();
on_initialized_source(to_small_provider_source(lines), &ctx, init_display)?;
}
Ok(())
}
pub async fn initialize_provider(ctx: &Context, init_display: bool) -> Result<()> | Ok(Err(e)) => tracing::error!(?e, "Error occurred while initializing the provider source"),
Err(_) => {
// The initialization was not super fast.
tracing::debug!(timeout =?TIMEOUT, "Did not receive value in time");
let source_cmd: Vec<String> = ctx.vim.bare_call("provider_source_cmd").await?;
let maybe_source_cmd = source_cmd.into_iter().next();
if let Some(source_cmd) = maybe_source_cmd {
ctx.set_provider_source(ProviderSource::Command(source_cmd));
}
/* no longer necessary for grep provider.
// Try creating cache for some potential heavy providers.
match context.provider_id() {
"grep" | "live_grep" => {
context.set_provider_source(ProviderSource::Command(RG_EXEC_CMD.to_string()));
let context = context.clone();
let rg_cmd = RgTokioCommand::new(context.cwd.to_path_buf());
let job_id = utils::calculate_hash(&rg_cmd);
job::try_start(
async move {
if let Ok(digest) = rg_cmd.create_cache().await {
let new = ProviderSource::CachedFile {
total: digest.total,
path: digest.cached_path,
refreshed: true,
};
if!context.terminated.load(Ordering::SeqCst) {
context.set_provider_source(new);
}
}
},
job_id,
);
}
_ => {}
}
*/
}
}
Ok(())
}
| {
// Skip the initialization.
match ctx.provider_id() {
"grep" | "live_grep" => return Ok(()),
_ => {}
}
if ctx.env.source_is_list {
let ctx = ctx.clone();
ctx.set_provider_source(ProviderSource::Initializing);
// Initialize the list-style providers in another task so that further messages won't
// be blocked by the initialization in case it takes too long.
tokio::spawn(initialize_list_source(ctx, init_display));
return Ok(());
}
const TIMEOUT: Duration = Duration::from_millis(300);
match tokio::time::timeout(TIMEOUT, initialize_provider_source(ctx)).await {
Ok(Ok(provider_source)) => on_initialized_source(provider_source, ctx, init_display)?, | identifier_body |
on_initialize.rs | #![allow(unused)]
use crate::process::ShellCommand;
use crate::stdio_server::job;
use crate::stdio_server::provider::{Context, ProviderSource};
use crate::tools::ctags::ProjectCtagsCommand;
use crate::tools::rg::{RgTokioCommand, RG_EXEC_CMD};
use anyhow::Result;
use filter::SourceItem;
use printer::{DisplayLines, Printer};
use serde_json::{json, Value};
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use types::ClapItem;
use utils::count_lines;
async fn execute_and_write_cache(
cmd: &str,
cache_file: std::path::PathBuf,
) -> std::io::Result<ProviderSource> {
// Cannot use subprocess::Exec::shell here.
//
// Must use TokioCommand otherwise the timeout may not work.
let mut tokio_cmd = crate::process::tokio::shell_command(cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let total = count_lines(std::fs::File::open(&cache_file)?)?;
Ok(ProviderSource::CachedFile {
total,
path: cache_file,
refreshed: true,
})
}
fn to_small_provider_source(lines: Vec<String>) -> ProviderSource {
let total = lines.len();
let items = lines
.into_iter()
.map(|line| Arc::new(SourceItem::from(line)) as Arc<dyn ClapItem>)
.collect::<Vec<_>>();
ProviderSource::Small { total, items }
}
/// Performs the initialization, such as collecting the source and the total number of source items.
async fn initialize_provider_source(ctx: &Context) -> Result<ProviderSource> {
// Known providers.
match ctx.provider_id() {
"blines" => {
let total = count_lines(std::fs::File::open(&ctx.env.start_buffer_path)?)?;
let path = ctx.env.start_buffer_path.clone();
return Ok(ProviderSource::File { total, path });
}
"tags" => {
let items = crate::tools::ctags::buffer_tag_items(&ctx.env.start_buffer_path, false)?;
let total = items.len();
return Ok(ProviderSource::Small { total, items });
}
"proj_tags" => {
let ctags_cmd = ProjectCtagsCommand::with_cwd(ctx.cwd.to_path_buf());
let provider_source = if ctx.env.no_cache {
let lines = ctags_cmd.execute_and_write_cache().await?;
to_small_provider_source(lines)
} else {
match ctags_cmd.ctags_cache() {
Some((total, path)) => ProviderSource::CachedFile {
total,
path,
refreshed: false,
},
None => {
let lines = ctags_cmd.execute_and_write_cache().await?;
to_small_provider_source(lines)
}
}
};
return Ok(provider_source);
}
"help_tags" => {
let helplang: String = ctx.vim.eval("&helplang").await?;
let runtimepath: String = ctx.vim.eval("&runtimepath").await?;
let doc_tags = std::iter::once("/doc/tags".to_string()).chain(
helplang
.split(',')
.filter(|&lang| lang != "en")
.map(|lang| format!("/doc/tags-{lang}")),
);
let lines = crate::helptags::generate_tag_lines(doc_tags, &runtimepath);
return Ok(to_small_provider_source(lines));
}
_ => {}
}
let source_cmd: Vec<Value> = ctx.vim.bare_call("provider_source").await?;
if let Some(value) = source_cmd.into_iter().next() {
match value {
// Source is a String: g:__t_string, g:__t_func_string
Value::String(command) => {
let shell_cmd = ShellCommand::new(command, ctx.cwd.to_path_buf());
let cache_file = shell_cmd.cache_file_path()?;
const DIRECT_CREATE_NEW_SOURCE: &[&str] = &["files"];
let create_new_source_directly =
DIRECT_CREATE_NEW_SOURCE.contains(&ctx.provider_id());
let provider_source = if create_new_source_directly || ctx.env.no_cache {
execute_and_write_cache(&shell_cmd.command, cache_file).await?
} else {
match shell_cmd.cache_digest() {
Some(digest) => ProviderSource::CachedFile {
total: digest.total,
path: digest.cached_path,
refreshed: false,
},
None => execute_and_write_cache(&shell_cmd.command, cache_file).await?,
}
};
if let ProviderSource::CachedFile { path, .. } = &provider_source {
ctx.vim.set_var("g:__clap_forerunner_tempfile", path)?;
}
return Ok(provider_source);
}
// Source is a List: g:__t_list, g:__t_func_list
Value::Array(arr) => {
let lines = arr
.into_iter()
.filter_map(|v| {
if let Value::String(s) = v {
Some(s)
} else {
None
}
})
.collect::<Vec<_>>();
return Ok(to_small_provider_source(lines));
}
_ => {}
}
}
Ok(ProviderSource::Uninitialized)
}
fn on_initialized_source(
provider_source: ProviderSource,
ctx: &Context,
init_display: bool,
) -> Result<()> {
if let Some(total) = provider_source.total() {
ctx.vim.set_var("g:clap.display.initial_size", total)?;
}
if init_display {
if let Some(items) = provider_source.try_skim(ctx.provider_id(), 100) {
let printer = Printer::new(ctx.env.display_winwidth, ctx.env.icon);
let DisplayLines {
lines,
icon_added,
truncated_map,
..
} = printer.to_display_lines(items);
let using_cache = provider_source.using_cache();
ctx.vim.exec(
"clap#state#init_display",
json!([lines, truncated_map, icon_added, using_cache]),
)?;
}
if ctx.initializing_prompt_echoed.load(Ordering::SeqCst) {
ctx.vim.bare_exec("clap#helper#echo_clear")?;
}
}
ctx.set_provider_source(provider_source);
Ok(())
}
async fn initialize_list_source(ctx: Context, init_display: bool) -> Result<()> {
let source_cmd: Vec<Value> = ctx.vim.bare_call("provider_source").await?;
// Source must be initialized when it is a List: g:__t_list, g:__t_func_list
if let Some(Value::Array(arr)) = source_cmd.into_iter().next() {
let lines = arr
.into_iter()
.filter_map(|v| {
if let Value::String(s) = v {
Some(s)
} else {
None
}
})
.collect::<Vec<_>>();
on_initialized_source(to_small_provider_source(lines), &ctx, init_display)?;
}
Ok(())
}
pub async fn | (ctx: &Context, init_display: bool) -> Result<()> {
// Skip the initialization.
match ctx.provider_id() {
"grep" | "live_grep" => return Ok(()),
_ => {}
}
if ctx.env.source_is_list {
let ctx = ctx.clone();
ctx.set_provider_source(ProviderSource::Initializing);
// Initialize the list-style providers in another task so that further messages won't
// be blocked by the initialization in case it takes too long.
tokio::spawn(initialize_list_source(ctx, init_display));
return Ok(());
}
const TIMEOUT: Duration = Duration::from_millis(300);
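// If the source cannot be produced within this budget, fall through to
// recording only the source command below; a forerunner job can then
// fill the cache asynchronously without blocking the UI.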
match tokio::time::timeout(TIMEOUT, initialize_provider_source(ctx)).await {
Ok(Ok(provider_source)) => on_initialized_source(provider_source, ctx, init_display)?,
Ok(Err(e)) => tracing::error!(?e, "Error occurred while initializing the provider source"),
Err(_) => {
// The initialization was not super fast.
tracing::debug!(timeout =?TIMEOUT, "Did not receive value in time");
let source_cmd: Vec<String> = ctx.vim.bare_call("provider_source_cmd").await?;
let maybe_source_cmd = source_cmd.into_iter().next();
if let Some(source_cmd) = maybe_source_cmd {
ctx.set_provider_source(ProviderSource::Command(source_cmd));
}
/* no longer necessary for grep provider.
// Try creating cache for some potential heavy providers.
match context.provider_id() {
"grep" | "live_grep" => {
context.set_provider_source(ProviderSource::Command(RG_EXEC_CMD.to_string()));
let context = context.clone();
let rg_cmd = RgTokioCommand::new(context.cwd.to_path_buf());
let job_id = utils::calculate_hash(&rg_cmd);
job::try_start(
async move {
if let Ok(digest) = rg_cmd.create_cache().await {
let new = ProviderSource::CachedFile {
total: digest.total,
path: digest.cached_path,
refreshed: true,
};
if!context.terminated.load(Ordering::SeqCst) {
context.set_provider_source(new);
}
}
},
job_id,
);
}
_ => {}
}
*/
}
}
Ok(())
}
| initialize_provider | identifier_name |
on_initialize.rs | #![allow(unused)]
use crate::process::ShellCommand;
use crate::stdio_server::job;
use crate::stdio_server::provider::{Context, ProviderSource};
use crate::tools::ctags::ProjectCtagsCommand;
use crate::tools::rg::{RgTokioCommand, RG_EXEC_CMD};
use anyhow::Result;
use filter::SourceItem;
use printer::{DisplayLines, Printer};
use serde_json::{json, Value};
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use types::ClapItem;
use utils::count_lines;
async fn execute_and_write_cache(
cmd: &str,
cache_file: std::path::PathBuf,
) -> std::io::Result<ProviderSource> {
// Cannot use subprocess::Exec::shell here.
//
// Must use TokioCommand otherwise the timeout may not work.
let mut tokio_cmd = crate::process::tokio::shell_command(cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let total = count_lines(std::fs::File::open(&cache_file)?)?;
Ok(ProviderSource::CachedFile {
total,
path: cache_file,
refreshed: true,
})
}
fn to_small_provider_source(lines: Vec<String>) -> ProviderSource {
let total = lines.len();
let items = lines
.into_iter()
.map(|line| Arc::new(SourceItem::from(line)) as Arc<dyn ClapItem>)
.collect::<Vec<_>>();
ProviderSource::Small { total, items }
}
/// Performs the initialization, such as collecting the source and the total number of source items.
async fn initialize_provider_source(ctx: &Context) -> Result<ProviderSource> {
// Known providers.
match ctx.provider_id() {
"blines" => {
let total = count_lines(std::fs::File::open(&ctx.env.start_buffer_path)?)?;
let path = ctx.env.start_buffer_path.clone();
return Ok(ProviderSource::File { total, path });
}
"tags" => {
let items = crate::tools::ctags::buffer_tag_items(&ctx.env.start_buffer_path, false)?;
let total = items.len();
return Ok(ProviderSource::Small { total, items });
}
"proj_tags" => {
let ctags_cmd = ProjectCtagsCommand::with_cwd(ctx.cwd.to_path_buf());
let provider_source = if ctx.env.no_cache {
let lines = ctags_cmd.execute_and_write_cache().await?;
to_small_provider_source(lines)
} else {
match ctags_cmd.ctags_cache() {
Some((total, path)) => ProviderSource::CachedFile {
total,
path,
refreshed: false,
},
None => {
let lines = ctags_cmd.execute_and_write_cache().await?;
to_small_provider_source(lines) | };
return Ok(provider_source);
}
"help_tags" => {
let helplang: String = ctx.vim.eval("&helplang").await?;
let runtimepath: String = ctx.vim.eval("&runtimepath").await?;
let doc_tags = std::iter::once("/doc/tags".to_string()).chain(
helplang
.split(',')
.filter(|&lang| lang != "en")
.map(|lang| format!("/doc/tags-{lang}")),
);
let lines = crate::helptags::generate_tag_lines(doc_tags, &runtimepath);
return Ok(to_small_provider_source(lines));
}
_ => {}
}
let source_cmd: Vec<Value> = ctx.vim.bare_call("provider_source").await?;
if let Some(value) = source_cmd.into_iter().next() {
match value {
// Source is a String: g:__t_string, g:__t_func_string
Value::String(command) => {
let shell_cmd = ShellCommand::new(command, ctx.cwd.to_path_buf());
let cache_file = shell_cmd.cache_file_path()?;
const DIRECT_CREATE_NEW_SOURCE: &[&str] = &["files"];
let create_new_source_directly =
DIRECT_CREATE_NEW_SOURCE.contains(&ctx.provider_id());
let provider_source = if create_new_source_directly || ctx.env.no_cache {
execute_and_write_cache(&shell_cmd.command, cache_file).await?
} else {
match shell_cmd.cache_digest() {
Some(digest) => ProviderSource::CachedFile {
total: digest.total,
path: digest.cached_path,
refreshed: false,
},
None => execute_and_write_cache(&shell_cmd.command, cache_file).await?,
}
};
if let ProviderSource::CachedFile { path, .. } = &provider_source {
ctx.vim.set_var("g:__clap_forerunner_tempfile", path)?;
}
return Ok(provider_source);
}
// Source is a List: g:__t_list, g:__t_func_list
Value::Array(arr) => {
let lines = arr
.into_iter()
.filter_map(|v| {
if let Value::String(s) = v {
Some(s)
} else {
None
}
})
.collect::<Vec<_>>();
return Ok(to_small_provider_source(lines));
}
_ => {}
}
}
Ok(ProviderSource::Uninitialized)
}
fn on_initialized_source(
provider_source: ProviderSource,
ctx: &Context,
init_display: bool,
) -> Result<()> {
if let Some(total) = provider_source.total() {
ctx.vim.set_var("g:clap.display.initial_size", total)?;
}
if init_display {
if let Some(items) = provider_source.try_skim(ctx.provider_id(), 100) {
let printer = Printer::new(ctx.env.display_winwidth, ctx.env.icon);
let DisplayLines {
lines,
icon_added,
truncated_map,
..
} = printer.to_display_lines(items);
let using_cache = provider_source.using_cache();
ctx.vim.exec(
"clap#state#init_display",
json!([lines, truncated_map, icon_added, using_cache]),
)?;
}
if ctx.initializing_prompt_echoed.load(Ordering::SeqCst) {
ctx.vim.bare_exec("clap#helper#echo_clear")?;
}
}
ctx.set_provider_source(provider_source);
Ok(())
}
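/// Collects a Vim list source (g:__t_list / g:__t_func_list) and feeds
/// it through the normal initialization path, off the main message loop.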
async fn initialize_list_source(ctx: Context, init_display: bool) -> Result<()> {
let source_cmd: Vec<Value> = ctx.vim.bare_call("provider_source").await?;
// Source must be initialized when it is a List: g:__t_list, g:__t_func_list
if let Some(Value::Array(arr)) = source_cmd.into_iter().next() {
let lines = arr
.into_iter()
.filter_map(|v| {
if let Value::String(s) = v {
Some(s)
} else {
None
}
})
.collect::<Vec<_>>();
on_initialized_source(to_small_provider_source(lines), &ctx, init_display)?;
}
Ok(())
}
pub async fn initialize_provider(ctx: &Context, init_display: bool) -> Result<()> {
// Skip the initialization.
match ctx.provider_id() {
"grep" | "live_grep" => return Ok(()),
_ => {}
}
if ctx.env.source_is_list {
let ctx = ctx.clone();
ctx.set_provider_source(ProviderSource::Initializing);
// Initialize the list-style providers in another task so that further messages won't
// be blocked by the initialization in case it takes too long.
tokio::spawn(initialize_list_source(ctx, init_display));
return Ok(());
}
const TIMEOUT: Duration = Duration::from_millis(300);
match tokio::time::timeout(TIMEOUT, initialize_provider_source(ctx)).await {
Ok(Ok(provider_source)) => on_initialized_source(provider_source, ctx, init_display)?,
Ok(Err(e)) => tracing::error!(?e, "Error occurred while initializing the provider source"),
Err(_) => {
// The initialization was not super fast.
tracing::debug!(timeout =?TIMEOUT, "Did not receive value in time");
let source_cmd: Vec<String> = ctx.vim.bare_call("provider_source_cmd").await?;
let maybe_source_cmd = source_cmd.into_iter().next();
if let Some(source_cmd) = maybe_source_cmd {
ctx.set_provider_source(ProviderSource::Command(source_cmd));
}
/* no longer necessary for grep provider.
// Try creating cache for some potential heavy providers.
match context.provider_id() {
"grep" | "live_grep" => {
context.set_provider_source(ProviderSource::Command(RG_EXEC_CMD.to_string()));
let context = context.clone();
let rg_cmd = RgTokioCommand::new(context.cwd.to_path_buf());
let job_id = utils::calculate_hash(&rg_cmd);
job::try_start(
async move {
if let Ok(digest) = rg_cmd.create_cache().await {
let new = ProviderSource::CachedFile {
total: digest.total,
path: digest.cached_path,
refreshed: true,
};
if!context.terminated.load(Ordering::SeqCst) {
context.set_provider_source(new);
}
}
},
job_id,
);
}
_ => {}
}
*/
}
}
Ok(())
} | }
} | random_line_split |
on_initialize.rs | #![allow(unused)]
use crate::process::ShellCommand;
use crate::stdio_server::job;
use crate::stdio_server::provider::{Context, ProviderSource};
use crate::tools::ctags::ProjectCtagsCommand;
use crate::tools::rg::{RgTokioCommand, RG_EXEC_CMD};
use anyhow::Result;
use filter::SourceItem;
use printer::{DisplayLines, Printer};
use serde_json::{json, Value};
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use types::ClapItem;
use utils::count_lines;
async fn execute_and_write_cache(
cmd: &str,
cache_file: std::path::PathBuf,
) -> std::io::Result<ProviderSource> {
// Cannot use subprocess::Exec::shell here.
//
// Must use TokioCommand otherwise the timeout may not work.
let mut tokio_cmd = crate::process::tokio::shell_command(cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let total = count_lines(std::fs::File::open(&cache_file)?)?;
Ok(ProviderSource::CachedFile {
total,
path: cache_file,
refreshed: true,
})
}
fn to_small_provider_source(lines: Vec<String>) -> ProviderSource {
let total = lines.len();
let items = lines
.into_iter()
.map(|line| Arc::new(SourceItem::from(line)) as Arc<dyn ClapItem>)
.collect::<Vec<_>>();
ProviderSource::Small { total, items }
}
/// Performs the initialization, such as collecting the source and the total number of source items.
async fn initialize_provider_source(ctx: &Context) -> Result<ProviderSource> {
// Known providers.
match ctx.provider_id() {
"blines" => {
let total = count_lines(std::fs::File::open(&ctx.env.start_buffer_path)?)?;
let path = ctx.env.start_buffer_path.clone();
return Ok(ProviderSource::File { total, path });
}
"tags" => {
let items = crate::tools::ctags::buffer_tag_items(&ctx.env.start_buffer_path, false)?;
let total = items.len();
return Ok(ProviderSource::Small { total, items });
}
"proj_tags" => {
let ctags_cmd = ProjectCtagsCommand::with_cwd(ctx.cwd.to_path_buf());
let provider_source = if ctx.env.no_cache {
let lines = ctags_cmd.execute_and_write_cache().await?;
to_small_provider_source(lines)
} else {
match ctags_cmd.ctags_cache() {
Some((total, path)) => ProviderSource::CachedFile {
total,
path,
refreshed: false,
},
None => {
let lines = ctags_cmd.execute_and_write_cache().await?;
to_small_provider_source(lines)
}
}
};
return Ok(provider_source);
}
"help_tags" => {
let helplang: String = ctx.vim.eval("&helplang").await?;
let runtimepath: String = ctx.vim.eval("&runtimepath").await?;
let doc_tags = std::iter::once("/doc/tags".to_string()).chain(
helplang
.split(',')
.filter(|&lang| lang != "en")
.map(|lang| format!("/doc/tags-{lang}")),
);
let lines = crate::helptags::generate_tag_lines(doc_tags, &runtimepath);
return Ok(to_small_provider_source(lines));
}
_ => {}
}
let source_cmd: Vec<Value> = ctx.vim.bare_call("provider_source").await?;
if let Some(value) = source_cmd.into_iter().next() {
match value {
// Source is a String: g:__t_string, g:__t_func_string
Value::String(command) => {
let shell_cmd = ShellCommand::new(command, ctx.cwd.to_path_buf());
let cache_file = shell_cmd.cache_file_path()?;
const DIRECT_CREATE_NEW_SOURCE: &[&str] = &["files"];
let create_new_source_directly =
DIRECT_CREATE_NEW_SOURCE.contains(&ctx.provider_id());
let provider_source = if create_new_source_directly || ctx.env.no_cache {
execute_and_write_cache(&shell_cmd.command, cache_file).await?
} else {
match shell_cmd.cache_digest() {
Some(digest) => ProviderSource::CachedFile {
total: digest.total,
path: digest.cached_path,
refreshed: false,
},
None => execute_and_write_cache(&shell_cmd.command, cache_file).await?,
}
};
if let ProviderSource::CachedFile { path, .. } = &provider_source {
ctx.vim.set_var("g:__clap_forerunner_tempfile", path)?;
}
return Ok(provider_source);
}
// Source is a List: g:__t_list, g:__t_func_list
Value::Array(arr) => {
let lines = arr
.into_iter()
.filter_map(|v| {
if let Value::String(s) = v {
Some(s)
} else {
None
}
})
.collect::<Vec<_>>();
return Ok(to_small_provider_source(lines));
}
_ => {}
}
}
Ok(ProviderSource::Uninitialized)
}
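/// Publishes the freshly initialized source on the provider context and,
/// when requested, pushes an initial batch of rendered lines to the Vim UI.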
fn on_initialized_source(
provider_source: ProviderSource,
ctx: &Context,
init_display: bool,
) -> Result<()> {
if let Some(total) = provider_source.total() {
ctx.vim.set_var("g:clap.display.initial_size", total)?;
}
if init_display {
if let Some(items) = provider_source.try_skim(ctx.provider_id(), 100) {
let printer = Printer::new(ctx.env.display_winwidth, ctx.env.icon);
let DisplayLines {
lines,
icon_added,
truncated_map,
..
} = printer.to_display_lines(items);
let using_cache = provider_source.using_cache();
ctx.vim.exec(
"clap#state#init_display",
json!([lines, truncated_map, icon_added, using_cache]),
)?;
}
if ctx.initializing_prompt_echoed.load(Ordering::SeqCst) {
ctx.vim.bare_exec("clap#helper#echo_clear")?;
}
}
ctx.set_provider_source(provider_source);
Ok(())
}
async fn initialize_list_source(ctx: Context, init_display: bool) -> Result<()> {
let source_cmd: Vec<Value> = ctx.vim.bare_call("provider_source").await?;
// Source must be initialized when it is a List: g:__t_list, g:__t_func_list
if let Some(Value::Array(arr)) = source_cmd.into_iter().next() {
let lines = arr
.into_iter()
.filter_map(|v| {
if let Value::String(s) = v {
Some(s)
} else |
})
.collect::<Vec<_>>();
on_initialized_source(to_small_provider_source(lines), &ctx, init_display)?;
}
Ok(())
}
pub async fn initialize_provider(ctx: &Context, init_display: bool) -> Result<()> {
// Skip the initialization.
match ctx.provider_id() {
"grep" | "live_grep" => return Ok(()),
_ => {}
}
if ctx.env.source_is_list {
let ctx = ctx.clone();
ctx.set_provider_source(ProviderSource::Initializing);
// Initialize the list-style providers in another task so that further messages won't
// be blocked by the initialization in case it takes too long.
tokio::spawn(initialize_list_source(ctx, init_display));
return Ok(());
}
const TIMEOUT: Duration = Duration::from_millis(300);
match tokio::time::timeout(TIMEOUT, initialize_provider_source(ctx)).await {
Ok(Ok(provider_source)) => on_initialized_source(provider_source, ctx, init_display)?,
Ok(Err(e)) => tracing::error!(?e, "Error occurred while initializing the provider source"),
Err(_) => {
// The initialization was not super fast.
tracing::debug!(timeout =?TIMEOUT, "Did not receive value in time");
let source_cmd: Vec<String> = ctx.vim.bare_call("provider_source_cmd").await?;
let maybe_source_cmd = source_cmd.into_iter().next();
if let Some(source_cmd) = maybe_source_cmd {
ctx.set_provider_source(ProviderSource::Command(source_cmd));
}
/* no longer necessary for grep provider.
// Try creating cache for some potential heavy providers.
match context.provider_id() {
"grep" | "live_grep" => {
context.set_provider_source(ProviderSource::Command(RG_EXEC_CMD.to_string()));
let context = context.clone();
let rg_cmd = RgTokioCommand::new(context.cwd.to_path_buf());
let job_id = utils::calculate_hash(&rg_cmd);
job::try_start(
async move {
if let Ok(digest) = rg_cmd.create_cache().await {
let new = ProviderSource::CachedFile {
total: digest.total,
path: digest.cached_path,
refreshed: true,
};
if!context.terminated.load(Ordering::SeqCst) {
context.set_provider_source(new);
}
}
},
job_id,
);
}
_ => {}
}
*/
}
}
Ok(())
}
| {
None
} | conditional_block |
mod.rs | use std::path::{Path};
use std::fs::{File, PathExt};
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use std::ffi::CString;
use alsa::{PCM, Stream, Mode, Format, Access, Prepared};
use std::collections::VecDeque;
use std;
use num;
#[derive(Clone)]
pub struct Complex<T> {
pub i: T,
pub q: T,
}
impl<T: num::traits::Float> Complex<T> {
pub fn mul(&mut self, a: &Complex<T>) {
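// In-place complex multiply: treating i/q as the real and imaginary
// parts of one I/Q sample, this computes (i1 + j*q1) * (i2 + j*q2).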
let i = self.i * a.i - self.q * a.q;
let q = self.i * a.q + self.q * a.i;
self.i = i;
self.q = q;
}
}
pub struct Alsa {
sps: u32,
pcm: PCM<Prepared>,
}
impl Alsa {
pub fn new(sps: u32) -> Alsa {
let pcm = PCM::open("default", Stream::Playback, Mode::Blocking).unwrap();
let mut pcm = pcm.set_parameters(Format::FloatLE, Access::Interleaved, 1, sps as usize).ok().unwrap();
Alsa { sps: sps, pcm: pcm }
}
pub fn write(&mut self, buf: &Vec<f32>) {
self.pcm.write_interleaved(&buf).unwrap();
}
}
pub fn buildsine(freq: f64, sps: f64, amp: f32) -> Option<Vec<Complex<f32>>> {
// Do not build if too low in frequency.
if freq.abs() < 500.0 {
return Option::Some(vec![Complex { i: 1.0, q: 0.0 } ]);
}
if freq * 4.0 > sps {
return Option::None;
}
// How many sample periods (our smallest unit of time) are needed
// for one full cycle of the frequency?
let timepersample = 1.0f64 / sps as f64;
let units = ((1.0 / freq).abs() / timepersample).abs();
//println!("timepersample:{} freqfullwave:{}",
// timepersample,
// 1.0 / freq
//);
// Try to find a small integer multiple of `units` that is as close
// as possible to a whole number of samples.
let mut low_diff = std::f64::MAX;
let mut low_mul = 0usize;
for x in 1..100000 {
let m = units * x as f64;
let diff = m - m.floor();
if diff < low_diff {
low_diff = diff;
low_mul = x;
}
}
let iunits = (units * low_mul as f64).floor() as usize;
println!("pre-built cosine for freq({}) with units({}) and diff({})", freq, units, low_diff);
let mut out: Vec<Complex<f32>> = Vec::new();
for x in 0..iunits {
let curtime = (x as f64) * timepersample;
out.push(Complex {
i: (curtime * freq * std::f64::consts::PI * 2.0).cos() as f32 * amp,
q: (curtime * freq * std::f64::consts::PI * 2.0).sin() as f32 * amp,
});
}
Option::Some(out)
}
pub struct FMDemod {
sps: f64,
offset: f64,
bw: f32,
q0: usize,
q1: usize,
li: f32,
lq: f32,
ifs: Vec<Complex<f32>>,
ifsndx: usize,
rsum: f32,
tapi: usize,
tapvi: Vec<f32>,
tapvq: Vec<f32>,
taps: Vec<f32>,
decim: usize,
curslack: f32,
maxphase: f32,
slack: f32,
audiodecim: usize,
pub sq: isize,
devsqlimit: isize,
sindex: f64,
}
impl FMDemod {
pub fn new(sps: f64, decim: usize, offset: f64, bw: f32, taps: Vec<f32>, devsqlimit: isize) -> FMDemod {
let ifs = buildsine(offset, sps, 1.0).unwrap();
let mut tapvi: Vec<f32> = Vec::new();
let mut tapvq: Vec<f32> = Vec::new();
for x in 0..taps.len() {
tapvi.push(0.0);
tapvq.push(0.0);
}
// If the decimation does not divide evenly we are left with a
// fractional part, so an extra input sample is skipped whenever the
// accumulated fraction reaches one whole output sample.
let actual_audio_decim = (sps / (decim as f64) / 16000.0) as f32;
// Make sure that slack is not >= 1.0
let slack = actual_audio_decim.fract();
let pract_audio_decim = actual_audio_decim.ceil() as usize;
let fmaxphaserot = ((std::f64::consts::PI * 2.0f64) / (sps / (decim as f64))) * bw as f64;
println!("slack:{} pract_audio_decim:{} decim:{} actual_audio_decim:{}",
slack, pract_audio_decim, decim, actual_audio_decim
);
FMDemod {
devsqlimit: devsqlimit,
maxphase: fmaxphaserot as f32,
audiodecim: pract_audio_decim,
slack: slack,
sps: sps,
offset: offset,
bw: bw,
li: 0.0,
lq: 0.0,
q0: 0,
q1: 0,
ifs: ifs,
ifsndx: 0,
rsum: 0.0,
tapi: 0,
tapvi: tapvi,
tapvq: tapvq,
taps: taps,
decim: decim,
curslack: 0.0,
sq: 0,
sindex: 0.0,
}
}
pub fn work(&mut self, stream: &Vec<Complex<f32>>) -> Vec<f32> {
let mut buf: Vec<f32> = Vec::with_capacity(stream.len() / self.decim / self.audiodecim);
let timepersample = 1.0f64 / self.sps as f64;
let mut lr: f32 = 0.0;
for x in 0..stream.len() {
let mut s = stream[x].clone();
//let ifc = Complex {
// i: (timepersample * self.sindex * self.offset * std::f64::consts::PI * 2.0).cos() as f32,
// q: (timepersample * self.sindex * self.offset * std::f64::consts::PI * 2.0).sin() as f32,
//};
//self.sindex += 1.0;
s.mul(&self.ifs[self.ifsndx]);
self.ifsndx = if self.ifsndx + 1 >= self.ifs.len() {
0
} else {
self.ifsndx + 1
};
if self.q0 == self.decim {
self.q0 = 0;
if self.curslack >= 1.0 {
// Hopefully, the slack is < 1.0
self.curslack = self.curslack.fract();
self.li = s.i;
self.lq = s.q;
continue;
}
self.tapvi[self.tapi as usize] = s.i;
self.tapvq[self.tapi as usize] = s.q;
let mut si = 0.0f32;
let mut sq = 0.0f32;
for ti in 0..self.taps.len() {
let off = if (ti > self.tapi) | else { self.tapi - ti };
si += self.tapvi[off] * self.taps[ti];
sq += self.tapvq[off] * self.taps[ti];
}
self.tapi += 1;
if self.tapi >= self.taps.len() {
self.tapi = 0;
}
s.i = si;
s.q = sq;
let mut a = s.i.atan2(s.q);
let mut b = self.li.atan2(self.lq);
let mut r = 0f32;
r = a - b;
if r > std::f32::consts::PI {
r = std::f32::consts::PI * 2.0 - r;
}
if r < -std::f32::consts::PI {
r = std::f32::consts::PI * 2.0 + r;
}
// This limits sharp impulses where spikes have slipped
// through our taps filter.
if r.abs() < self.maxphase {
self.rsum += r;
lr = r;
self.sq -= 1;
if self.sq < -300 {
self.sq = -300;
}
} else {
//self.rsum += lr;
self.sq += 1;
if self.sq > 3 {
self.sq = 3;
}
}
self.q1 += 1;
if self.q1 == self.audiodecim {
self.q1 = 0;
// Track how much we are off on the audio output
// due to decimation of the input stream by a value
// that causes our audio decimation to have a fractional
// part.
self.curslack += self.slack;
self.rsum /= self.audiodecim as f32;
if self.sq > 0 {
buf.push(0.0);
} else {
buf.push(self.rsum);
}
self.rsum = 0f32;
}
}
self.li = s.i;
self.lq = s.q;
self.q0 += 1;
}
// Return the buffer containing the demodulated data.
buf
}
}
#[inline]
fn u16tou8ale(v: u16) -> [u8; 2] {
[
v as u8,
(v >> 8) as u8,
]
}
// little endian
#[inline]
fn u32tou8ale(v: u32) -> [u8; 4] {
[
v as u8,
(v >> 8) as u8,
(v >> 16) as u8,
(v >> 24) as u8,
]
}
pub fn wavei8write(path: String, sps: u32, buf: &Vec<f32>) {
use std::fs::File;
use std::io::Write;
let datatotalsize = buf.len() as u32 * 4;
let mut fd = File::create(path).unwrap();
fd.write("RIFF".as_bytes()); // 4
fd.write(&u32tou8ale((datatotalsize + 44) - 8)); // filesize - 8 // 4
fd.write("WAVE".as_bytes()); // // 4
fd.write("fmt ".as_bytes()); // <format marker> // 4
fd.write(&u32tou8ale(16)); // <format data length> // 4
fd.write(&u16tou8ale(3)); // format 3 = IEEE float // 2
fd.write(&u16tou8ale(1)); // 1 channel // 2
fd.write(&u32tou8ale(sps)); // sample frequency/rate // 4
fd.write(&u32tou8ale(sps * 4)); // sps * bitsize * channels / 8 (byte rate) // 4
fd.write(&u16tou8ale(4)); // bitsize * channels / 8 (block-align) // 2
fd.write(&u16tou8ale(32)); // bits per sample // 2
fd.write("data".as_bytes()); // <data marker> // 4
fd.write(&u32tou8ale(datatotalsize)); // datasize = filesize - 44 // 4
for x in 0..buf.len() {
fd.write_f32::<LittleEndian>(buf[x]);
}
//unsafe {
// let tmp = mem::transmute::<&Vec<i8>, &Vec<u8>>(buf);
// fd.write(tmp.as_slice());
//}
}
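// Minimal usage sketch (hypothetical file name and tone parameters):
// write one second of a 440 Hz sine as a 32-bit float WAV at 16 kHz.
//
//     let sps = 16000u32;
//     let tone: Vec<f32> = (0..sps)
//         .map(|n| (n as f64 * 440.0 * 2.0 * std::f64::consts::PI / sps as f64).sin() as f32)
//         .collect();
//     wavei8write("tone.wav".to_string(), sps, &tone);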
pub struct FileSource {
fp: File,
}
impl FileSource {
pub fn new(path: String) -> FileSource {
FileSource {
fp: File::open(path).unwrap(),
}
}
pub fn recv(&mut self) -> Vec<Complex<f32>> {
let mut out: Vec<Complex<f32>> = Vec::new();
for x in 0..1024*1024*20 {
let i = match self.fp.read_f32::<LittleEndian>() {
Result::Ok(v) => v,
Result::Err(_) => break,
};
let q = match self.fp.read_f32::<LittleEndian>() {
Result::Ok(v) => v,
Result::Err(_) => break,
};
out.push(Complex { i: i, q: q });
}
out
}
}
| { self.taps.len() - (ti - self.tapi)} | conditional_block |
mod.rs | use std::path::{Path};
use std::fs::{File, PathExt};
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use std::ffi::CString;
use alsa::{PCM, Stream, Mode, Format, Access, Prepared};
use std::collections::VecDeque;
use std;
use num;
#[derive(Clone)]
pub struct Complex<T> {
pub i: T,
pub q: T,
}
impl<T: num::traits::Float> Complex<T> {
pub fn mul(&mut self, a: &Complex<T>) |
}
pub struct Alsa {
sps: u32,
pcm: PCM<Prepared>,
}
impl Alsa {
pub fn new(sps: u32) -> Alsa {
let pcm = PCM::open("default", Stream::Playback, Mode::Blocking).unwrap();
let mut pcm = pcm.set_parameters(Format::FloatLE, Access::Interleaved, 1, sps as usize).ok().unwrap();
Alsa { sps: sps, pcm: pcm }
}
pub fn write(&mut self, buf: &Vec<f32>) {
self.pcm.write_interleaved(&buf).unwrap();
}
}
pub fn buildsine(freq: f64, sps: f64, amp: f32) -> Option<Vec<Complex<f32>>> {
// Do not build if too low in frequency.
if freq.abs() < 500.0 {
return Option::Some(vec![Complex { i: 1.0, q: 0.0 } ]);
}
if freq * 4.0 > sps {
return Option::None;
}
// How many sample periods (our smallest unit of time) are needed
// for one full cycle of the frequency?
let timepersample = 1.0f64 / sps as f64;
let units = ((1.0 / freq).abs() / timepersample).abs();
//println!("timepersample:{} freqfullwave:{}",
// timepersample,
// 1.0 / freq
//);
// Try to find a small integer multiple of `units` that is as close
// as possible to a whole number of samples.
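// The table is replayed cyclically, so any fractional remainder shows
// up as a phase discontinuity once per wrap-around; minimizing it over
// small integer multiples keeps that glitch negligible.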
let mut low_diff = std::f64::MAX;
let mut low_mul = 0usize;
for x in 1..100000 {
let m = units * x as f64;
let diff = m - m.floor();
if diff < low_diff {
low_diff = diff;
low_mul = x;
}
}
let iunits = (units * low_mul as f64).floor() as usize;
println!("pre-built cosine for freq({}) with units({}) and diff({})", freq, units, low_diff);
let mut out: Vec<Complex<f32>> = Vec::new();
for x in 0..iunits {
let curtime = (x as f64) * timepersample;
out.push(Complex {
i: (curtime * freq * std::f64::consts::PI * 2.0).cos() as f32 * amp,
q: (curtime * freq * std::f64::consts::PI * 2.0).sin() as f32 * amp,
});
}
Option::Some(out)
}
pub struct FMDemod {
sps: f64,
offset: f64,
bw: f32,
q0: usize,
q1: usize,
li: f32,
lq: f32,
ifs: Vec<Complex<f32>>,
ifsndx: usize,
rsum: f32,
tapi: usize,
tapvi: Vec<f32>,
tapvq: Vec<f32>,
taps: Vec<f32>,
decim: usize,
curslack: f32,
maxphase: f32,
slack: f32,
audiodecim: usize,
pub sq: isize,
devsqlimit: isize,
sindex: f64,
}
impl FMDemod {
pub fn new(sps: f64, decim: usize, offset: f64, bw: f32, taps: Vec<f32>, devsqlimit: isize) -> FMDemod {
let ifs = buildsine(offset, sps, 1.0).unwrap();
let mut tapvi: Vec<f32> = Vec::new();
let mut tapvq: Vec<f32> = Vec::new();
for x in 0..taps.len() {
tapvi.push(0.0);
tapvq.push(0.0);
}
// If the decimation does not divide evenly we are left with a
// fractional part, so an extra input sample is skipped whenever the
// accumulated fraction reaches one whole output sample.
let actual_audio_decim = (sps / (decim as f64) / 16000.0) as f32;
// Make sure that slack is not >= 1.0
let slack = actual_audio_decim.fract();
let pract_audio_decim = actual_audio_decim.ceil() as usize;
let fmaxphaserot = ((std::f64::consts::PI * 2.0f64) / (sps / (decim as f64))) * bw as f64;
println!("slack:{} pract_audio_decim:{} decim:{} actual_audio_decim:{}",
slack, pract_audio_decim, decim, actual_audio_decim
);
FMDemod {
devsqlimit: devsqlimit,
maxphase: fmaxphaserot as f32,
audiodecim: pract_audio_decim,
slack: slack,
sps: sps,
offset: offset,
bw: bw,
li: 0.0,
lq: 0.0,
q0: 0,
q1: 0,
ifs: ifs,
ifsndx: 0,
rsum: 0.0,
tapi: 0,
tapvi: tapvi,
tapvq: tapvq,
taps: taps,
decim: decim,
curslack: 0.0,
sq: 0,
sindex: 0.0,
}
}
pub fn work(&mut self, stream: &Vec<Complex<f32>>) -> Vec<f32> {
let mut buf: Vec<f32> = Vec::with_capacity(stream.len() / self.decim / self.audiodecim);
let timepersample = 1.0f64 / self.sps as f64;
let mut lr: f32 = 0.0;
for x in 0..stream.len() {
let mut s = stream[x].clone();
//let ifc = Complex {
// i: (timepersample * self.sindex * self.offset * std::f64::consts::PI * 2.0).cos() as f32,
// q: (timepersample * self.sindex * self.offset * std::f64::consts::PI * 2.0).sin() as f32,
//};
//self.sindex += 1.0;
s.mul(&self.ifs[self.ifsndx]);
self.ifsndx = if self.ifsndx + 1 >= self.ifs.len() {
0
} else {
self.ifsndx + 1
};
if self.q0 == self.decim {
self.q0 = 0;
if self.curslack >= 1.0 {
// Hopefully, the slack is < 1.0
self.curslack = self.curslack.fract();
self.li = s.i;
self.lq = s.q;
continue;
}
self.tapvi[self.tapi as usize] = s.i;
self.tapvq[self.tapi as usize] = s.q;
let mut si = 0.0f32;
let mut sq = 0.0f32;
for ti in 0..self.taps.len() {
let off = if ti > self.tapi { self.taps.len() - (ti - self.tapi) } else { self.tapi - ti };
si += self.tapvi[off] * self.taps[ti];
sq += self.tapvq[off] * self.taps[ti];
}
self.tapi += 1;
if self.tapi >= self.taps.len() {
self.tapi = 0;
}
s.i = si;
s.q = sq;
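// Quadrature FM discriminator: the instantaneous frequency is the
// phase difference between consecutive filtered samples, wrapped
// into (-pi, pi] below before being accumulated.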
let mut a = s.i.atan2(s.q);
let mut b = self.li.atan2(self.lq);
let mut r = 0f32;
r = a - b;
if r > std::f32::consts::PI {
r = std::f32::consts::PI * 2.0 - r;
}
if r < -std::f32::consts::PI {
r = std::f32::consts::PI * 2.0 + r;
}
// This limits sharp impulses where spikes have slipped
// through our taps filter.
if r.abs() < self.maxphase {
self.rsum += r;
lr = r;
self.sq -= 1;
if self.sq < -300 {
self.sq = -300;
}
} else {
//self.rsum += lr;
self.sq += 1;
if self.sq > 3 {
self.sq = 3;
}
}
self.q1 += 1;
if self.q1 == self.audiodecim {
self.q1 = 0;
// Track how much we are off on the audio output
// due to decimation of the input stream by a value
// that causes our audio decimation to have a fractional
// part.
self.curslack += self.slack;
self.rsum /= self.audiodecim as f32;
if self.sq > 0 {
buf.push(0.0);
} else {
buf.push(self.rsum);
}
self.rsum = 0f32;
}
}
self.li = s.i;
self.lq = s.q;
self.q0 += 1;
}
// Return the buffer containing the demodulated data.
buf
}
}
#[inline]
fn u16tou8ale(v: u16) -> [u8; 2] {
[
v as u8,
(v >> 8) as u8,
]
}
// little endian
#[inline]
fn u32tou8ale(v: u32) -> [u8; 4] {
[
v as u8,
(v >> 8) as u8,
(v >> 16) as u8,
(v >> 24) as u8,
]
}
pub fn wavei8write(path: String, sps: u32, buf: &Vec<f32>) {
use std::fs::File;
use std::io::Write;
let datatotalsize = buf.len() as u32 * 4;
let mut fd = File::create(path).unwrap();
fd.write("RIFF".as_bytes()); // 4
fd.write(&u32tou8ale((datatotalsize + 44) - 8)); // filesize - 8 // 4
fd.write("WAVE".as_bytes()); // // 4
fd.write("fmt ".as_bytes()); // <format marker> // 4
fd.write(&u32tou8ale(16)); // <format data length> // 4
fd.write(&u16tou8ale(3)); // format 3 = IEEE float // 2
fd.write(&u16tou8ale(1)); // 1 channel // 2
fd.write(&u32tou8ale(sps)); // sample frequency/rate // 4
fd.write(&u32tou8ale(sps * 4)); // sps * bitsize * channels / 8 (byte rate) // 4
fd.write(&u16tou8ale(4)); // bitsize * channels / 8 (block-align) // 2
fd.write(&u16tou8ale(32)); // bits per sample // 2
fd.write("data".as_bytes()); // <data marker> // 4
fd.write(&u32tou8ale(datatotalsize)); // datasize = filesize - 44 // 4
for x in 0..buf.len() {
fd.write_f32::<LittleEndian>(buf[x]);
}
//unsafe {
// let tmp = mem::transmute::<&Vec<i8>, &Vec<u8>>(buf);
// fd.write(tmp.as_slice());
//}
}
pub struct FileSource {
fp: File,
}
impl FileSource {
pub fn new(path: String) -> FileSource {
FileSource {
fp: File::open(path).unwrap(),
}
}
pub fn recv(&mut self) -> Vec<Complex<f32>> {
let mut out: Vec<Complex<f32>> = Vec::new();
for x in 0..1024*1024*20 {
let i = match self.fp.read_f32::<LittleEndian>() {
Result::Ok(v) => v,
Result::Err(_) => break,
};
let q = match self.fp.read_f32::<LittleEndian>() {
Result::Ok(v) => v,
Result::Err(_) => break,
};
out.push(Complex { i: i, q: q });
}
out
}
}
| {
let i = self.i * a.i - self.q * a.q;
let q = self.i * a.q + self.q * a.i;
self.i = i;
self.q = q;
} | identifier_body |
mod.rs | use std::path::{Path};
use std::fs::{File, PathExt};
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use std::ffi::CString;
use alsa::{PCM, Stream, Mode, Format, Access, Prepared};
use std::collections::VecDeque;
use std;
use num;
#[derive(Clone)]
pub struct Complex<T> {
pub i: T,
pub q: T,
}
impl<T: num::traits::Float> Complex<T> {
pub fn mul(&mut self, a: &Complex<T>) {
let i = self.i * a.i - self.q * a.q;
let q = self.i * a.q + self.q * a.i;
self.i = i;
self.q = q;
}
}
pub struct Alsa {
sps: u32,
pcm: PCM<Prepared>,
}
impl Alsa {
pub fn new(sps: u32) -> Alsa {
let pcm = PCM::open("default", Stream::Playback, Mode::Blocking).unwrap();
let mut pcm = pcm.set_parameters(Format::FloatLE, Access::Interleaved, 1, sps as usize).ok().unwrap();
Alsa { sps: sps, pcm: pcm }
}
pub fn write(&mut self, buf: &Vec<f32>) {
self.pcm.write_interleaved(&buf).unwrap();
}
}
pub fn | (freq: f64, sps: f64, amp: f32) -> Option<Vec<Complex<f32>>> {
// Do not build if too low in frequency.
if freq.abs() < 500.0 {
return Option::Some(vec![Complex { i: 1.0, q: 0.0 } ]);
}
if freq * 4.0 > sps {
return Option::None;
}
// How many sample periods (our smallest unit of time) are needed
// for one full cycle of the frequency?
let timepersample = 1.0f64 / sps as f64;
let units = ((1.0 / freq).abs() / timepersample).abs();
//println!("timepersample:{} freqfullwave:{}",
// timepersample,
// 1.0 / freq
//);
// Try to find a small integer multiple of `units` that is as close
// as possible to a whole number of samples.
let mut low_diff = std::f64::MAX;
let mut low_mul = 0usize;
for x in 1..100000 {
let m = units * x as f64;
let diff = m - m.floor();
if diff < low_diff {
low_diff = diff;
low_mul = x;
}
}
let iunits = (units * low_mul as f64).floor() as usize;
println!("pre-built cosine for freq({}) with units({}) and diff({})", freq, units, low_diff);
let mut out: Vec<Complex<f32>> = Vec::new();
for x in 0..iunits {
let curtime = (x as f64) * timepersample;
out.push(Complex {
i: (curtime * freq * std::f64::consts::PI * 2.0).cos() as f32 * amp,
q: (curtime * freq * std::f64::consts::PI * 2.0).sin() as f32 * amp,
});
}
Option::Some(out)
}
pub struct FMDemod {
sps: f64,
offset: f64,
bw: f32,
q0: usize,
q1: usize,
li: f32,
lq: f32,
ifs: Vec<Complex<f32>>,
ifsndx: usize,
rsum: f32,
tapi: usize,
tapvi: Vec<f32>,
tapvq: Vec<f32>,
taps: Vec<f32>,
decim: usize,
curslack: f32,
maxphase: f32,
slack: f32,
audiodecim: usize,
pub sq: isize,
devsqlimit: isize,
sindex: f64,
}
impl FMDemod {
pub fn new(sps: f64, decim: usize, offset: f64, bw: f32, taps: Vec<f32>, devsqlimit: isize) -> FMDemod {
let ifs = buildsine(offset, sps, 1.0).unwrap();
let mut tapvi: Vec<f32> = Vec::new();
let mut tapvq: Vec<f32> = Vec::new();
for x in 0..taps.len() {
tapvi.push(0.0);
tapvq.push(0.0);
}
// If the decimation does not divide evenly we are left with a
// fractional part, so an extra input sample is skipped whenever the
// accumulated fraction reaches one whole output sample.
let actual_audio_decim = (sps / (decim as f64) / 16000.0) as f32;
// Make sure that slack is not >= 1.0
let slack = actual_audio_decim.fract();
let pract_audio_decim = actual_audio_decim.ceil() as usize;
let fmaxphaserot = ((std::f64::consts::PI * 2.0f64) / (sps / (decim as f64))) * bw as f64;
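// Largest per-sample phase step a signal confined to `bw` Hz can
// produce at the decimated rate; anything larger is treated as an
// impulse and squelched in work().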
println!("slack:{} pract_audio_decim:{} decim:{} actual_audio_decim:{}",
slack, pract_audio_decim, decim, actual_audio_decim
);
FMDemod {
devsqlimit: devsqlimit,
maxphase: fmaxphaserot as f32,
audiodecim: pract_audio_decim,
slack: slack,
sps: sps,
offset: offset,
bw: bw,
li: 0.0,
lq: 0.0,
q0: 0,
q1: 0,
ifs: ifs,
ifsndx: 0,
rsum: 0.0,
tapi: 0,
tapvi: tapvi,
tapvq: tapvq,
taps: taps,
decim: decim,
curslack: 0.0,
sq: 0,
sindex: 0.0,
}
}
pub fn work(&mut self, stream: &Vec<Complex<f32>>) -> Vec<f32> {
let mut buf: Vec<f32> = Vec::with_capacity(stream.len() / self.decim / self.audiodecim);
let timepersample = 1.0f64 / self.sps as f64;
let mut lr: f32 = 0.0;
for x in 0..stream.len() {
let mut s = stream[x].clone();
//let ifc = Complex {
// i: (timepersample * self.sindex * self.offset * std::f64::consts::PI * 2.0).cos() as f32,
// q: (timepersample * self.sindex * self.offset * std::f64::consts::PI * 2.0).sin() as f32,
//};
//self.sindex += 1.0;
s.mul(&self.ifs[self.ifsndx]);
self.ifsndx = if self.ifsndx + 1 >= self.ifs.len() {
0
} else {
self.ifsndx + 1
};
if self.q0 == self.decim {
self.q0 = 0;
if self.curslack >= 1.0 {
// Accumulated slack reached a whole output sample; skip this input sample and keep the fractional part.
self.curslack = self.curslack.fract();
self.li = s.i;
self.lq = s.q;
continue;
}
self.tapvi[self.tapi as usize] = s.i;
self.tapvq[self.tapi as usize] = s.q;
let mut si = 0.0f32;
let mut sq = 0.0f32;
for ti in 0..self.taps.len() {
let off = if ti > self.tapi { self.taps.len() - (ti - self.tapi) } else { self.tapi - ti };
si += self.tapvi[off] * self.taps[ti];
sq += self.tapvq[off] * self.taps[ti];
}
self.tapi += 1;
if self.tapi >= self.taps.len() {
self.tapi = 0;
}
s.i = si;
s.q = sq;
let a = s.i.atan2(s.q);
let b = self.li.atan2(self.lq);
// Wrap the phase difference into (-PI, PI].
let mut r = a - b;
if r > std::f32::consts::PI {
r -= std::f32::consts::PI * 2.0;
}
if r < -std::f32::consts::PI {
r += std::f32::consts::PI * 2.0;
}
// This limits sharp impulses where spikes have slipped
// through our taps filter.
if r.abs() < self.maxphase {
self.rsum += r;
lr = r;
self.sq -= 1;
if self.sq < -300 {
self.sq = -300;
}
} else {
//self.rsum += lr;
self.sq += 1;
if self.sq > 3 {
self.sq = 3;
}
}
self.q1 += 1;
if self.q1 == self.audiodecim {
self.q1 = 0;
// Track how much we are off on the audio output
// due to decimation of the input stream by a value
// that causes our audio decimation to have a fractional
// part.
self.curslack += self.slack;
self.rsum /= self.audiodecim as f32;
if self.sq > 0 {
buf.push(0.0);
} else {
buf.push(self.rsum);
}
self.rsum = 0f32;
}
}
self.li = s.i;
self.lq = s.q;
self.q0 += 1;
}
// Return the buffer containing the demodulated data.
buf
}
}
#[inline]
fn u16tou8ale(v: u16) -> [u8; 2] {
[
v as u8,
(v >> 8) as u8,
]
}
// little endian
#[inline]
fn u32tou8ale(v: u32) -> [u8; 4] {
[
v as u8,
(v >> 8) as u8,
(v >> 16) as u8,
(v >> 24) as u8,
]
}
pub fn wavei8write(path: String, sps: u32, buf: &Vec<f32>) {
use std::fs::File;
use std::io::Write;
let datatotalsize = buf.len() as u32 * 4;
let mut fd = File::create(path).unwrap();
fd.write("RIFF".as_bytes()); // 4
fd.write(&u32tou8ale((datatotalsize + 44) - 8)); // filesize - 8 // 4
fd.write("WAVE".as_bytes()); // // 4
fd.write("fmt ".as_bytes()); // <format marker> // 4
fd.write(&u32tou8ale(16)); // <format data length> // 4
fd.write(&u16tou8ale(3)); // format code 3 = IEEE float (not integer PCM) // 2
fd.write(&u16tou8ale(1)); // 1 channel // 2
fd.write(&u32tou8ale(sps)); // sample frequency/rate // 4
fd.write(&u32tou8ale(sps * 4)); // sps * bitsize * channels / 8 (byte rate) // 4
fd.write(&u16tou8ale(4)); // bitsize * channels / 8 (block-align) // 2
fd.write(&u16tou8ale(32)); // bits per sample // 2
fd.write("data".as_bytes()); // <data marker> // 4
fd.write(&u32tou8ale(datatotalsize)); // datasize = filesize - 44 // 4
for x in 0..buf.len() {
fd.write_f32::<LittleEndian>(buf[x]);
}
//unsafe {
// let tmp = mem::transmute::<&Vec<i8>, &Vec<u8>>(buf);
// fd.write(tmp.as_slice());
//}
}
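// Header-size sanity sketch for the writer above: the fields total 44 bytes
// (RIFF id 4 + RIFF size 4 + WAVE 4 + "fmt " 4 + fmt length 4 + format 2 +
// channels 2 + sample rate 4 + byte rate 4 + block align 2 + bits 2 +
// "data" 4 + data size 4), which is why the RIFF size above is written as
// (datatotalsize + 44) - 8.
#[allow(dead_code)]
const WAV_HEADER_BYTES: u32 = 4 + 4 + 4 + 4 + 4 + 2 + 2 + 4 + 4 + 2 + 2 + 4 + 4;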
pub struct FileSource {
fp: File,
}
impl FileSource {
pub fn new(path: String) -> FileSource {
FileSource {
fp: File::open(path).unwrap(),
}
}
pub fn recv(&mut self) -> Vec<Complex<f32>> {
let mut out: Vec<Complex<f32>> = Vec::new();
for _ in 0..1024 * 1024 * 20 {
let i = match self.fp.read_f32::<LittleEndian>() {
Result::Ok(v) => v,
Result::Err(_) => break,
};
let q = match self.fp.read_f32::<LittleEndian>() {
Result::Ok(v) => v,
Result::Err(_) => break,
};
out.push(Complex { i: i, q: q });
}
out
}
}
| buildsine | identifier_name |
mod.rs | use std::path::{Path};
use std::fs::{File, PathExt};
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use std::ffi::CString;
use alsa::{PCM, Stream, Mode, Format, Access, Prepared};
use std::collections::VecDeque;
use std;
use num;
#[derive(Clone)]
pub struct Complex<T> {
pub i: T,
pub q: T,
}
impl<T: num::traits::Float> Complex<T> {
pub fn mul(&mut self, a: &Complex<T>) {
let i = self.i * a.i - self.q * a.q;
let q = self.i * a.q + self.q * a.i;
self.i = i;
self.q = q;
}
}
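// Quick worked check of the product above: (1 + 2i)(3 + 4i) = -5 + 10i,
// i.e. i = 1*3 - 2*4 = -5 and q = 1*4 + 2*3 = 10. A small sketch:
#[allow(dead_code)]
fn complex_mul_demo() {
let mut a = Complex { i: 1.0f32, q: 2.0 };
a.mul(&Complex { i: 3.0, q: 4.0 });
assert!((a.i + 5.0).abs() < 1e-6 && (a.q - 10.0).abs() < 1e-6);
}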
pub struct Alsa {
sps: u32,
pcm: PCM<Prepared>,
}
impl Alsa {
pub fn new(sps: u32) -> Alsa {
let pcm = PCM::open("default", Stream::Playback, Mode::Blocking).unwrap();
let pcm = pcm.set_parameters(Format::FloatLE, Access::Interleaved, 1, sps as usize).ok().unwrap();
Alsa { sps: sps, pcm: pcm }
}
pub fn write(&mut self, buf: &Vec<f32>) {
self.pcm.write_interleaved(&buf).unwrap();
}
}
pub fn buildsine(freq: f64, sps: f64, amp: f32) -> Option<Vec<Complex<f32>>> {
// Below 500 Hz just return a single unit sample (no frequency shift).
if freq.abs() < 500.0 {
return Option::Some(vec![Complex { i: 1.0, q: 0.0 } ]);
}
if freq * 4.0 > sps {
return Option::None;
}
// How many sample periods (our smallest unit of
// time) are needed for one full cycle of the frequency?
let timepersample = 1.0f64 / sps as f64;
let units = ((1.0 / freq).abs() / timepersample).abs();
//println!("timepersample:{} freqfullwave:{}",
// timepersample,
// 1.0 / freq
//);
// Try to find a multiple of units that is as close as possible
// to a whole integer number of units.
let mut low_diff = std::f64::MAX;
let mut low_mul = 0usize;
for x in 1..100000 {
let m = units * x as f64;
let diff = m - m.floor();
if diff < low_diff {
low_diff = diff;
low_mul = x;
}
}
let iunits = (units * low_mul as f64).floor() as usize;
println!("pre-built cosine for freq({}) with units({}) and diff({})", freq, units, low_diff);
let mut out: Vec<Complex<f32>> = Vec::new();
for x in 0..iunits {
let curtime = (x as f64) * timepersample;
out.push(Complex {
i: (curtime * freq * std::f64::consts::PI * 2.0).cos() as f32 * amp,
q: (curtime * freq * std::f64::consts::PI * 2.0).sin() as f32 * amp,
});
}
Option::Some(out)
}
pub struct FMDemod {
sps: f64,
offset: f64,
bw: f32,
q0: usize,
q1: usize,
li: f32,
lq: f32,
ifs: Vec<Complex<f32>>,
ifsndx: usize,
rsum: f32,
tapi: usize,
tapvi: Vec<f32>,
tapvq: Vec<f32>,
taps: Vec<f32>,
decim: usize,
curslack: f32,
maxphase: f32,
slack: f32,
audiodecim: usize,
pub sq: isize,
devsqlimit: isize,
sindex: f64,
}
impl FMDemod {
pub fn new(sps: f64, decim: usize, offset: f64, bw: f32, taps: Vec<f32>, devsqlimit: isize) -> FMDemod {
let ifs = buildsine(offset, sps, 1.0).unwrap();
let mut tapvi: Vec<f32> = Vec::new();
let mut tapvq: Vec<f32> = Vec::new();
for _ in 0..taps.len() {
tapvi.push(0.0);
tapvq.push(0.0);
}
// If the decimation is not set perfectly then we will have
// a fractional part and we need to insert some padding when
// it reaches a value representing a whole output sample.
let actual_audio_decim = (sps / (decim as f64) / 16000.0) as f32;
// Make sure that slack is not >= 1.0
let slack = actual_audio_decim.fract();
let pract_audio_decim = actual_audio_decim.ceil() as usize;
let fmaxphaserot = ((std::f64::consts::PI * 2.0f64) / (sps / (decim as f64))) * bw as f64;
println!("slack:{} pract_audio_decim:{} decim:{} actual_audio_decim:{}",
slack, pract_audio_decim, decim, actual_audio_decim
);
FMDemod {
devsqlimit: devsqlimit,
maxphase: fmaxphaserot as f32,
audiodecim: pract_audio_decim,
slack: slack,
sps: sps,
offset: offset,
bw: bw,
li: 0.0,
lq: 0.0,
q0: 0,
q1: 0,
ifs: ifs,
ifsndx: 0,
rsum: 0.0,
tapi: 0,
tapvi: tapvi,
tapvq: tapvq,
taps: taps,
decim: decim,
curslack: 0.0,
sq: 0,
sindex: 0.0,
}
}
pub fn work(&mut self, stream: &Vec<Complex<f32>>) -> Vec<f32> {
let mut buf: Vec<f32> = Vec::with_capacity(stream.len() / self.decim / self.audiodecim);
let timepersample = 1.0f64 / self.sps as f64;
let mut lr: f32 = 0.0;
for x in 0..stream.len() {
let mut s = stream[x].clone();
//let ifc = Complex {
// i: (timepersample * self.sindex * self.offset * std::f64::consts::PI * 2.0).cos() as f32,
// q: (timepersample * self.sindex * self.offset * std::f64::consts::PI * 2.0).sin() as f32,
//};
//self.sindex += 1.0;
s.mul(&self.ifs[self.ifsndx]);
self.ifsndx = if self.ifsndx + 1 >= self.ifs.len() {
0
} else {
self.ifsndx + 1
};
if self.q0 == self.decim {
self.q0 = 0;
if self.curslack >= 1.0 {
// Accumulated slack reached a whole output sample; skip this input sample and keep the fractional part.
self.curslack = self.curslack.fract();
self.li = s.i;
self.lq = s.q;
continue;
}
self.tapvi[self.tapi as usize] = s.i;
self.tapvq[self.tapi as usize] = s.q;
let mut si = 0.0f32;
let mut sq = 0.0f32;
for ti in 0..self.taps.len() {
let off = if ti > self.tapi { self.taps.len() - (ti - self.tapi) } else { self.tapi - ti };
si += self.tapvi[off] * self.taps[ti];
sq += self.tapvq[off] * self.taps[ti];
}
self.tapi += 1;
if self.tapi >= self.taps.len() {
self.tapi = 0;
}
s.i = si;
s.q = sq;
let a = s.i.atan2(s.q);
let b = self.li.atan2(self.lq);
// Wrap the phase difference into (-PI, PI].
let mut r = a - b;
if r > std::f32::consts::PI {
r -= std::f32::consts::PI * 2.0;
}
if r < -std::f32::consts::PI {
r += std::f32::consts::PI * 2.0;
}
// This limits sharp impulses where spikes have slipped
// through our taps filter.
if r.abs() < self.maxphase {
self.rsum += r;
lr = r;
self.sq -= 1;
if self.sq < -300 {
self.sq = -300;
}
} else {
//self.rsum += lr;
self.sq += 1;
if self.sq > 3 {
self.sq = 3;
}
}
self.q1 += 1;
if self.q1 == self.audiodecim {
self.q1 = 0;
// Track how much we are off on the audio output
// due to decimation of the input stream by a value
// that causes our audio decimation to have a fractional
// part.
self.curslack += self.slack;
self.rsum /= self.audiodecim as f32;
if self.sq > 0 {
buf.push(0.0);
} else {
buf.push(self.rsum);
}
self.rsum = 0f32;
}
}
self.li = s.i;
self.lq = s.q;
self.q0 += 1;
}
// Return the buffer containing the demodulated data.
buf
}
}
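// The core of `work` above is a polar discriminator: the instantaneous
// frequency is the wrapped phase difference between consecutive complex
// samples. A standalone sketch of just that step (helper name is
// illustrative, not part of the pipeline):
#[allow(dead_code)]
fn phase_delta(prev: &Complex<f32>, cur: &Complex<f32>) -> f32 {
let mut r = cur.i.atan2(cur.q) - prev.i.atan2(prev.q);
if r > std::f32::consts::PI {
r -= std::f32::consts::PI * 2.0;
}
if r < -std::f32::consts::PI {
r += std::f32::consts::PI * 2.0;
}
r
}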
#[inline]
fn u16tou8ale(v: u16) -> [u8; 2] {
[
v as u8,
(v >> 8) as u8,
]
}
// little endian
#[inline]
fn u32tou8ale(v: u32) -> [u8; 4] {
[
v as u8,
(v >> 8) as u8,
(v >> 16) as u8,
(v >> 24) as u8,
]
}
pub fn wavei8write(path: String, sps: u32, buf: &Vec<f32>) {
use std::fs::File;
use std::io::Write;
let datatotalsize = buf.len() as u32 * 4;
let mut fd = File::create(path).unwrap();
fd.write("RIFF".as_bytes()); // 4
fd.write(&u32tou8ale((datatotalsize + 44) - 8)); // filesize - 8 // 4
fd.write("WAVE".as_bytes()); // // 4
fd.write("fmt ".as_bytes()); // <format marker> // 4
fd.write(&u32tou8ale(16)); // <format data length> // 4
fd.write(&u16tou8ale(3)); // format code 3 = IEEE float (not integer PCM) // 2
fd.write(&u16tou8ale(1)); // 1 channel // 2
fd.write(&u32tou8ale(sps)); // sample frequency/rate // 4
fd.write(&u32tou8ale(sps * 4)); // sps * bitsize * channels / 8 (byte rate) // 4
fd.write(&u16tou8ale(4)); // bitsize * channels / 8 (block-align) // 2
fd.write(&u16tou8ale(32)); // bits per sample // 2
fd.write("data".as_bytes()); // <data marker> // 4
fd.write(&u32tou8ale(datatotalsize)); // datasize = filesize - 44 // 4
| // let tmp = mem::transmute::<&Vec<i8>, &Vec<u8>>(buf);
// fd.write(tmp.as_slice());
//}
}
pub struct FileSource {
fp: File,
}
impl FileSource {
pub fn new(path: String) -> FileSource {
FileSource {
fp: File::open(path).unwrap(),
}
}
pub fn recv(&mut self) -> Vec<Complex<f32>> {
let mut out: Vec<Complex<f32>> = Vec::new();
for _ in 0..1024 * 1024 * 20 {
let i = match self.fp.read_f32::<LittleEndian>() {
Result::Ok(v) => v,
Result::Err(_) => break,
};
let q = match self.fp.read_f32::<LittleEndian>() {
Result::Ok(v) => v,
Result::Err(_) => break,
};
out.push(Complex { i: i, q: q });
}
out
}
} | for x in 0..buf.len() {
fd.write_f32::<LittleEndian>(buf[x]);
}
//unsafe { | random_line_split |
main.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
#![feature(box_patterns)]
#![feature(rustc_private)]
#![feature(collections)]
#![feature(str_char)]
#![cfg_attr(not(test), feature(exit_status))]
// TODO we're going to allocate a whole bunch of temp Strings, is it worth
// keeping some scratch mem for this and running our own StrPool?
// TODO for lint violations of names, emit a refactor script
#[macro_use]
extern crate log;
extern crate getopts;
extern crate rustc;
extern crate rustc_driver;
extern crate syntax;
extern crate strings;
use rustc::session::Session;
use rustc::session::config::{self, Input};
use rustc_driver::{driver, CompilerCalls, Compilation};
use syntax::ast;
use syntax::codemap::CodeMap;
use syntax::diagnostics;
use syntax::visit;
use std::path::PathBuf;
use std::collections::HashMap;
use changes::ChangeSet;
use visitor::FmtVisitor;
mod changes;
mod visitor;
mod functions;
mod missed_spans;
mod lists;
mod utils;
mod types;
mod expr;
mod imports;
const IDEAL_WIDTH: usize = 80;
const LEEWAY: usize = 5;
const MAX_WIDTH: usize = 100;
const MIN_STRING: usize = 10;
const TAB_SPACES: usize = 4;
const FN_BRACE_STYLE: BraceStyle = BraceStyle::SameLineWhere;
const FN_RETURN_INDENT: ReturnIndent = ReturnIndent::WithArgs;
// When we get scoped annotations, we should have rustfmt::skip.
const SKIP_ANNOTATION: &'static str = "rustfmt_skip";
#[derive(Copy, Clone)]
pub enum | {
Overwrite,
// str is the extension of the new file
NewFile(&'static str),
// Write the output to stdout.
Display,
// Return the result as a mapping from filenames to StringBuffers.
Return(&'static Fn(HashMap<String, String>)),
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum BraceStyle {
AlwaysNextLine,
PreferSameLine,
// Prefer same line except where there is a where clause, in which case force
// the brace to the next line.
SameLineWhere,
}
// How to indent a function's return type.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum ReturnIndent {
// Aligned with the arguments
WithArgs,
// Aligned with the where clause
WithWhereClause,
}
// Formatting which depends on the AST.
fn fmt_ast<'a>(krate: &ast::Crate, codemap: &'a CodeMap) -> ChangeSet<'a> {
let mut visitor = FmtVisitor::from_codemap(codemap);
visit::walk_crate(&mut visitor, krate);
let files = codemap.files.borrow();
if let Some(last) = files.last() {
visitor.format_missing(last.end_pos);
}
visitor.changes
}
// Formatting done on a char by char or line by line basis.
// TODO warn on TODOs and FIXMEs without an issue number
// TODO warn on bad license
// TODO other stuff for parity with make tidy
fn fmt_lines(changes: &mut ChangeSet) {
let mut truncate_todo = Vec::new();
// Iterate over the chars in the change set.
for (f, text) in changes.text() {
let mut trims = vec![];
let mut last_wspace: Option<usize> = None;
let mut line_len = 0;
let mut cur_line = 1;
let mut newline_count = 0;
for (c, b) in text.chars() {
if c == '\n' { // TODO test for \r too
// Check for (and record) trailing whitespace.
if let Some(lw) = last_wspace {
trims.push((cur_line, lw, b));
line_len -= b - lw;
}
// Check for any line width errors we couldn't correct.
if line_len > MAX_WIDTH {
// TODO store the error rather than reporting immediately.
println!("Rustfmt couldn't fix (sorry). {}:{}: line longer than {} characters",
f, cur_line, MAX_WIDTH);
}
line_len = 0;
cur_line += 1;
newline_count += 1;
last_wspace = None;
} else {
newline_count = 0;
line_len += 1;
if c.is_whitespace() {
if last_wspace.is_none() {
last_wspace = Some(b);
}
} else {
last_wspace = None;
}
}
}
if newline_count > 1 {
debug!("track truncate: {} {} {}", f, text.len, newline_count);
truncate_todo.push((f.to_string(), text.len - newline_count + 1))
}
for &(l, _, _) in trims.iter() {
// TODO store the error rather than reporting immediately.
println!("Rustfmt left trailing whitespace at {}:{} (sorry)", f, l);
}
}
for (f, l) in truncate_todo {
changes.get_mut(&f).truncate(l);
}
}
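// A standalone restatement of the two per-line checks above (hypothetical
// helper, not part of the visitor machinery): a line is flagged if it
// exceeds MAX_WIDTH or ends in whitespace.
#[allow(dead_code)]
fn line_violations(line: &str) -> (bool, bool) {
let too_long = line.chars().count() > MAX_WIDTH;
let trailing_ws = line.ends_with(|c: char| c.is_whitespace());
(too_long, trailing_ws)
}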
struct RustFmtCalls {
input_path: Option<PathBuf>,
write_mode: WriteMode,
}
impl<'a> CompilerCalls<'a> for RustFmtCalls {
fn early_callback(&mut self,
_: &getopts::Matches,
_: &diagnostics::registry::Registry)
-> Compilation {
Compilation::Continue
}
fn some_input(&mut self,
input: Input,
input_path: Option<PathBuf>)
-> (Input, Option<PathBuf>) {
match input_path {
Some(ref ip) => self.input_path = Some(ip.clone()),
_ => {
// FIXME should handle string input and write to stdout or something
panic!("No input path");
}
}
(input, input_path)
}
fn no_input(&mut self,
_: &getopts::Matches,
_: &config::Options,
_: &Option<PathBuf>,
_: &Option<PathBuf>,
_: &diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)> {
panic!("No input supplied to RustFmt");
}
fn late_callback(&mut self,
_: &getopts::Matches,
_: &Session,
_: &Input,
_: &Option<PathBuf>,
_: &Option<PathBuf>)
-> Compilation {
Compilation::Continue
}
fn build_controller(&mut self, _: &Session) -> driver::CompileController<'a> {
let write_mode = self.write_mode;
let mut control = driver::CompileController::basic();
control.after_parse.stop = Compilation::Stop;
control.after_parse.callback = box move |state| {
let krate = state.krate.unwrap();
let codemap = state.session.codemap();
let mut changes = fmt_ast(krate, codemap);
// For some reason, the codemap does not include terminating newlines
// so we must add one on for each file. This is sad.
changes.append_newlines();
fmt_lines(&mut changes);
// FIXME(#5) Should be user specified whether to show or replace.
let result = changes.write_all_files(write_mode);
match result {
Err(msg) => println!("Error writing files: {}", msg),
Ok(result) => {
if let WriteMode::Return(callback) = write_mode {
callback(result);
}
}
}
};
control
}
}
fn run(args: Vec<String>, write_mode: WriteMode) {
let mut call_ctxt = RustFmtCalls { input_path: None, write_mode: write_mode };
rustc_driver::run_compiler(&args, &mut call_ctxt);
}
#[cfg(not(test))]
fn main() {
let args: Vec<_> = std::env::args().collect();
//run(args, WriteMode::Display);
run(args, WriteMode::Overwrite);
std::env::set_exit_status(0);
// TODO unit tests
// let fmt = ListFormatting {
// tactic: ListTactic::Horizontal,
// separator: ",",
// trailing_separator: SeparatorTactic::Vertical,
// indent: 2,
// h_width: 80,
// v_width: 100,
// };
// let inputs = vec![(format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new())];
// let s = write_list(&inputs, &fmt);
// println!(" {}", s);
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use std::fs;
use std::io::Read;
use std::sync::atomic;
use super::*;
use super::run;
// For now, the only supported regression tests are idempotent tests - the input and
// output must match exactly.
// FIXME(#28) would be good to check for error messages and fail on them, or at least report.
#[test]
fn idempotent_tests() {
println!("Idempotent tests:");
FAILURES.store(0, atomic::Ordering::Relaxed);
// Get all files in the tests/idem directory
let files = fs::read_dir("tests/idem").unwrap();
// For each file, run rustfmt and collect the output
let mut count = 0;
for entry in files {
let path = entry.unwrap().path();
let file_name = path.to_str().unwrap();
println!("Testing '{}'...", file_name);
run(vec!["rustfmt".to_owned(), file_name.to_owned()], WriteMode::Return(HANDLE_RESULT));
count += 1;
}
// And also dogfood ourselves!
println!("Testing'src/main.rs'...");
run(vec!["rustfmt".to_string(), "src/main.rs".to_string()],
WriteMode::Return(HANDLE_RESULT));
count += 1;
// Display results
let fails = FAILURES.load(atomic::Ordering::Relaxed);
println!("Ran {} idempotent tests; {} failures.", count, fails);
assert!(fails == 0, "{} idempotent tests failed", fails);
}
// 'global' used by sys_tests and handle_result.
static FAILURES: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
// Ick, just needed to get a &'static to handle_result.
static HANDLE_RESULT: &'static Fn(HashMap<String, String>) = &handle_result;
// Compare output to input.
fn handle_result(result: HashMap<String, String>) {
let mut fails = 0;
for file_name in result.keys() {
let mut f = fs::File::open(file_name).unwrap();
let mut text = String::new();
f.read_to_string(&mut text).unwrap();
if result[file_name] != text {
fails += 1;
println!("Mismatch in {}.", file_name);
println!("{}", result[file_name]);
}
}
if fails > 0 {
FAILURES.fetch_add(1, atomic::Ordering::Relaxed);
}
}
}
| WriteMode | identifier_name |
main.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
#![feature(box_patterns)]
#![feature(rustc_private)]
#![feature(collections)]
#![feature(str_char)]
#![cfg_attr(not(test), feature(exit_status))]
// TODO we're going to allocate a whole bunch of temp Strings, is it worth
// keeping some scratch mem for this and running our own StrPool?
// TODO for lint violations of names, emit a refactor script
#[macro_use]
extern crate log;
extern crate getopts;
extern crate rustc;
extern crate rustc_driver;
extern crate syntax;
extern crate strings;
use rustc::session::Session;
use rustc::session::config::{self, Input};
use rustc_driver::{driver, CompilerCalls, Compilation};
use syntax::ast;
use syntax::codemap::CodeMap;
use syntax::diagnostics;
use syntax::visit;
use std::path::PathBuf;
use std::collections::HashMap;
use changes::ChangeSet;
use visitor::FmtVisitor;
mod changes;
mod visitor;
mod functions;
mod missed_spans;
mod lists;
mod utils;
mod types;
mod expr;
mod imports;
const IDEAL_WIDTH: usize = 80;
const LEEWAY: usize = 5;
const MAX_WIDTH: usize = 100;
const MIN_STRING: usize = 10;
const TAB_SPACES: usize = 4;
const FN_BRACE_STYLE: BraceStyle = BraceStyle::SameLineWhere;
const FN_RETURN_INDENT: ReturnIndent = ReturnIndent::WithArgs;
// When we get scoped annotations, we should have rustfmt::skip.
const SKIP_ANNOTATION: &'static str = "rustfmt_skip";
#[derive(Copy, Clone)]
pub enum WriteMode {
Overwrite,
// str is the extension of the new file
NewFile(&'static str),
// Write the output to stdout.
Display,
// Return the result as a mapping from filenames to StringBuffers.
Return(&'static Fn(HashMap<String, String>)),
}
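// Illustrative dispatch over the variants above (hypothetical helper; the
// real handling happens inside ChangeSet::write_all_files):
#[allow(dead_code)]
fn new_file_extension(mode: &WriteMode) -> Option<&'static str> {
match *mode {
WriteMode::NewFile(ext) => Some(ext),
_ => None,
}
}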
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum BraceStyle {
AlwaysNextLine,
PreferSameLine,
// Prefer same line except where there is a where clause, in which case force
// the brace to the next line.
SameLineWhere,
}
// How to indent a function's return type.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum ReturnIndent {
// Aligned with the arguments
WithArgs,
// Aligned with the where clause
WithWhereClause,
}
// Formatting which depends on the AST.
fn fmt_ast<'a>(krate: &ast::Crate, codemap: &'a CodeMap) -> ChangeSet<'a> {
let mut visitor = FmtVisitor::from_codemap(codemap);
visit::walk_crate(&mut visitor, krate);
let files = codemap.files.borrow();
if let Some(last) = files.last() {
visitor.format_missing(last.end_pos);
}
visitor.changes
}
// Formatting done on a char by char or line by line basis.
// TODO warn on TODOs and FIXMEs without an issue number
// TODO warn on bad license
// TODO other stuff for parity with make tidy
fn fmt_lines(changes: &mut ChangeSet) {
let mut truncate_todo = Vec::new();
// Iterate over the chars in the change set.
for (f, text) in changes.text() {
let mut trims = vec![];
let mut last_wspace: Option<usize> = None;
let mut line_len = 0;
let mut cur_line = 1;
let mut newline_count = 0;
for (c, b) in text.chars() {
if c == '\n' { // TODO test for \r too
// Check for (and record) trailing whitespace.
if let Some(lw) = last_wspace {
trims.push((cur_line, lw, b));
line_len -= b - lw;
}
// Check for any line width errors we couldn't correct.
if line_len > MAX_WIDTH {
// TODO store the error rather than reporting immediately.
println!("Rustfmt couldn't fix (sorry). {}:{}: line longer than {} characters",
f, cur_line, MAX_WIDTH);
}
line_len = 0;
cur_line += 1;
newline_count += 1;
last_wspace = None;
} else {
newline_count = 0;
line_len += 1;
if c.is_whitespace() {
if last_wspace.is_none() {
last_wspace = Some(b);
}
} else {
last_wspace = None;
}
}
}
if newline_count > 1 {
debug!("track truncate: {} {} {}", f, text.len, newline_count);
truncate_todo.push((f.to_string(), text.len - newline_count + 1))
}
for &(l, _, _) in trims.iter() {
// TODO store the error rather than reporting immediately.
println!("Rustfmt left trailing whitespace at {}:{} (sorry)", f, l);
}
}
for (f, l) in truncate_todo {
changes.get_mut(&f).truncate(l);
}
}
struct RustFmtCalls {
input_path: Option<PathBuf>,
write_mode: WriteMode,
}
impl<'a> CompilerCalls<'a> for RustFmtCalls {
fn early_callback(&mut self,
_: &getopts::Matches,
_: &diagnostics::registry::Registry)
-> Compilation {
Compilation::Continue
}
fn some_input(&mut self,
input: Input,
input_path: Option<PathBuf>)
-> (Input, Option<PathBuf>) {
match input_path {
Some(ref ip) => self.input_path = Some(ip.clone()),
_ => {
// FIXME should handle string input and write to stdout or something
panic!("No input path");
}
}
(input, input_path)
}
fn no_input(&mut self,
_: &getopts::Matches,
_: &config::Options,
_: &Option<PathBuf>,
_: &Option<PathBuf>,
_: &diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)> {
panic!("No input supplied to RustFmt");
}
fn late_callback(&mut self,
_: &getopts::Matches,
_: &Session,
_: &Input,
_: &Option<PathBuf>,
_: &Option<PathBuf>)
-> Compilation {
Compilation::Continue
}
fn build_controller(&mut self, _: &Session) -> driver::CompileController<'a> {
let write_mode = self.write_mode;
let mut control = driver::CompileController::basic();
control.after_parse.stop = Compilation::Stop;
control.after_parse.callback = box move |state| {
let krate = state.krate.unwrap();
let codemap = state.session.codemap();
let mut changes = fmt_ast(krate, codemap);
// For some reason, the codemap does not include terminating newlines
// so we must add one on for each file. This is sad.
changes.append_newlines();
fmt_lines(&mut changes);
// FIXME(#5) Should be user specified whether to show or replace.
let result = changes.write_all_files(write_mode);
match result {
Err(msg) => println!("Error writing files: {}", msg),
Ok(result) => {
if let WriteMode::Return(callback) = write_mode {
callback(result);
}
}
}
};
control
}
}
fn run(args: Vec<String>, write_mode: WriteMode) {
let mut call_ctxt = RustFmtCalls { input_path: None, write_mode: write_mode };
rustc_driver::run_compiler(&args, &mut call_ctxt);
}
#[cfg(not(test))]
fn main() {
let args: Vec<_> = std::env::args().collect();
//run(args, WriteMode::Display);
run(args, WriteMode::Overwrite);
std::env::set_exit_status(0);
// TODO unit tests
// let fmt = ListFormatting {
// tactic: ListTactic::Horizontal,
// separator: ",",
// trailing_separator: SeparatorTactic::Vertical,
// indent: 2,
// h_width: 80,
// v_width: 100,
// };
// let inputs = vec![(format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new()),
// (format!("foo"), String::new())];
// let s = write_list(&inputs, &fmt);
// println!(" {}", s);
} | #[cfg(test)]
mod test {
use std::collections::HashMap;
use std::fs;
use std::io::Read;
use std::sync::atomic;
use super::*;
use super::run;
// For now, the only supported regression tests are idempotent tests - the input and
// output must match exactly.
// FIXME(#28) would be good to check for error messages and fail on them, or at least report.
#[test]
fn idempotent_tests() {
println!("Idempotent tests:");
FAILURES.store(0, atomic::Ordering::Relaxed);
// Get all files in the tests/idem directory
let files = fs::read_dir("tests/idem").unwrap();
// For each file, run rustfmt and collect the output
let mut count = 0;
for entry in files {
let path = entry.unwrap().path();
let file_name = path.to_str().unwrap();
println!("Testing '{}'...", file_name);
run(vec!["rustfmt".to_owned(), file_name.to_owned()], WriteMode::Return(HANDLE_RESULT));
count += 1;
}
// And also dogfood ourselves!
println!("Testing'src/main.rs'...");
run(vec!["rustfmt".to_string(), "src/main.rs".to_string()],
WriteMode::Return(HANDLE_RESULT));
count += 1;
// Display results
let fails = FAILURES.load(atomic::Ordering::Relaxed);
println!("Ran {} idempotent tests; {} failures.", count, fails);
assert!(fails == 0, "{} idempotent tests failed", fails);
}
// 'global' used by sys_tests and handle_result.
static FAILURES: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
// Ick, just needed to get a &'static to handle_result.
static HANDLE_RESULT: &'static Fn(HashMap<String, String>) = &handle_result;
// Compare output to input.
fn handle_result(result: HashMap<String, String>) {
let mut fails = 0;
for file_name in result.keys() {
let mut f = fs::File::open(file_name).unwrap();
let mut text = String::new();
f.read_to_string(&mut text).unwrap();
if result[file_name] != text {
fails += 1;
println!("Mismatch in {}.", file_name);
println!("{}", result[file_name]);
}
}
if fails > 0 {
FAILURES.fetch_add(1, atomic::Ordering::Relaxed);
}
}
} | random_line_split |
|
main.rs | extern crate chrono;
extern crate fastcgi;
extern crate htmlescape;
extern crate postgres;
extern crate quick_xml;
extern crate regex;
extern crate reqwest;
extern crate rss;
extern crate serde;
extern crate tiny_keccak;
extern crate uuid;
use std::io;
use std::io::Write;
type UtcDateTime = chrono::DateTime<chrono::Utc>;
struct Page {
slug: String,
name: String,
url: String,
next_check: UtcDateTime,
//enabled: bool,
//last_checked: Option<UtcDateTime>,
last_modified: Option<UtcDateTime>,
last_error: Option<String>,
item_id: Option<uuid::Uuid>,
http_etag: Option<String>,
http_body_hash: Option<Vec<u8>>,
delete_regex: Option<String>,
}
// ----------------------------------------------------------------------------
fn main() {
fastcgi::run(|mut req| {
if Some("GET")!= req.param("REQUEST_METHOD").as_ref().map(String::as_ref) {
let _ = req
.stdout()
.write_all(b"Status: 405 Method Not Allowed\n\n");
return;
}
handle_request(&mut req).unwrap_or_else(|err| {
let msg = format!("{:?}", err);
let _ = req
.stdout()
.write_all(b"Status: 500 Internal Server Error\n\n");
let _ = req.stderr().write_all(msg.as_bytes());
})
})
}
fn database_connection() -> Result<postgres::Client, PagefeedError> {
let connection = postgres::Client::connect(database_url().as_ref(), postgres::NoTls)?;
Ok(connection)
}
fn database_url() -> String {
std::env::args().nth(1).unwrap_or_else(|| {
let user = std::env::var("USER").unwrap();
format!("postgres://{}@%2Frun%2Fpostgresql/pagefeed", user)
})
}
fn handle_request(req: &mut fastcgi::Request) -> Result<(), PagefeedError> {
let url = get_url(req)?;
let pathinfo = get_pathinfo(req);
let slug = pathinfo.trim_matches('/');
let mut w = io::BufWriter::new(req.stdout());
if slug.is_empty() {
handle_opml_request(&url, &mut w)
} else {
handle_feed_request(slug, &mut w)
}
}
fn handle_opml_request<W: Write>(url: &str, out: &mut W) -> Result<(), PagefeedError> {
let mut conn = database_connection()?;
let mut trans = conn.transaction()?;
let pages = get_enabled_pages(&mut trans)?;
trans.commit()?;
out.write_all(b"Content-Type: application/xml\n\n")?;
build_opml(url, &pages, out)?;
Ok(())
}
fn handle_feed_request<W: Write>(slug: &str, out: &mut W) -> Result<(), PagefeedError> {
let mut conn = database_connection()?;
let mut trans = conn.transaction()?;
let page = get_page(&mut trans, slug)?;
let page = page
.map(|page| refresh_page(&mut trans, page))
.transpose()?;
trans.commit()?;
match page {
None => {
out.write_all(b"Status: 404 Not Found\n\n")?;
Ok(())
}
Some(page) => {
let feed = build_feed(&page);
out.write_all(b"Content-Type: application/rss+xml\n\n")?;
feed.write_to(out)?;
Ok(())
}
}
}
fn get_url(req: &fastcgi::Request) -> Result<String, PagefeedError> {
use std::io::{Error, ErrorKind};
let https = match req.param("HTTPS") {
Some(ref s) => s == "on",
_ => false,
};
let server_addr = req
.param("SERVER_ADDR")
.ok_or_else(|| Error::new(ErrorKind::Other, "SERVER_ADDR unset"))?;
let server_port = req
.param("SERVER_PORT")
.ok_or_else(|| Error::new(ErrorKind::Other, "SERVER_PORT unset"))?
.parse::<u16>()
.map_err(|_| Error::new(ErrorKind::Other, "SERVER_PORT invalid"))?;
let mut script_name = req
.param("SCRIPT_NAME")
.ok_or_else(|| Error::new(ErrorKind::Other, "SCRIPT_NAME unset"))?;
if !script_name.starts_with('/') {
script_name.insert(0, '/')
}
if !script_name.ends_with('/') {
script_name.push('/')
}
Ok(match (https, server_port) {
(false, 80) => format!("http://{}{}", server_addr, script_name),
(false, _) => format!("http://{}:{}{}", server_addr, server_port, script_name),
(true, 443) => format!("https://{}{}", server_addr, script_name),
(true, _) => format!("https://{}:{}{}", server_addr, server_port, script_name),
})
}
fn get_pathinfo(req: &fastcgi::Request) -> String {
req.param("PATH_INFO").unwrap_or_default()
}
// ----------------------------------------------------------------------------
#[derive(Debug)]
enum PagefeedError {
Io(io::Error),
Postgres(postgres::error::Error),
QuickXml(quick_xml::de::DeError),
Regex(regex::Error),
Reqwest(reqwest::Error),
Rss(rss::Error),
}
impl From<io::Error> for PagefeedError {
fn from(err: io::Error) -> PagefeedError {
PagefeedError::Io(err)
}
}
impl From<postgres::error::Error> for PagefeedError {
fn from(err: postgres::error::Error) -> PagefeedError {
PagefeedError::Postgres(err)
}
}
impl From<regex::Error> for PagefeedError {
fn from(err: regex::Error) -> PagefeedError {
PagefeedError::Regex(err)
}
}
impl From<reqwest::Error> for PagefeedError {
fn from(err: reqwest::Error) -> PagefeedError {
PagefeedError::Reqwest(err)
}
}
impl From<rss::Error> for PagefeedError {
fn from(err: rss::Error) -> PagefeedError {
PagefeedError::Rss(err)
}
}
impl From<quick_xml::de::DeError> for PagefeedError {
fn from(err: quick_xml::de::DeError) -> PagefeedError {
PagefeedError::QuickXml(err)
}
}
// ----------------------------------------------------------------------------
fn build_feed(page: &Page) -> rss::Channel {
let mut items = vec![];
if page.last_modified.is_some() {
let guid = rss::GuidBuilder::default()
.value(format!("{}", page.item_id.unwrap().urn()))
.permalink(false)
.build();
let item = rss::ItemBuilder::default()
.title(page.name.to_owned())
.description(describe_page_status(page))
.link(page.url.to_owned())
.pub_date(page.last_modified.unwrap().to_rfc2822())
.guid(guid)
.build();
items.push(item);
}
rss::ChannelBuilder::default()
.title(page.name.to_owned())
.link(page.url.to_owned())
.items(items)
.build()
}
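// Sketch of the GUID scheme used above: the page's item_id is a fresh UUID
// per detected change, rendered as a URN, so feed readers surface each
// change as a new entry. Helper name is illustrative.
#[allow(dead_code)]
fn guid_for(item_id: uuid::Uuid) -> rss::Guid {
rss::GuidBuilder::default()
.value(format!("{}", item_id.urn()))
.permalink(false)
.build()
}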
fn describe_page_status(page: &Page) -> String {
page.last_error.as_ref().map_or_else(
|| format!("{} was updated.", page.name),
|err| format!("Error while checking {}: {}", page.name, err),
)
}
fn | <W: Write>(url: &str, pages: &[Page], out: &mut W) -> Result<(), PagefeedError> {
#[derive(serde::Serialize)]
#[serde(rename = "opml")]
struct Opml<'a> {
version: &'a str,
head: Head,
body: Body<'a>,
}
#[derive(serde::Serialize)]
struct Head {}
#[derive(serde::Serialize)]
struct Body<'a> {
outline: Vec<Outline<'a>>,
}
#[derive(serde::Serialize)]
struct Outline<'a> {
#[serde(rename = "type")]
typ: &'a str,
text: String,
#[serde(rename = "xmlUrl")]
xml_url: String,
#[serde(rename = "htmlUrl")]
html_url: &'a str,
}
write!(out, "{}", quick_xml::se::to_string(
&Opml {
version: "2.0",
head: Head {},
body: Body {
outline: pages
.iter()
.map(|page| Outline {
typ: "rss",
text: htmlescape::encode_minimal(&page.name),
xml_url: format!("{}{}", url, page.slug),
html_url: &page.url,
})
.collect(),
},
},
)?)?;
Ok(())
}
// ----------------------------------------------------------------------------
#[derive(Clone)]
enum PageStatus {
Unmodified,
Modified {
body_hash: Vec<u8>,
etag: Option<String>,
},
FetchError(String),
}
fn refresh_page(
conn: &mut postgres::Transaction,
page: Page,
) -> Result<Page, postgres::error::Error> {
if !page_needs_checking(&page) {
return Ok(page);
}
let status = check_page(&page);
match status {
PageStatus::Unmodified => update_page_unchanged(conn, &page)?,
PageStatus::Modified {
ref body_hash,
ref etag,
} => update_page_changed(conn, &page, etag, body_hash)?,
PageStatus::FetchError(ref error) => update_page_error(conn, &page, error)?,
}
get_page(conn, &page.slug)
.transpose()
.expect("page disappeared??")
}
fn page_needs_checking(page: &Page) -> bool {
chrono::Utc::now() >= page.next_check
}
fn check_page(page: &Page) -> PageStatus {
use reqwest::header;
use reqwest::StatusCode;
let client = reqwest::blocking::Client::new();
let mut request = client
.get(&page.url)
.header(header::USER_AGENT, "Mozilla/5.0");
if let Some(ref etag) = page.http_etag {
request = request.header(header::IF_NONE_MATCH, etag.to_string());
}
let status = request
.send()
.map_err(PagefeedError::from)
.and_then(|mut response| {
if response.status() == StatusCode::NOT_MODIFIED {
Ok(PageStatus::Unmodified)
} else {
let etag = response
.headers()
.get(header::ETAG)
.and_then(|x| x.to_str().ok())
.map(str::to_string);
let body_hash = hash(page, &mut response)?;
Ok(PageStatus::Modified { body_hash, etag })
}
})
.unwrap_or_else(|err| PageStatus::FetchError(format!("{:?}", err)));
match status {
PageStatus::Modified { ref body_hash, .. }
if Some(body_hash) == page.http_body_hash.as_ref() =>
{
PageStatus::Unmodified
}
PageStatus::FetchError(ref error) if Some(error) == page.last_error.as_ref() => {
PageStatus::Unmodified
}
_ => status,
}
}
// ----------------------------------------------------------------------------
fn hash(page: &Page, r: &mut dyn io::Read) -> Result<Vec<u8>, PagefeedError> {
let mut buf = Vec::new();
r.read_to_end(&mut buf)?;
if let Some(delete_regex) = page.delete_regex.as_ref() {
let re = regex::bytes::Regex::new(delete_regex)?;
buf = re.replace_all(&buf, &b""[..]).into_owned();
}
use tiny_keccak::{Hasher, Sha3};
let mut sha3 = Sha3::v256();
sha3.update(&buf);
let mut res: [u8; 32] = [0; 32];
sha3.finalize(&mut res);
Ok(res.to_vec())
}
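// Worked sketch of the scrub-then-hash step above: delete_regex typically
// strips volatile fragments (timestamps, counters) so cosmetic churn does
// not register as a page change. The pattern and body here are made up.
#[allow(dead_code)]
fn scrubbed_digest_demo() -> [u8; 32] {
use tiny_keccak::{Hasher, Sha3};
let re = regex::bytes::Regex::new(r"\d{2}:\d{2}:\d{2}").unwrap();
let body = re.replace_all(b"updated at 12:34:56", &b""[..]).into_owned();
let mut sha3 = Sha3::v256();
sha3.update(&body);
let mut res = [0u8; 32];
sha3.finalize(&mut res);
res
}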
// ----------------------------------------------------------------------------
fn get_enabled_pages(
conn: &mut postgres::Transaction,
) -> Result<Vec<Page>, postgres::error::Error> {
let query = "
select *,
greatest(
last_checked + check_interval,
last_modified + cooldown,
to_timestamp(0)
) as next_check
from pages
where enabled
";
conn.query(query, &[])
.map(|rows| rows.iter().map(instantiate_page).collect())
}
fn get_page(
conn: &mut postgres::Transaction,
slug: &str,
) -> Result<Option<Page>, postgres::error::Error> {
let query = "
select *,
greatest(
last_checked + check_interval,
last_modified + cooldown,
to_timestamp(0)
) as next_check
from pages
where enabled and slug = $1
";
conn.query(query, &[&slug])
.map(|rows| rows.get(0).map(instantiate_page))
}
fn instantiate_page(row: &postgres::row::Row) -> Page {
Page {
slug: row.get("slug"),
name: row.get("name"),
url: row.get("url"),
//enabled: row.get("enabled"),
delete_regex: row.get("delete_regex"),
next_check: row.get("next_check"),
//last_checked: row.get("last_checked"),
last_modified: row.get("last_modified"),
last_error: row.get("last_error"),
item_id: row.get("item_id"),
http_etag: row.get("http_etag"),
http_body_hash: row.get("http_body_hash"),
}
}
fn update_page_unchanged(
conn: &mut postgres::Transaction,
page: &Page,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp
where slug = $1
";
conn.execute(query, &[&page.slug])?;
Ok(())
}
fn update_page_changed(
conn: &mut postgres::Transaction,
page: &Page,
new_etag: &Option<String>,
new_hash: &Vec<u8>,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp,
last_modified = current_timestamp,
last_error = null,
item_id = $1,
http_etag = $2,
http_body_hash = $3
where slug = $4
";
let uuid = uuid::Uuid::new_v4();
conn.execute(query, &[&uuid, new_etag, new_hash, &page.slug])?;
Ok(())
}
fn update_page_error(
conn: &mut postgres::Transaction,
page: &Page,
error: &String,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp,
last_modified = current_timestamp,
last_error = $1,
item_id = $2,
http_etag = null,
http_body_hash = null
where slug = $3
";
let uuid = uuid::Uuid::new_v4();
conn.execute(query, &[error, &uuid, &page.slug])?;
Ok(())
}
// ----------------------------------------------------------------------------
| build_opml | identifier_name |
main.rs | extern crate chrono;
extern crate fastcgi;
extern crate htmlescape;
extern crate postgres;
extern crate quick_xml;
extern crate regex;
extern crate reqwest;
extern crate rss;
extern crate serde;
extern crate tiny_keccak;
extern crate uuid;
use std::io;
use std::io::Write;
type UtcDateTime = chrono::DateTime<chrono::Utc>;
struct Page {
slug: String,
name: String,
url: String,
next_check: UtcDateTime,
//enabled: bool,
//last_checked: Option<UtcDateTime>,
last_modified: Option<UtcDateTime>,
last_error: Option<String>,
item_id: Option<uuid::Uuid>,
http_etag: Option<String>,
http_body_hash: Option<Vec<u8>>,
delete_regex: Option<String>,
}
// ----------------------------------------------------------------------------
fn main() {
fastcgi::run(|mut req| {
if Some("GET")!= req.param("REQUEST_METHOD").as_ref().map(String::as_ref) {
let _ = req
.stdout()
.write_all(b"Status: 405 Method Not Allowed\n\n");
return;
}
handle_request(&mut req).unwrap_or_else(|err| {
let msg = format!("{:?}", err);
let _ = req
.stdout()
.write_all(b"Status: 500 Internal Server Error\n\n");
let _ = req.stderr().write_all(msg.as_bytes());
})
})
}
fn database_connection() -> Result<postgres::Client, PagefeedError> {
let connection = postgres::Client::connect(database_url().as_ref(), postgres::NoTls)?;
Ok(connection)
}
fn database_url() -> String {
std::env::args().nth(1).unwrap_or_else(|| {
let user = std::env::var("USER").unwrap();
format!("postgres://{}@%2Frun%2Fpostgresql/pagefeed", user)
})
}
fn handle_request(req: &mut fastcgi::Request) -> Result<(), PagefeedError> {
let url = get_url(req)?;
let pathinfo = get_pathinfo(req);
let slug = pathinfo.trim_matches('/');
let mut w = io::BufWriter::new(req.stdout());
if slug.is_empty() {
handle_opml_request(&url, &mut w)
} else {
handle_feed_request(slug, &mut w)
}
}
fn handle_opml_request<W: Write>(url: &str, out: &mut W) -> Result<(), PagefeedError> {
let mut conn = database_connection()?;
let mut trans = conn.transaction()?;
let pages = get_enabled_pages(&mut trans)?;
trans.commit()?;
out.write_all(b"Content-Type: application/xml\n\n")?;
build_opml(url, &pages, out)?;
Ok(())
}
fn handle_feed_request<W: Write>(slug: &str, out: &mut W) -> Result<(), PagefeedError> {
let mut conn = database_connection()?;
let mut trans = conn.transaction()?;
let page = get_page(&mut trans, slug)?;
let page = page
.map(|page| refresh_page(&mut trans, page))
.transpose()?;
trans.commit()?;
match page {
None => {
out.write_all(b"Status: 404 Not Found\n\n")?;
Ok(())
}
Some(page) => {
let feed = build_feed(&page);
out.write_all(b"Content-Type: application/rss+xml\n\n")?;
feed.write_to(out)?;
Ok(())
}
}
}
fn get_url(req: &fastcgi::Request) -> Result<String, PagefeedError> {
use std::io::{Error, ErrorKind};
let https = match req.param("HTTPS") {
Some(ref s) => s == "on",
_ => false,
};
let server_addr = req
.param("SERVER_ADDR")
.ok_or_else(|| Error::new(ErrorKind::Other, "SERVER_ADDR unset"))?;
let server_port = req
.param("SERVER_PORT")
.ok_or_else(|| Error::new(ErrorKind::Other, "SERVER_PORT unset"))?
.parse::<u16>()
.map_err(|_| Error::new(ErrorKind::Other, "SERVER_PORT invalid"))?;
let mut script_name = req
.param("SCRIPT_NAME")
.ok_or_else(|| Error::new(ErrorKind::Other, "SCRIPT_NAME unset"))?;
if !script_name.starts_with('/') {
script_name.insert(0, '/')
}
if !script_name.ends_with('/') {
script_name.push('/')
}
Ok(match (https, server_port) {
(false, 80) => format!("http://{}{}", server_addr, script_name),
(false, _) => format!("http://{}:{}{}", server_addr, server_port, script_name),
(true, 443) => format!("https://{}{}", server_addr, script_name),
(true, _) => format!("https://{}:{}{}", server_addr, server_port, script_name),
})
}
fn get_pathinfo(req: &fastcgi::Request) -> String {
req.param("PATH_INFO").unwrap_or_default()
}
// ----------------------------------------------------------------------------
#[derive(Debug)]
enum PagefeedError {
Io(io::Error),
Postgres(postgres::error::Error),
QuickXml(quick_xml::de::DeError),
Regex(regex::Error),
Reqwest(reqwest::Error),
Rss(rss::Error),
}
impl From<io::Error> for PagefeedError {
fn from(err: io::Error) -> PagefeedError {
PagefeedError::Io(err)
}
}
impl From<postgres::error::Error> for PagefeedError {
fn from(err: postgres::error::Error) -> PagefeedError {
PagefeedError::Postgres(err)
}
}
impl From<regex::Error> for PagefeedError {
fn from(err: regex::Error) -> PagefeedError {
PagefeedError::Regex(err)
}
}
impl From<reqwest::Error> for PagefeedError {
fn from(err: reqwest::Error) -> PagefeedError {
PagefeedError::Reqwest(err)
}
}
impl From<rss::Error> for PagefeedError {
fn from(err: rss::Error) -> PagefeedError {
PagefeedError::Rss(err)
}
}
impl From<quick_xml::de::DeError> for PagefeedError {
fn from(err: quick_xml::de::DeError) -> PagefeedError {
PagefeedError::QuickXml(err)
}
}
// ----------------------------------------------------------------------------
fn build_feed(page: &Page) -> rss::Channel {
let mut items = vec![];
if page.last_modified.is_some() {
let guid = rss::GuidBuilder::default()
.value(format!("{}", page.item_id.unwrap().urn()))
.permalink(false)
.build();
let item = rss::ItemBuilder::default()
.title(page.name.to_owned())
.description(describe_page_status(page))
.link(page.url.to_owned())
.pub_date(page.last_modified.unwrap().to_rfc2822())
.guid(guid)
.build();
items.push(item);
}
rss::ChannelBuilder::default()
.title(page.name.to_owned())
.link(page.url.to_owned())
.items(items)
.build()
}
fn describe_page_status(page: &Page) -> String {
page.last_error.as_ref().map_or_else(
|| format!("{} was updated.", page.name),
|err| format!("Error while checking {}: {}", page.name, err),
)
}
fn build_opml<W: Write>(url: &str, pages: &[Page], out: &mut W) -> Result<(), PagefeedError> {
#[derive(serde::Serialize)]
#[serde(rename = "opml")]
struct Opml<'a> {
version: &'a str,
head: Head,
body: Body<'a>,
}
#[derive(serde::Serialize)]
struct Head {}
#[derive(serde::Serialize)]
struct Body<'a> {
outline: Vec<Outline<'a>>,
}
#[derive(serde::Serialize)]
struct Outline<'a> {
#[serde(rename = "type")]
typ: &'a str,
text: String,
#[serde(rename = "xmlUrl")]
xml_url: String,
#[serde(rename = "htmlUrl")]
html_url: &'a str,
}
write!(out, "{}", quick_xml::se::to_string(
&Opml {
version: "2.0",
head: Head {},
body: Body {
outline: pages
.iter()
.map(|page| Outline {
typ: "rss",
text: htmlescape::encode_minimal(&page.name),
xml_url: format!("{}{}", url, page.slug),
html_url: &page.url,
})
.collect(),
},
},
)?)?;
Ok(())
}
// ----------------------------------------------------------------------------
#[derive(Clone)]
enum PageStatus {
Unmodified,
Modified {
body_hash: Vec<u8>,
etag: Option<String>,
},
FetchError(String),
}
fn refresh_page(
conn: &mut postgres::Transaction,
page: Page,
) -> Result<Page, postgres::error::Error> {
if !page_needs_checking(&page) {
return Ok(page);
}
let status = check_page(&page);
match status {
PageStatus::Unmodified => update_page_unchanged(conn, &page)?,
PageStatus::Modified {
ref body_hash,
ref etag,
} => update_page_changed(conn, &page, etag, body_hash)?,
PageStatus::FetchError(ref error) => update_page_error(conn, &page, error)?,
}
get_page(conn, &page.slug)
.transpose()
.expect("page disappeared??")
}
fn page_needs_checking(page: &Page) -> bool {
chrono::Utc::now() >= page.next_check
}
fn check_page(page: &Page) -> PageStatus {
use reqwest::header;
use reqwest::StatusCode;
let client = reqwest::blocking::Client::new();
let mut request = client
.get(&page.url)
.header(header::USER_AGENT, "Mozilla/5.0");
if let Some(ref etag) = page.http_etag {
request = request.header(header::IF_NONE_MATCH, etag.to_string());
}
let status = request
.send()
.map_err(PagefeedError::from)
.and_then(|mut response| {
if response.status() == StatusCode::NOT_MODIFIED {
Ok(PageStatus::Unmodified)
} else {
let etag = response
.headers()
.get(header::ETAG)
.and_then(|x| x.to_str().ok())
.map(str::to_string);
let body_hash = hash(page, &mut response)?;
Ok(PageStatus::Modified { body_hash, etag })
}
})
.unwrap_or_else(|err| PageStatus::FetchError(format!("{:?}", err)));
match status {
PageStatus::Modified { ref body_hash, .. }
if Some(body_hash) == page.http_body_hash.as_ref() =>
{
PageStatus::Unmodified
}
PageStatus::FetchError(ref error) if Some(error) == page.last_error.as_ref() => {
PageStatus::Unmodified
}
_ => status,
}
}
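// Restating the change-suppression rule applied above: a fetch only counts
// as modified when the new body hash differs from the stored one, and a
// repeated identical fetch error is not re-reported. Hypothetical helper:
#[allow(dead_code)]
fn is_real_change(new_hash: &[u8], stored: Option<&Vec<u8>>) -> bool {
stored.map_or(true, |old| old.as_slice() != new_hash)
}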
// ----------------------------------------------------------------------------
fn hash(page: &Page, r: &mut dyn io::Read) -> Result<Vec<u8>, PagefeedError> {
let mut buf = Vec::new();
r.read_to_end(&mut buf)?;
if let Some(delete_regex) = page.delete_regex.as_ref() {
let re = regex::bytes::Regex::new(delete_regex)?;
buf = re.replace_all(&buf, &b""[..]).into_owned();
}
use tiny_keccak::{Hasher, Sha3};
let mut sha3 = Sha3::v256();
sha3.update(&buf);
let mut res: [u8; 32] = [0; 32];
sha3.finalize(&mut res);
Ok(res.to_vec())
}
// ----------------------------------------------------------------------------
fn get_enabled_pages(
conn: &mut postgres::Transaction,
) -> Result<Vec<Page>, postgres::error::Error> {
let query = "
select *,
greatest(
last_checked + check_interval,
last_modified + cooldown,
to_timestamp(0)
) as next_check
from pages
where enabled
";
conn.query(query, &[])
.map(|rows| rows.iter().map(instantiate_page).collect())
}
fn get_page(
conn: &mut postgres::Transaction,
slug: &str,
) -> Result<Option<Page>, postgres::error::Error> {
let query = "
select *,
greatest(
last_checked + check_interval,
last_modified + cooldown,
to_timestamp(0)
) as next_check
from pages
where enabled and slug = $1
";
conn.query(query, &[&slug])
.map(|rows| rows.get(0).map(instantiate_page))
}
fn instantiate_page(row: &postgres::row::Row) -> Page {
Page {
slug: row.get("slug"),
name: row.get("name"),
url: row.get("url"),
//enabled: row.get("enabled"),
delete_regex: row.get("delete_regex"),
next_check: row.get("next_check"),
//last_checked: row.get("last_checked"),
last_modified: row.get("last_modified"),
last_error: row.get("last_error"),
item_id: row.get("item_id"),
http_etag: row.get("http_etag"),
http_body_hash: row.get("http_body_hash"),
}
}
fn update_page_unchanged(
conn: &mut postgres::Transaction,
page: &Page,
) -> Result<(), postgres::error::Error> |
fn update_page_changed(
conn: &mut postgres::Transaction,
page: &Page,
new_etag: &Option<String>,
new_hash: &Vec<u8>,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp,
last_modified = current_timestamp,
last_error = null,
item_id = $1,
http_etag = $2,
http_body_hash = $3
where slug = $4
";
let uuid = uuid::Uuid::new_v4();
conn.execute(query, &[&uuid, new_etag, new_hash, &page.slug])?;
Ok(())
}
fn update_page_error(
conn: &mut postgres::Transaction,
page: &Page,
error: &String,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp,
last_modified = current_timestamp,
last_error = $1,
item_id = $2,
http_etag = null,
http_body_hash = null
where slug = $3
";
let uuid = uuid::Uuid::new_v4();
conn.execute(query, &[error, &uuid, &page.slug])?;
Ok(())
}
// ----------------------------------------------------------------------------
| {
let query = "
update pages
set last_checked = current_timestamp
where slug = $1
";
conn.execute(query, &[&page.slug])?;
Ok(())
} | identifier_body |
main.rs | extern crate chrono;
extern crate fastcgi;
extern crate htmlescape;
extern crate postgres;
extern crate quick_xml;
extern crate regex;
extern crate reqwest;
extern crate rss;
extern crate serde;
extern crate tiny_keccak;
extern crate uuid;
use std::io;
use std::io::Write;
type UtcDateTime = chrono::DateTime<chrono::Utc>;
struct Page {
slug: String,
name: String,
url: String,
next_check: UtcDateTime,
//enabled: bool,
//last_checked: Option<UtcDateTime>,
last_modified: Option<UtcDateTime>,
last_error: Option<String>,
item_id: Option<uuid::Uuid>,
http_etag: Option<String>,
http_body_hash: Option<Vec<u8>>,
delete_regex: Option<String>,
}
// ----------------------------------------------------------------------------
fn main() {
fastcgi::run(|mut req| {
if Some("GET")!= req.param("REQUEST_METHOD").as_ref().map(String::as_ref) {
let _ = req
.stdout()
.write_all(b"Status: 405 Method Not Allowed\n\n");
return;
}
handle_request(&mut req).unwrap_or_else(|err| {
let msg = format!("{:?}", err);
let _ = req
.stdout()
.write_all(b"Status: 500 Internal Server Error\n\n");
let _ = req.stderr().write_all(msg.as_bytes());
})
})
}
fn database_connection() -> Result<postgres::Client, PagefeedError> {
let connection = postgres::Client::connect(database_url().as_ref(), postgres::NoTls)?;
Ok(connection)
}
fn database_url() -> String {
std::env::args().nth(1).unwrap_or_else(|| {
let user = std::env::var("USER").unwrap();
format!("postgres://{}@%2Frun%2Fpostgresql/pagefeed", user)
})
}
fn handle_request(req: &mut fastcgi::Request) -> Result<(), PagefeedError> {
let url = get_url(req)?;
let pathinfo = get_pathinfo(req);
let slug = pathinfo.trim_matches('/');
let mut w = io::BufWriter::new(req.stdout());
if slug.is_empty() {
handle_opml_request(&url, &mut w)
} else {
handle_feed_request(slug, &mut w)
}
}
fn handle_opml_request<W: Write>(url: &str, out: &mut W) -> Result<(), PagefeedError> {
let mut conn = database_connection()?;
let mut trans = conn.transaction()?;
let pages = get_enabled_pages(&mut trans)?;
trans.commit()?;
out.write_all(b"Content-Type: application/xml\n\n")?;
build_opml(url, &pages, out)?;
Ok(())
}
fn handle_feed_request<W: Write>(slug: &str, out: &mut W) -> Result<(), PagefeedError> {
let mut conn = database_connection()?;
let mut trans = conn.transaction()?;
let page = get_page(&mut trans, slug)?;
let page = page
.map(|page| refresh_page(&mut trans, page))
.transpose()?;
trans.commit()?;
match page {
None => {
out.write_all(b"Status: 404 Not Found\n\n")?;
Ok(())
}
Some(page) => {
let feed = build_feed(&page);
out.write_all(b"Content-Type: application/rss+xml\n\n")?;
feed.write_to(out)?;
Ok(())
}
}
}
fn get_url(req: &fastcgi::Request) -> Result<String, PagefeedError> {
use std::io::{Error, ErrorKind};
let https = match req.param("HTTPS") {
Some(ref s) => s == "on",
_ => false,
};
let server_addr = req
.param("SERVER_ADDR")
.ok_or_else(|| Error::new(ErrorKind::Other, "SERVER_ADDR unset"))?;
let server_port = req
.param("SERVER_PORT")
.ok_or_else(|| Error::new(ErrorKind::Other, "SERVER_PORT unset"))?
.parse::<u16>()
.map_err(|_| Error::new(ErrorKind::Other, "SERVER_PORT invalid"))?;
let mut script_name = req
.param("SCRIPT_NAME")
.ok_or_else(|| Error::new(ErrorKind::Other, "SCRIPT_NAME unset"))?;
if !script_name.starts_with('/') {
script_name.insert(0, '/')
}
if !script_name.ends_with('/') {
script_name.push('/')
}
Ok(match (https, server_port) {
(false, 80) => format!("http://{}{}", server_addr, script_name),
(false, _) => format!("http://{}:{}{}", server_addr, server_port, script_name),
(true, 443) => format!("https://{}{}", server_addr, script_name),
(true, _) => format!("https://{}:{}{}", server_addr, server_port, script_name),
})
}
fn get_pathinfo(req: &fastcgi::Request) -> String {
req.param("PATH_INFO").unwrap_or_default()
}
// ----------------------------------------------------------------------------
#[derive(Debug)]
enum PagefeedError {
Io(io::Error),
Postgres(postgres::error::Error),
QuickXml(quick_xml::de::DeError),
Regex(regex::Error),
Reqwest(reqwest::Error),
Rss(rss::Error),
}
impl From<io::Error> for PagefeedError {
fn from(err: io::Error) -> PagefeedError {
PagefeedError::Io(err)
}
}
impl From<postgres::error::Error> for PagefeedError {
fn from(err: postgres::error::Error) -> PagefeedError {
PagefeedError::Postgres(err)
}
}
impl From<regex::Error> for PagefeedError {
fn from(err: regex::Error) -> PagefeedError {
PagefeedError::Regex(err)
}
}
impl From<reqwest::Error> for PagefeedError {
fn from(err: reqwest::Error) -> PagefeedError {
PagefeedError::Reqwest(err)
}
}
impl From<rss::Error> for PagefeedError {
fn from(err: rss::Error) -> PagefeedError {
PagefeedError::Rss(err)
}
}
impl From<quick_xml::de::DeError> for PagefeedError {
fn from(err: quick_xml::de::DeError) -> PagefeedError {
PagefeedError::QuickXml(err)
}
}
// ----------------------------------------------------------------------------
fn build_feed(page: &Page) -> rss::Channel {
let mut items = vec![];
if page.last_modified.is_some() {
let guid = rss::GuidBuilder::default()
.value(format!("{}", page.item_id.unwrap().urn()))
.permalink(false)
.build();
let item = rss::ItemBuilder::default()
.title(page.name.to_owned())
.description(describe_page_status(page))
.link(page.url.to_owned())
.pub_date(page.last_modified.unwrap().to_rfc2822())
.guid(guid)
.build();
items.push(item);
}
rss::ChannelBuilder::default()
.title(page.name.to_owned())
.link(page.url.to_owned())
.items(items)
.build()
} | fn describe_page_status(page: &Page) -> String {
page.last_error.as_ref().map_or_else(
|| format!("{} was updated.", page.name),
|err| format!("Error while checking {}: {}", page.name, err),
)
}
fn build_opml<W: Write>(url: &str, pages: &[Page], out: &mut W) -> Result<(), PagefeedError> {
#[derive(serde::Serialize)]
#[serde(rename = "opml")]
struct Opml<'a> {
version: &'a str,
head: Head,
body: Body<'a>,
}
#[derive(serde::Serialize)]
struct Head {}
#[derive(serde::Serialize)]
struct Body<'a> {
outline: Vec<Outline<'a>>,
}
#[derive(serde::Serialize)]
struct Outline<'a> {
#[serde(rename = "type")]
typ: &'a str,
text: String,
#[serde(rename = "xmlUrl")]
xml_url: String,
#[serde(rename = "htmlUrl")]
html_url: &'a str,
}
write!(out, "{}", quick_xml::se::to_string(
&Opml {
version: "2.0",
head: Head {},
body: Body {
outline: pages
.iter()
.map(|page| Outline {
typ: "rss",
text: htmlescape::encode_minimal(&page.name),
xml_url: format!("{}{}", url, page.slug),
html_url: &page.url,
})
.collect(),
},
},
)?)?;
Ok(())
}
// ----------------------------------------------------------------------------
#[derive(Clone)]
enum PageStatus {
Unmodified,
Modified {
body_hash: Vec<u8>,
etag: Option<String>,
},
FetchError(String),
}
fn refresh_page(
conn: &mut postgres::Transaction,
page: Page,
) -> Result<Page, postgres::error::Error> {
if !page_needs_checking(&page) {
return Ok(page);
}
let status = check_page(&page);
match status {
PageStatus::Unmodified => update_page_unchanged(conn, &page)?,
PageStatus::Modified {
ref body_hash,
ref etag,
} => update_page_changed(conn, &page, etag, body_hash)?,
PageStatus::FetchError(ref error) => update_page_error(conn, &page, error)?,
}
get_page(conn, &page.slug)
.transpose()
.expect("page disappeared??")
}
fn page_needs_checking(page: &Page) -> bool {
chrono::Utc::now() >= page.next_check
}
fn check_page(page: &Page) -> PageStatus {
use reqwest::header;
use reqwest::StatusCode;
let client = reqwest::blocking::Client::new();
let mut request = client
.get(&page.url)
.header(header::USER_AGENT, "Mozilla/5.0");
if let Some(ref etag) = page.http_etag {
request = request.header(header::IF_NONE_MATCH, etag.to_string());
}
let status = request
.send()
.map_err(PagefeedError::from)
.and_then(|mut response| {
if response.status() == StatusCode::NOT_MODIFIED {
Ok(PageStatus::Unmodified)
} else {
let etag = response
.headers()
.get(header::ETAG)
.and_then(|x| x.to_str().ok())
.map(str::to_string);
let body_hash = hash(page, &mut response)?;
Ok(PageStatus::Modified { body_hash, etag })
}
})
.unwrap_or_else(|err| PageStatus::FetchError(format!("{:?}", err)));
match status {
PageStatus::Modified { ref body_hash, .. }
if Some(body_hash) == page.http_body_hash.as_ref() =>
{
PageStatus::Unmodified
}
PageStatus::FetchError(ref error) if Some(error) == page.last_error.as_ref() => {
PageStatus::Unmodified
}
_ => status,
}
}
// ----------------------------------------------------------------------------
fn hash(page: &Page, r: &mut dyn io::Read) -> Result<Vec<u8>, PagefeedError> {
let mut buf = Vec::new();
r.read_to_end(&mut buf)?;
if let Some(delete_regex) = page.delete_regex.as_ref() {
let re = regex::bytes::Regex::new(delete_regex)?;
buf = re.replace_all(&buf, &b""[..]).into_owned();
}
use tiny_keccak::{Hasher, Sha3};
let mut sha3 = Sha3::v256();
sha3.update(&buf);
let mut res: [u8; 32] = [0; 32];
sha3.finalize(&mut res);
Ok(res.to_vec())
}
// ----------------------------------------------------------------------------
fn get_enabled_pages(
conn: &mut postgres::Transaction,
) -> Result<Vec<Page>, postgres::error::Error> {
let query = "
select *,
greatest(
last_checked + check_interval,
last_modified + cooldown,
to_timestamp(0)
) as next_check
from pages
where enabled
";
conn.query(query, &[])
.map(|rows| rows.iter().map(instantiate_page).collect())
}
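// Note on the query above: `next_check` folds both pacing rules into SQL.
// `last_checked + check_interval` spaces out routine polls and
// `last_modified + cooldown` suppresses re-checks right after a change;
// since Postgres' greatest() ignores NULL arguments, a never-checked row
// (NULL timestamps) falls through to to_timestamp(0) and is due immediately.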
fn get_page(
conn: &mut postgres::Transaction,
slug: &str,
) -> Result<Option<Page>, postgres::error::Error> {
let query = "
select *,
greatest(
last_checked + check_interval,
last_modified + cooldown,
to_timestamp(0)
) as next_check
from pages
where enabled and slug = $1
";
conn.query(query, &[&slug])
.map(|rows| rows.get(0).map(instantiate_page))
}
fn instantiate_page(row: &postgres::row::Row) -> Page {
Page {
slug: row.get("slug"),
name: row.get("name"),
url: row.get("url"),
//enabled: row.get("enabled"),
delete_regex: row.get("delete_regex"),
next_check: row.get("next_check"),
//last_checked: row.get("last_checked"),
last_modified: row.get("last_modified"),
last_error: row.get("last_error"),
item_id: row.get("item_id"),
http_etag: row.get("http_etag"),
http_body_hash: row.get("http_body_hash"),
}
}
fn update_page_unchanged(
conn: &mut postgres::Transaction,
page: &Page,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp
where slug = $1
";
conn.execute(query, &[&page.slug])?;
Ok(())
}
fn update_page_changed(
conn: &mut postgres::Transaction,
page: &Page,
new_etag: &Option<String>,
new_hash: &Vec<u8>,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp,
last_modified = current_timestamp,
last_error = null,
item_id = $1,
http_etag = $2,
http_body_hash = $3
where slug = $4
";
let uuid = uuid::Uuid::new_v4();
conn.execute(query, &[&uuid, new_etag, new_hash, &page.slug])?;
Ok(())
}
fn update_page_error(
conn: &mut postgres::Transaction,
page: &Page,
error: &String,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp,
last_modified = current_timestamp,
last_error = $1,
item_id = $2,
http_etag = null,
http_body_hash = null
where slug = $3
";
let uuid = uuid::Uuid::new_v4();
conn.execute(query, &[error, &uuid, &page.slug])?;
Ok(())
}
// ---------------------------------------------------------------------------- | random_line_split |
|
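The `hash` routine above is what makes the change detection usable in practice: each page's optional `delete_regex` strips volatile fragments (counters, timestamps, rotating markup) before the SHA3-256 digest is taken, so only meaningful edits flip `http_body_hash`. A minimal standalone sketch of the same idea, with a made-up `ad-[0-9]+` pattern standing in for a real per-page regex:

fn body_fingerprint(body: &[u8]) -> Vec<u8> {
    use tiny_keccak::{Hasher, Sha3};
    // Hypothetical volatile-content pattern; real patterns live in the DB.
    let re = regex::bytes::Regex::new(r"ad-[0-9]+").unwrap();
    let cleaned = re.replace_all(body, &b""[..]).into_owned();
    let mut sha3 = Sha3::v256();
    sha3.update(&cleaned);
    let mut digest = [0u8; 32];
    sha3.finalize(&mut digest);
    digest.to_vec()
}

Two fetches compare equal exactly when their cleaned bodies match, which is the `Some(body_hash) == page.http_body_hash.as_ref()` test in `check_page`.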
main.rs | extern crate chrono;
extern crate fastcgi;
extern crate htmlescape;
extern crate postgres;
extern crate quick_xml;
extern crate regex;
extern crate reqwest;
extern crate rss;
extern crate serde;
extern crate tiny_keccak;
extern crate uuid;
use std::io;
use std::io::Write;
type UtcDateTime = chrono::DateTime<chrono::Utc>;
struct Page {
slug: String,
name: String,
url: String,
next_check: UtcDateTime,
//enabled: bool,
//last_checked: Option<UtcDateTime>,
last_modified: Option<UtcDateTime>,
last_error: Option<String>,
item_id: Option<uuid::Uuid>,
http_etag: Option<String>,
http_body_hash: Option<Vec<u8>>,
delete_regex: Option<String>,
}
// ----------------------------------------------------------------------------
fn main() {
fastcgi::run(|mut req| {
if Some("GET")!= req.param("REQUEST_METHOD").as_ref().map(String::as_ref) {
let _ = req
.stdout()
.write_all(b"Status: 405 Method Not Allowed\n\n");
return;
}
handle_request(&mut req).unwrap_or_else(|err| {
let msg = format!("{:?}", err);
let _ = req
.stdout()
.write_all(b"Status: 500 Internal Server Error\n\n");
let _ = req.stderr().write_all(msg.as_bytes());
})
})
}
fn database_connection() -> Result<postgres::Client, PagefeedError> {
let connection = postgres::Client::connect(database_url().as_ref(), postgres::NoTls)?;
Ok(connection)
}
fn database_url() -> String {
std::env::args().nth(1).unwrap_or_else(|| {
let user = std::env::var("USER").unwrap();
format!("postgres://{}@%2Frun%2Fpostgresql/pagefeed", user)
})
}
fn handle_request(req: &mut fastcgi::Request) -> Result<(), PagefeedError> {
let url = get_url(req)?;
let pathinfo = get_pathinfo(req);
let slug = pathinfo.trim_matches('/');
let mut w = io::BufWriter::new(req.stdout());
if slug.is_empty() | else {
handle_feed_request(slug, &mut w)
}
}
fn handle_opml_request<W: Write>(url: &str, out: &mut W) -> Result<(), PagefeedError> {
let mut conn = database_connection()?;
let mut trans = conn.transaction()?;
let pages = get_enabled_pages(&mut trans)?;
trans.commit()?;
out.write_all(b"Content-Type: application/xml\n\n")?;
build_opml(url, &pages, out)?;
Ok(())
}
fn handle_feed_request<W: Write>(slug: &str, out: &mut W) -> Result<(), PagefeedError> {
let mut conn = database_connection()?;
let mut trans = conn.transaction()?;
let page = get_page(&mut trans, slug)?;
let page = page
.map(|page| refresh_page(&mut trans, page))
.transpose()?;
trans.commit()?;
match page {
None => {
out.write_all(b"Status: 404 Not Found\n\n")?;
Ok(())
}
Some(page) => {
let feed = build_feed(&page);
out.write_all(b"Content-Type: application/rss+xml\n\n")?;
feed.write_to(out)?;
Ok(())
}
}
}
fn get_url(req: &fastcgi::Request) -> Result<String, PagefeedError> {
use std::io::{Error, ErrorKind};
let https = match req.param("HTTPS") {
Some(ref s) => s == "on",
_ => false,
};
let server_addr = req
.param("SERVER_ADDR")
.ok_or_else(|| Error::new(ErrorKind::Other, "SERVER_ADDR unset"))?;
let server_port = req
.param("SERVER_PORT")
.ok_or_else(|| Error::new(ErrorKind::Other, "SERVER_PORT unset"))?
.parse::<u16>()
.map_err(|_| Error::new(ErrorKind::Other, "SERVER_PORT invalid"))?;
let mut script_name = req
.param("SCRIPT_NAME")
.ok_or_else(|| Error::new(ErrorKind::Other, "SCRIPT_NAME unset"))?;
if !script_name.starts_with('/') {
script_name.insert(0, '/')
}
if !script_name.ends_with('/') {
script_name.push('/')
}
Ok(match (https, server_port) {
(false, 80) => format!("http://{}{}", server_addr, script_name),
(false, _) => format!("http://{}:{}{}", server_addr, server_port, script_name),
(true, 443) => format!("https://{}{}", server_addr, script_name),
(true, _) => format!("https://{}:{}{}", server_addr, server_port, script_name),
})
}
fn get_pathinfo(req: &fastcgi::Request) -> String {
req.param("PATH_INFO").unwrap_or_default()
}
// ----------------------------------------------------------------------------
#[derive(Debug)]
enum PagefeedError {
Io(io::Error),
Postgres(postgres::error::Error),
QuickXml(quick_xml::de::DeError),
Regex(regex::Error),
Reqwest(reqwest::Error),
Rss(rss::Error),
}
impl From<io::Error> for PagefeedError {
fn from(err: io::Error) -> PagefeedError {
PagefeedError::Io(err)
}
}
impl From<postgres::error::Error> for PagefeedError {
fn from(err: postgres::error::Error) -> PagefeedError {
PagefeedError::Postgres(err)
}
}
impl From<regex::Error> for PagefeedError {
fn from(err: regex::Error) -> PagefeedError {
PagefeedError::Regex(err)
}
}
impl From<reqwest::Error> for PagefeedError {
fn from(err: reqwest::Error) -> PagefeedError {
PagefeedError::Reqwest(err)
}
}
impl From<rss::Error> for PagefeedError {
fn from(err: rss::Error) -> PagefeedError {
PagefeedError::Rss(err)
}
}
impl From<quick_xml::de::DeError> for PagefeedError {
fn from(err: quick_xml::de::DeError) -> PagefeedError {
PagefeedError::QuickXml(err)
}
}
// ----------------------------------------------------------------------------
fn build_feed(page: &Page) -> rss::Channel {
let mut items = vec![];
if page.last_modified.is_some() {
let guid = rss::GuidBuilder::default()
.value(format!("{}", page.item_id.unwrap().urn()))
.permalink(false)
.build();
let item = rss::ItemBuilder::default()
.title(page.name.to_owned())
.description(describe_page_status(page))
.link(page.url.to_owned())
.pub_date(page.last_modified.unwrap().to_rfc2822())
.guid(guid)
.build();
items.push(item);
}
rss::ChannelBuilder::default()
.title(page.name.to_owned())
.link(page.url.to_owned())
.items(items)
.build()
}
fn describe_page_status(page: &Page) -> String {
page.last_error.as_ref().map_or_else(
|| format!("{} was updated.", page.name),
|err| format!("Error while checking {}: {}", page.name, err),
)
}
fn build_opml<W: Write>(url: &str, pages: &[Page], out: &mut W) -> Result<(), PagefeedError> {
#[derive(serde::Serialize)]
#[serde(rename = "opml")]
struct Opml<'a> {
version: &'a str,
head: Head,
body: Body<'a>,
}
#[derive(serde::Serialize)]
struct Head {}
#[derive(serde::Serialize)]
struct Body<'a> {
outline: Vec<Outline<'a>>,
}
#[derive(serde::Serialize)]
struct Outline<'a> {
#[serde(rename = "type")]
typ: &'a str,
text: String,
#[serde(rename = "xmlUrl")]
xml_url: String,
#[serde(rename = "htmlUrl")]
html_url: &'a str,
}
write!(out, "{}", quick_xml::se::to_string(
&Opml {
version: "2.0",
head: Head {},
body: Body {
outline: pages
.iter()
.map(|page| Outline {
typ: "rss",
text: htmlescape::encode_minimal(&page.name),
xml_url: format!("{}{}", url, page.slug),
html_url: &page.url,
})
.collect(),
},
},
)?)?;
Ok(())
}
// ----------------------------------------------------------------------------
#[derive(Clone)]
enum PageStatus {
Unmodified,
Modified {
body_hash: Vec<u8>,
etag: Option<String>,
},
FetchError(String),
}
fn refresh_page(
conn: &mut postgres::Transaction,
page: Page,
) -> Result<Page, postgres::error::Error> {
if !page_needs_checking(&page) {
return Ok(page);
}
let status = check_page(&page);
match status {
PageStatus::Unmodified => update_page_unchanged(conn, &page)?,
PageStatus::Modified {
ref body_hash,
ref etag,
} => update_page_changed(conn, &page, etag, body_hash)?,
PageStatus::FetchError(ref error) => update_page_error(conn, &page, error)?,
}
get_page(conn, &page.slug)
.transpose()
.expect("page disappeared??")
}
fn page_needs_checking(page: &Page) -> bool {
chrono::Utc::now() >= page.next_check
}
fn check_page(page: &Page) -> PageStatus {
use reqwest::header;
use reqwest::StatusCode;
let client = reqwest::blocking::Client::new();
let mut request = client
.get(&page.url)
.header(header::USER_AGENT, "Mozilla/5.0");
if let Some(ref etag) = page.http_etag {
request = request.header(header::IF_NONE_MATCH, etag.to_string());
}
let status = request
.send()
.map_err(PagefeedError::from)
.and_then(|mut response| {
if response.status() == StatusCode::NOT_MODIFIED {
Ok(PageStatus::Unmodified)
} else {
let etag = response
.headers()
.get(header::ETAG)
.and_then(|x| x.to_str().ok())
.map(str::to_string);
let body_hash = hash(page, &mut response)?;
Ok(PageStatus::Modified { body_hash, etag })
}
})
.unwrap_or_else(|err| PageStatus::FetchError(format!("{:?}", err)));
match status {
PageStatus::Modified { ref body_hash, .. }
if Some(body_hash) == page.http_body_hash.as_ref() =>
{
PageStatus::Unmodified
}
PageStatus::FetchError(ref error) if Some(error) == page.last_error.as_ref() => {
PageStatus::Unmodified
}
_ => status,
}
}
// ----------------------------------------------------------------------------
fn hash(page: &Page, r: &mut dyn io::Read) -> Result<Vec<u8>, PagefeedError> {
let mut buf = Vec::new();
r.read_to_end(&mut buf)?;
if let Some(delete_regex) = page.delete_regex.as_ref() {
let re = regex::bytes::Regex::new(delete_regex)?;
buf = re.replace_all(&buf, &b""[..]).into_owned();
}
use tiny_keccak::{Hasher, Sha3};
let mut sha3 = Sha3::v256();
sha3.update(&buf);
let mut res: [u8; 32] = [0; 32];
sha3.finalize(&mut res);
Ok(res.to_vec())
}
// ----------------------------------------------------------------------------
fn get_enabled_pages(
conn: &mut postgres::Transaction,
) -> Result<Vec<Page>, postgres::error::Error> {
let query = "
select *,
greatest(
last_checked + check_interval,
last_modified + cooldown,
to_timestamp(0)
) as next_check
from pages
where enabled
";
conn.query(query, &[])
.map(|rows| rows.iter().map(instantiate_page).collect())
}
fn get_page(
conn: &mut postgres::Transaction,
slug: &str,
) -> Result<Option<Page>, postgres::error::Error> {
let query = "
select *,
greatest(
last_checked + check_interval,
last_modified + cooldown,
to_timestamp(0)
) as next_check
from pages
where enabled and slug = $1
";
conn.query(query, &[&slug])
.map(|rows| rows.get(0).map(instantiate_page))
}
fn instantiate_page(row: &postgres::row::Row) -> Page {
Page {
slug: row.get("slug"),
name: row.get("name"),
url: row.get("url"),
//enabled: row.get("enabled"),
delete_regex: row.get("delete_regex"),
next_check: row.get("next_check"),
//last_checked: row.get("last_checked"),
last_modified: row.get("last_modified"),
last_error: row.get("last_error"),
item_id: row.get("item_id"),
http_etag: row.get("http_etag"),
http_body_hash: row.get("http_body_hash"),
}
}
fn update_page_unchanged(
conn: &mut postgres::Transaction,
page: &Page,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp
where slug = $1
";
conn.execute(query, &[&page.slug])?;
Ok(())
}
fn update_page_changed(
conn: &mut postgres::Transaction,
page: &Page,
new_etag: &Option<String>,
new_hash: &Vec<u8>,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp,
last_modified = current_timestamp,
last_error = null,
item_id = $1,
http_etag = $2,
http_body_hash = $3
where slug = $4
";
let uuid = uuid::Uuid::new_v4();
conn.execute(query, &[&uuid, new_etag, new_hash, &page.slug])?;
Ok(())
}
fn update_page_error(
conn: &mut postgres::Transaction,
page: &Page,
error: &String,
) -> Result<(), postgres::error::Error> {
let query = "
update pages
set last_checked = current_timestamp,
last_modified = current_timestamp,
last_error = $1,
item_id = $2,
http_etag = null,
http_body_hash = null
where slug = $3
";
let uuid = uuid::Uuid::new_v4();
conn.execute(query, &[error, &uuid, &page.slug])?;
Ok(())
}
// ----------------------------------------------------------------------------
| {
handle_opml_request(&url, &mut w)
} | conditional_block |
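A note on the error plumbing: every `From<...> for PagefeedError` impl above exists so the `?` operator can convert library errors into the one enum that `handle_request` returns. A small sketch of the pattern in isolation (hypothetical `AppError`/`parse_config` names, not from the source):

use std::io;

#[derive(Debug)]
enum AppError {
    Io(io::Error),
    Parse(std::num::ParseIntError),
}

impl From<io::Error> for AppError {
    fn from(err: io::Error) -> AppError { AppError::Io(err) }
}

impl From<std::num::ParseIntError> for AppError {
    fn from(err: std::num::ParseIntError) -> AppError { AppError::Parse(err) }
}

fn parse_config(path: &str) -> Result<u16, AppError> {
    // Each `?` picks the matching From impl automatically.
    let text = std::fs::read_to_string(path)?; // io::Error -> AppError
    let port = text.trim().parse::<u16>()?;    // ParseIntError -> AppError
    Ok(port)
}

This is why `handle_request` can mix Postgres, I/O, XML, and HTTP calls behind a single `Result<(), PagefeedError>`.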
rally.rs | use amethyst::{
assets::{AssetStorage, Handle, Loader},
core::Time,
ecs::prelude::{Dispatcher, DispatcherBuilder, Entity},
input::{is_close_requested, is_key_down, BindingTypes},
prelude::*,
renderer::{
debug_drawing::{DebugLines, DebugLinesComponent, DebugLinesParams},
ImageFormat, SpriteSheet, SpriteSheetFormat, Texture,
},
ui::{UiCreator, UiFinder, UiText, UiTransform},
utils::{
fps_counter::FpsCounter,
removal::{exec_removal, Removal},
},
winit::VirtualKeyCode,
};
use serde::{Deserialize, Serialize};
use std::fmt::{self, Display};
use crate::pause::PauseMenuState;
use crate::score_screen::ScoreScreen;
use crate::resources::{
initialize_weapon_fire_resource, ArenaNavMesh, ArenaNavMeshFinal, GameModeSetup, GameScore,
GameTeamSetup, GameVehicleSetup, WeaponFireResource,
};
use crate::entities::{
connect_players_to_ui, initialize_camera, initialize_camera_to_player, initialize_timer_ui,
intialize_arena, intialize_player, PlayerStatusText,
};
use crate::components::{
get_none_vehicle, ArenaElement, ArenaNames, ArenaProperties, ArenaStoreResource, Armor, Health,
Hitbox, Particles, Player, PlayerWeaponIcon, Repair, Shield, Vehicle, WeaponArray, WeaponFire,
};
use crate::systems::{
CameraTrackingSystem, CollisionVehToVehSystem, CollisionWeaponFireHitboxSystem,
MoveParticlesSystem, MoveWeaponFireSystem, PathingLinesSystem, VehicleMoveSystem,
VehicleShieldArmorHealthSystem, VehicleStatusSystem, VehicleTrackingSystem,
VehicleWeaponsSystem,
};
pub const PLAYER_CAMERA: bool = false;
pub const DEBUG_LINES: bool = false;
//cargo run --features sdl_controller
//Damage at speed of 100
pub const BASE_COLLISION_DAMAGE: f32 = 20.0;
pub const COLLISION_PIERCING_DAMAGE_PCT: f32 = 0.0;
pub const COLLISION_SHIELD_DAMAGE_PCT: f32 = 25.0;
pub const COLLISION_ARMOR_DAMAGE_PCT: f32 = 80.0;
pub const COLLISION_HEALTH_DAMAGE_PCT: f32 = 100.0;
#[derive(Default)]
pub struct GameplayState<'a, 'b> {
player_ui_initialized: bool,
// If the Game is paused or not
pub paused: bool,
// The UI root entity. Deleting this should remove the complete UI
ui_root: Option<Entity>,
// A reference to the FPS display, which we want to interact with
fps_display: Option<Entity>,
/// The `State` specific `Dispatcher`, containing `System`s only relevant for this `State`.
dispatcher: Option<Dispatcher<'a, 'b>>,
sprite_sheet_handle: Option<Handle<SpriteSheet>>, // Load the spritesheet necessary to render the graphics.
texture_sheet_handle: Option<Handle<SpriteSheet>>,
}
impl<'a, 'b> SimpleState for GameplayState<'a, 'b> {
fn on_start(&mut self, mut data: StateData<'_, GameData<'_, '_>>) {
self.player_ui_initialized = false;
let world = &mut data.world;
self.ui_root =
Some(world.exec(|mut creator: UiCreator<'_>| creator.create("ui/gameplay.ron", ())));
world.register::<UiText>();
world.register::<UiTransform>();
world.register::<Armor>();
world.register::<Health>();
world.register::<ArenaElement>();
world.register::<Hitbox>();
world.register::<Player>();
world.register::<Repair>();
world.register::<Shield>();
world.register::<Vehicle>();
world.register::<WeaponArray>();
world.register::<WeaponFire>();
world.register::<Particles>();
world.register::<PlayerWeaponIcon>();
// Setup debug lines as a resource
world.insert(DebugLines::new());
// Configure width of lines. Optional step
world.insert(DebugLinesParams { line_width: 2.0 });
// Setup debug lines as a component and add lines to render axis&grid
let debug_lines_component = DebugLinesComponent::new();
world.create_entity().with(debug_lines_component).build();
world.register::<Removal<u32>>();
self.sprite_sheet_handle.replace(load_sprite_sheet(
world,
"texture/rally_spritesheet.png".to_string(),
"texture/rally_spritesheet.ron".to_string(),
));
self.texture_sheet_handle.replace(load_sprite_sheet(
world,
"texture/rally_texture_sheet.png".to_string(),
"texture/rally_texture_sheet.ron".to_string(),
));
let weapon_fire_resource: WeaponFireResource =
initialize_weapon_fire_resource(world, self.sprite_sheet_handle.clone().unwrap());
initialize_timer_ui(world);
world.insert(ArenaNavMesh {
vertices: Vec::new(),
triangles: Vec::new(),
});
world.insert(ArenaNavMeshFinal { mesh: None });
intialize_arena(
world,
self.sprite_sheet_handle.clone().unwrap(),
self.texture_sheet_handle.clone().unwrap(),
);
let max_players;
let bot_players;
let arena_name;
{
let fetched_game_mode_setup = world.try_fetch::<GameModeSetup>();
if let Some(game_mode_setup) = fetched_game_mode_setup {
max_players = game_mode_setup.max_players;
bot_players = game_mode_setup.bot_players;
arena_name = game_mode_setup.arena_name.clone();
} else {
max_players = 4;
bot_players = 3;
arena_name = ArenaNames::OpenEmptyMap;
}
}
let arena_properties;
{
let fetched_arena_store = world.try_fetch::<ArenaStoreResource>();
if let Some(arena_store) = fetched_arena_store {
arena_properties = match arena_store.properties.get(&arena_name) {
Some(arena_props_get) => (*arena_props_get).clone(),
_ => ArenaProperties::default(),
};
} else {
arena_properties = ArenaProperties::default();
}
}
let player_to_team;
{
let fetched_game_team_setup = world.try_fetch::<GameTeamSetup>();
if let Some(game_team_setup) = fetched_game_team_setup {
player_to_team = game_team_setup.teams.clone();
} else {
player_to_team = [0, 1, 2, 3];
}
}
let player_status_text = PlayerStatusText {
shield: None,
armor: None,
health: None,
points: None,
lives: None,
};
for player_index in 0..max_players {
let vehicle_stats;
{
let fetched_game_vehicle_setup = world.try_fetch::<GameVehicleSetup>();
if let Some(game_vehicle_setup) = fetched_game_vehicle_setup {
vehicle_stats = game_vehicle_setup.stats[player_index].clone();
} else {
vehicle_stats = get_none_vehicle();
}
}
let is_bot = player_index >= max_players - bot_players;
let player = intialize_player(
world,
self.sprite_sheet_handle.clone().unwrap(),
player_index,
weapon_fire_resource.clone(),
player_to_team[player_index],
is_bot,
player_status_text.clone(),
vehicle_stats,
);
if PLAYER_CAMERA && !is_bot {
initialize_camera_to_player(world, &arena_properties, player);
}
}
if !PLAYER_CAMERA {
initialize_camera(world, &arena_properties);
}
// Create the `DispatcherBuilder` and register some `System`s that should only run for this `State`.
let mut dispatcher_builder = DispatcherBuilder::new();
dispatcher_builder.add(VehicleTrackingSystem, "vehicle_tracking_system", &[]);
dispatcher_builder.add(VehicleMoveSystem::default(), "vehicle_move_system", &[]);
dispatcher_builder.add(VehicleWeaponsSystem, "vehicle_weapons_system", &[]);
dispatcher_builder.add(
CollisionWeaponFireHitboxSystem::default(),
"collision_weapon_fire_hitbox_system",
&[],
);
dispatcher_builder.add(
MoveWeaponFireSystem::default(),
"move_weapon_fire_system",
&[],
);
dispatcher_builder.add(
CollisionVehToVehSystem,
"collision_vehicle_vehicle_system",
&[],
);
dispatcher_builder.add(
VehicleShieldArmorHealthSystem,
"vehicle_shield_armor_health_system",
&[],
);
dispatcher_builder.add(VehicleStatusSystem::default(), "vehicle_status_system", &[]);
dispatcher_builder.add(MoveParticlesSystem, "move_particles_system", &[]);
dispatcher_builder.add(PathingLinesSystem::default(), "pathing_lines_system", &[]);
dispatcher_builder.add(
CameraTrackingSystem::default(),
"camera_tracking_system",
&[],
);
// Build and setup the `Dispatcher`.
let mut dispatcher = dispatcher_builder.build();
dispatcher.setup(world);
self.dispatcher = Some(dispatcher);
}
fn on_pause(&mut self, _data: StateData<'_, GameData<'_, '_>>) {
self.paused = true;
}
fn on_resume(&mut self, _data: StateData<'_, GameData<'_, '_>>) {
self.paused = false;
}
fn on_stop(&mut self, data: StateData<'_, GameData<'_, '_>>) {
if let Some(root_entity) = self.ui_root {
data.world
.delete_entity(root_entity)
.expect("Failed to remove Game Screen");
}
let fetched_game_score = data.world.try_fetch::<GameScore>();
if let Some(game_score) = fetched_game_score {
if !game_score.game_ended {
exec_removal(&data.world.entities(), &data.world.read_storage(), 0 as u32);
}
} else {
exec_removal(&data.world.entities(), &data.world.read_storage(), 0 as u32);
}
self.player_ui_initialized = false;
self.ui_root = None;
self.fps_display = None;
}
fn handle_event(
&mut self,
_: StateData<'_, GameData<'_, '_>>,
event: StateEvent,
) -> SimpleTrans {
match &event {
StateEvent::Window(event) => {
if is_close_requested(&event) {
log::info!("[Trans::Quit] Quitting Application!");
Trans::Quit
} else if is_key_down(&event, VirtualKeyCode::Escape) {
log::info!("[Trans::Push] Pausing Game!");
Trans::Push(Box::new(PauseMenuState::default()))
} else {
Trans::None
}
}
StateEvent::Ui(_ui_event) => |
StateEvent::Input(_input) => {
//log::info!("Input Event detected: {:?}.", input);
Trans::None
}
}
}
fn update(&mut self, data: &mut StateData<'_, GameData<'_, '_>>) -> SimpleTrans {
if let Some(dispatcher) = self.dispatcher.as_mut() {
dispatcher.dispatch(&data.world);
}
let world = &mut data.world;
// this cannot happen in 'on_start', as the entity might not be fully
// initialized/registered/created yet.
if self.fps_display.is_none() {
world.exec(|finder: UiFinder<'_>| {
if let Some(entity) = finder.find("fps") {
self.fps_display = Some(entity);
}
});
}
// it is important that the 'paused' field is actually pausing your game.
// Make sure to also pause your running systems.
if !self.paused {
let mut ui_text = world.write_storage::<UiText>();
if let Some(fps_display) = self.fps_display.and_then(|entity| ui_text.get_mut(entity)) {
if world.read_resource::<Time>().frame_number() % 20 == 0 && !self.paused {
let fps = world.read_resource::<FpsCounter>().sampled_fps();
fps_display.text = format!("FPS: {:.*}", 2, fps);
}
}
}
if !self.player_ui_initialized {
let connected_success = connect_players_to_ui(world);
if connected_success {
self.player_ui_initialized = true;
}
}
let fetched_game_score = world.try_fetch::<GameScore>();
if let Some(game_score) = fetched_game_score {
if game_score.game_ended {
return Trans::Switch(Box::new(ScoreScreen::default()));
}
}
Trans::None
}
}
pub fn load_sprite_sheet(world: &mut World, storage: String, store: String) -> Handle<SpriteSheet> {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(storage, ImageFormat::default(), (), &texture_storage)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
store, // Here we load the associated ron file
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
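// Both `loader.load` calls above return a Handle immediately; the texture and
// sprite-sheet data stream in asynchronously via AssetStorage. That is why the
// handles are cheap to clone and can be attached to entities before loading
// has actually finished.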
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum AxisBinding {
VehicleAccel(usize),
VehicleTurn(usize),
VehicleStrafe(usize),
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum ActionBinding {
VehiclePriFire(usize),
VehicleAltFire(usize),
VehicleRepair(usize),
}
impl Display for AxisBinding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Display for ActionBinding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
#[derive(Debug)]
pub struct MovementBindingTypes;
impl BindingTypes for MovementBindingTypes {
type Axis = AxisBinding;
type Action = ActionBinding;
}
| {
// log::info!(
// "[HANDLE_EVENT] You just interacted with a ui element: {:?}",
// ui_event
// );
Trans::None
} | conditional_block |
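A structural note: `GameplayState` builds its own `Dispatcher` in `on_start` instead of registering its systems globally, so gameplay systems stop running the moment the state is popped (for example while `PauseMenuState` is on top). A reduced sketch of the pattern against the same amethyst API, with a hypothetical `ExampleSystem`:

use amethyst::ecs::prelude::{Dispatcher, DispatcherBuilder, System, World};

struct ExampleSystem;

impl<'s> System<'s> for ExampleSystem {
    type SystemData = ();
    fn run(&mut self, _data: Self::SystemData) {}
}

fn build_state_dispatcher<'a, 'b>(world: &mut World) -> Dispatcher<'a, 'b> {
    let mut builder = DispatcherBuilder::new();
    // (system, unique name, names of systems that must run first)
    builder.add(ExampleSystem, "example_system", &[]);
    let mut dispatcher = builder.build();
    dispatcher.setup(world); // registers the components/resources the systems need
    dispatcher
}

`update` then drives it manually with `dispatcher.dispatch(&data.world)`, and because every dependency list above is the empty `&[]`, the systems are free to run in parallel within a frame.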
rally.rs | use amethyst::{
assets::{AssetStorage, Handle, Loader},
core::Time,
ecs::prelude::{Dispatcher, DispatcherBuilder, Entity},
input::{is_close_requested, is_key_down, BindingTypes},
prelude::*,
renderer::{
debug_drawing::{DebugLines, DebugLinesComponent, DebugLinesParams},
ImageFormat, SpriteSheet, SpriteSheetFormat, Texture,
},
ui::{UiCreator, UiFinder, UiText, UiTransform},
utils::{
fps_counter::FpsCounter,
removal::{exec_removal, Removal},
},
winit::VirtualKeyCode,
};
use serde::{Deserialize, Serialize};
use std::fmt::{self, Display};
use crate::pause::PauseMenuState;
use crate::score_screen::ScoreScreen;
use crate::resources::{
initialize_weapon_fire_resource, ArenaNavMesh, ArenaNavMeshFinal, GameModeSetup, GameScore,
GameTeamSetup, GameVehicleSetup, WeaponFireResource,
};
use crate::entities::{
connect_players_to_ui, initialize_camera, initialize_camera_to_player, initialize_timer_ui,
intialize_arena, intialize_player, PlayerStatusText,
};
use crate::components::{
get_none_vehicle, ArenaElement, ArenaNames, ArenaProperties, ArenaStoreResource, Armor, Health,
Hitbox, Particles, Player, PlayerWeaponIcon, Repair, Shield, Vehicle, WeaponArray, WeaponFire,
};
use crate::systems::{
CameraTrackingSystem, CollisionVehToVehSystem, CollisionWeaponFireHitboxSystem,
MoveParticlesSystem, MoveWeaponFireSystem, PathingLinesSystem, VehicleMoveSystem,
VehicleShieldArmorHealthSystem, VehicleStatusSystem, VehicleTrackingSystem,
VehicleWeaponsSystem,
};
pub const PLAYER_CAMERA: bool = false;
pub const DEBUG_LINES: bool = false;
//cargo run --features sdl_controller
//Damage at speed of 100
pub const BASE_COLLISION_DAMAGE: f32 = 20.0;
pub const COLLISION_PIERCING_DAMAGE_PCT: f32 = 0.0;
pub const COLLISION_SHIELD_DAMAGE_PCT: f32 = 25.0;
pub const COLLISION_ARMOR_DAMAGE_PCT: f32 = 80.0;
pub const COLLISION_HEALTH_DAMAGE_PCT: f32 = 100.0;
#[derive(Default)]
pub struct GameplayState<'a, 'b> {
player_ui_initialized: bool,
// If the Game is paused or not
pub paused: bool,
// The UI root entity. Deleting this should remove the complete UI
ui_root: Option<Entity>,
// A reference to the FPS display, which we want to interact with
fps_display: Option<Entity>,
/// The `State` specific `Dispatcher`, containing `System`s only relevant for this `State`.
dispatcher: Option<Dispatcher<'a, 'b>>,
sprite_sheet_handle: Option<Handle<SpriteSheet>>, // Load the spritesheet necessary to render the graphics.
texture_sheet_handle: Option<Handle<SpriteSheet>>,
}
impl<'a, 'b> SimpleState for GameplayState<'a, 'b> {
fn on_start(&mut self, mut data: StateData<'_, GameData<'_, '_>>) {
self.player_ui_initialized = false;
let world = &mut data.world;
self.ui_root =
Some(world.exec(|mut creator: UiCreator<'_>| creator.create("ui/gameplay.ron", ())));
world.register::<UiText>();
world.register::<UiTransform>();
world.register::<Armor>();
world.register::<Health>();
world.register::<ArenaElement>();
world.register::<Hitbox>();
world.register::<Player>();
world.register::<Repair>();
world.register::<Shield>();
world.register::<Vehicle>();
world.register::<WeaponArray>();
world.register::<WeaponFire>();
world.register::<Particles>();
world.register::<PlayerWeaponIcon>();
// Setup debug lines as a resource
world.insert(DebugLines::new());
// Configure width of lines. Optional step
world.insert(DebugLinesParams { line_width: 2.0 });
// Setup debug lines as a component and add lines to render axis&grid
let debug_lines_component = DebugLinesComponent::new();
world.create_entity().with(debug_lines_component).build();
world.register::<Removal<u32>>();
self.sprite_sheet_handle.replace(load_sprite_sheet(
world,
"texture/rally_spritesheet.png".to_string(),
"texture/rally_spritesheet.ron".to_string(),
));
self.texture_sheet_handle.replace(load_sprite_sheet(
world,
"texture/rally_texture_sheet.png".to_string(),
"texture/rally_texture_sheet.ron".to_string(),
));
let weapon_fire_resource: WeaponFireResource =
initialize_weapon_fire_resource(world, self.sprite_sheet_handle.clone().unwrap());
initialize_timer_ui(world);
world.insert(ArenaNavMesh {
vertices: Vec::new(),
triangles: Vec::new(),
});
world.insert(ArenaNavMeshFinal { mesh: None });
intialize_arena(
world,
self.sprite_sheet_handle.clone().unwrap(),
self.texture_sheet_handle.clone().unwrap(),
);
let max_players;
let bot_players;
let arena_name;
{
let fetched_game_mode_setup = world.try_fetch::<GameModeSetup>();
if let Some(game_mode_setup) = fetched_game_mode_setup {
max_players = game_mode_setup.max_players;
bot_players = game_mode_setup.bot_players;
arena_name = game_mode_setup.arena_name.clone();
} else {
max_players = 4;
bot_players = 3;
arena_name = ArenaNames::OpenEmptyMap;
}
}
let arena_properties;
{
let fetched_arena_store = world.try_fetch::<ArenaStoreResource>();
if let Some(arena_store) = fetched_arena_store {
arena_properties = match arena_store.properties.get(&arena_name) {
Some(arena_props_get) => (*arena_props_get).clone(),
_ => ArenaProperties::default(),
};
} else {
arena_properties = ArenaProperties::default();
}
}
let player_to_team;
{
let fetched_game_team_setup = world.try_fetch::<GameTeamSetup>();
if let Some(game_team_setup) = fetched_game_team_setup {
player_to_team = game_team_setup.teams.clone();
} else {
player_to_team = [0, 1, 2, 3];
}
}
let player_status_text = PlayerStatusText {
shield: None,
armor: None,
health: None,
points: None,
lives: None,
};
for player_index in 0..max_players {
let vehicle_stats;
{
let fetched_game_vehicle_setup = world.try_fetch::<GameVehicleSetup>();
if let Some(game_vehicle_setup) = fetched_game_vehicle_setup {
vehicle_stats = game_vehicle_setup.stats[player_index].clone();
} else {
vehicle_stats = get_none_vehicle();
}
}
let is_bot = player_index >= max_players - bot_players;
let player = intialize_player(
world,
self.sprite_sheet_handle.clone().unwrap(),
player_index,
weapon_fire_resource.clone(),
player_to_team[player_index],
is_bot,
player_status_text.clone(),
vehicle_stats,
);
if PLAYER_CAMERA && !is_bot {
initialize_camera_to_player(world, &arena_properties, player);
}
}
if !PLAYER_CAMERA {
initialize_camera(world, &arena_properties);
}
// Create the `DispatcherBuilder` and register some `System`s that should only run for this `State`.
let mut dispatcher_builder = DispatcherBuilder::new();
dispatcher_builder.add(VehicleTrackingSystem, "vehicle_tracking_system", &[]);
dispatcher_builder.add(VehicleMoveSystem::default(), "vehicle_move_system", &[]);
dispatcher_builder.add(VehicleWeaponsSystem, "vehicle_weapons_system", &[]);
dispatcher_builder.add(
CollisionWeaponFireHitboxSystem::default(),
"collision_weapon_fire_hitbox_system",
&[],
);
dispatcher_builder.add(
MoveWeaponFireSystem::default(),
"move_weapon_fire_system",
&[],
);
dispatcher_builder.add(
CollisionVehToVehSystem,
"collision_vehicle_vehicle_system",
&[],
);
dispatcher_builder.add(
VehicleShieldArmorHealthSystem,
"vehicle_shield_armor_health_system",
&[],
);
dispatcher_builder.add(VehicleStatusSystem::default(), "vehicle_status_system", &[]);
dispatcher_builder.add(MoveParticlesSystem, "move_particles_system", &[]);
dispatcher_builder.add(PathingLinesSystem::default(), "pathing_lines_system", &[]);
dispatcher_builder.add(
CameraTrackingSystem::default(),
"camera_tracking_system",
&[],
);
// Build and setup the `Dispatcher`.
let mut dispatcher = dispatcher_builder.build();
dispatcher.setup(world);
self.dispatcher = Some(dispatcher);
} | }
fn on_resume(&mut self, _data: StateData<'_, GameData<'_, '_>>) {
self.paused = false;
}
fn on_stop(&mut self, data: StateData<'_, GameData<'_, '_>>) {
if let Some(root_entity) = self.ui_root {
data.world
.delete_entity(root_entity)
.expect("Failed to remove Game Screen");
}
let fetched_game_score = data.world.try_fetch::<GameScore>();
if let Some(game_score) = fetched_game_score {
if !game_score.game_ended {
exec_removal(&data.world.entities(), &data.world.read_storage(), 0 as u32);
}
} else {
exec_removal(&data.world.entities(), &data.world.read_storage(), 0 as u32);
}
self.player_ui_initialized = false;
self.ui_root = None;
self.fps_display = None;
}
fn handle_event(
&mut self,
_: StateData<'_, GameData<'_, '_>>,
event: StateEvent,
) -> SimpleTrans {
match &event {
StateEvent::Window(event) => {
if is_close_requested(&event) {
log::info!("[Trans::Quit] Quitting Application!");
Trans::Quit
} else if is_key_down(&event, VirtualKeyCode::Escape) {
log::info!("[Trans::Push] Pausing Game!");
Trans::Push(Box::new(PauseMenuState::default()))
} else {
Trans::None
}
}
StateEvent::Ui(_ui_event) => {
// log::info!(
// "[HANDLE_EVENT] You just interacted with a ui element: {:?}",
// ui_event
// );
Trans::None
}
StateEvent::Input(_input) => {
//log::info!("Input Event detected: {:?}.", input);
Trans::None
}
}
}
fn update(&mut self, data: &mut StateData<'_, GameData<'_, '_>>) -> SimpleTrans {
if let Some(dispatcher) = self.dispatcher.as_mut() {
dispatcher.dispatch(&data.world);
}
let world = &mut data.world;
// this cannot happen in 'on_start', as the entity might not be fully
// initialized/registered/created yet.
if self.fps_display.is_none() {
world.exec(|finder: UiFinder<'_>| {
if let Some(entity) = finder.find("fps") {
self.fps_display = Some(entity);
}
});
}
// it is important that the 'paused' field is actually pausing your game.
// Make sure to also pause your running systems.
if !self.paused {
let mut ui_text = world.write_storage::<UiText>();
if let Some(fps_display) = self.fps_display.and_then(|entity| ui_text.get_mut(entity)) {
if world.read_resource::<Time>().frame_number() % 20 == 0 && !self.paused {
let fps = world.read_resource::<FpsCounter>().sampled_fps();
fps_display.text = format!("FPS: {:.*}", 2, fps);
}
}
}
if !self.player_ui_initialized {
let connected_success = connect_players_to_ui(world);
if connected_success {
self.player_ui_initialized = true;
}
}
let fetched_game_score = world.try_fetch::<GameScore>();
if let Some(game_score) = fetched_game_score {
if game_score.game_ended {
return Trans::Switch(Box::new(ScoreScreen::default()));
}
}
Trans::None
}
}
pub fn load_sprite_sheet(world: &mut World, storage: String, store: String) -> Handle<SpriteSheet> {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(storage, ImageFormat::default(), (), &texture_storage)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
store, // Here we load the associated ron file
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum AxisBinding {
VehicleAccel(usize),
VehicleTurn(usize),
VehicleStrafe(usize),
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum ActionBinding {
VehiclePriFire(usize),
VehicleAltFire(usize),
VehicleRepair(usize),
}
impl Display for AxisBinding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Display for ActionBinding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
#[derive(Debug)]
pub struct MovementBindingTypes;
impl BindingTypes for MovementBindingTypes {
type Axis = AxisBinding;
type Action = ActionBinding;
} |
fn on_pause(&mut self, _data: StateData<'_, GameData<'_, '_>>) {
self.paused = true; | random_line_split |
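One small point in `update` worth pulling out: the FPS label is rewritten only every 20th frame, so `UiText` mutation (and the text re-layout it triggers) stays off the per-frame hot path. The guard reduces to this shape, sketched here outside the ECS for clarity:

fn fps_label(frame_number: u64, sampled_fps: f32) -> Option<String> {
    // Refresh the label on every 20th frame; otherwise keep the old text.
    if frame_number % 20 == 0 {
        Some(format!("FPS: {:.2}", sampled_fps))
    } else {
        None
    }
}

Note that `format!("FPS: {:.*}", 2, fps)` in the source is equivalent to `{:.2}`: the `.*` form reads the precision from the preceding argument.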
rally.rs | use amethyst::{
assets::{AssetStorage, Handle, Loader},
core::Time,
ecs::prelude::{Dispatcher, DispatcherBuilder, Entity},
input::{is_close_requested, is_key_down, BindingTypes},
prelude::*,
renderer::{
debug_drawing::{DebugLines, DebugLinesComponent, DebugLinesParams},
ImageFormat, SpriteSheet, SpriteSheetFormat, Texture,
},
ui::{UiCreator, UiFinder, UiText, UiTransform},
utils::{
fps_counter::FpsCounter,
removal::{exec_removal, Removal},
},
winit::VirtualKeyCode,
};
use serde::{Deserialize, Serialize};
use std::fmt::{self, Display};
use crate::pause::PauseMenuState;
use crate::score_screen::ScoreScreen;
use crate::resources::{
initialize_weapon_fire_resource, ArenaNavMesh, ArenaNavMeshFinal, GameModeSetup, GameScore,
GameTeamSetup, GameVehicleSetup, WeaponFireResource,
};
use crate::entities::{
connect_players_to_ui, initialize_camera, initialize_camera_to_player, initialize_timer_ui,
intialize_arena, intialize_player, PlayerStatusText,
};
use crate::components::{
get_none_vehicle, ArenaElement, ArenaNames, ArenaProperties, ArenaStoreResource, Armor, Health,
Hitbox, Particles, Player, PlayerWeaponIcon, Repair, Shield, Vehicle, WeaponArray, WeaponFire,
};
use crate::systems::{
CameraTrackingSystem, CollisionVehToVehSystem, CollisionWeaponFireHitboxSystem,
MoveParticlesSystem, MoveWeaponFireSystem, PathingLinesSystem, VehicleMoveSystem,
VehicleShieldArmorHealthSystem, VehicleStatusSystem, VehicleTrackingSystem,
VehicleWeaponsSystem,
};
pub const PLAYER_CAMERA: bool = false;
pub const DEBUG_LINES: bool = false;
//cargo run --features sdl_controller
//Damage at speed of 100
pub const BASE_COLLISION_DAMAGE: f32 = 20.0;
pub const COLLISION_PIERCING_DAMAGE_PCT: f32 = 0.0;
pub const COLLISION_SHIELD_DAMAGE_PCT: f32 = 25.0;
pub const COLLISION_ARMOR_DAMAGE_PCT: f32 = 80.0;
pub const COLLISION_HEALTH_DAMAGE_PCT: f32 = 100.0;
#[derive(Default)]
pub struct GameplayState<'a, 'b> {
player_ui_initialized: bool,
// If the Game is paused or not
pub paused: bool,
// The UI root entity. Deleting this should remove the complete UI
ui_root: Option<Entity>,
// A reference to the FPS display, which we want to interact with
fps_display: Option<Entity>,
/// The `State` specific `Dispatcher`, containing `System`s only relevant for this `State`.
dispatcher: Option<Dispatcher<'a, 'b>>,
sprite_sheet_handle: Option<Handle<SpriteSheet>>, // Load the spritesheet necessary to render the graphics.
texture_sheet_handle: Option<Handle<SpriteSheet>>,
}
impl<'a, 'b> SimpleState for GameplayState<'a, 'b> {
fn on_start(&mut self, mut data: StateData<'_, GameData<'_, '_>>) {
self.player_ui_initialized = false;
let world = &mut data.world;
self.ui_root =
Some(world.exec(|mut creator: UiCreator<'_>| creator.create("ui/gameplay.ron", ())));
world.register::<UiText>();
world.register::<UiTransform>();
world.register::<Armor>();
world.register::<Health>();
world.register::<ArenaElement>();
world.register::<Hitbox>();
world.register::<Player>();
world.register::<Repair>();
world.register::<Shield>();
world.register::<Vehicle>();
world.register::<WeaponArray>();
world.register::<WeaponFire>();
world.register::<Particles>();
world.register::<PlayerWeaponIcon>();
// Setup debug lines as a resource
world.insert(DebugLines::new());
// Configure width of lines. Optional step
world.insert(DebugLinesParams { line_width: 2.0 });
// Setup debug lines as a component and add lines to render axis&grid
let debug_lines_component = DebugLinesComponent::new();
world.create_entity().with(debug_lines_component).build();
world.register::<Removal<u32>>();
self.sprite_sheet_handle.replace(load_sprite_sheet(
world,
"texture/rally_spritesheet.png".to_string(),
"texture/rally_spritesheet.ron".to_string(),
));
self.texture_sheet_handle.replace(load_sprite_sheet(
world,
"texture/rally_texture_sheet.png".to_string(),
"texture/rally_texture_sheet.ron".to_string(),
));
let weapon_fire_resource: WeaponFireResource =
initialize_weapon_fire_resource(world, self.sprite_sheet_handle.clone().unwrap());
initialize_timer_ui(world);
world.insert(ArenaNavMesh {
vertices: Vec::new(),
triangles: Vec::new(),
});
world.insert(ArenaNavMeshFinal { mesh: None });
intialize_arena(
world,
self.sprite_sheet_handle.clone().unwrap(),
self.texture_sheet_handle.clone().unwrap(),
);
let max_players;
let bot_players;
let arena_name;
{
let fetched_game_mode_setup = world.try_fetch::<GameModeSetup>();
if let Some(game_mode_setup) = fetched_game_mode_setup {
max_players = game_mode_setup.max_players;
bot_players = game_mode_setup.bot_players;
arena_name = game_mode_setup.arena_name.clone();
} else {
max_players = 4;
bot_players = 3;
arena_name = ArenaNames::OpenEmptyMap;
}
}
let arena_properties;
{
let fetched_arena_store = world.try_fetch::<ArenaStoreResource>();
if let Some(arena_store) = fetched_arena_store {
arena_properties = match arena_store.properties.get(&arena_name) {
Some(arena_props_get) => (*arena_props_get).clone(),
_ => ArenaProperties::default(),
};
} else {
arena_properties = ArenaProperties::default();
}
}
let player_to_team;
{
let fetched_game_team_setup = world.try_fetch::<GameTeamSetup>();
if let Some(game_team_setup) = fetched_game_team_setup {
player_to_team = game_team_setup.teams.clone();
} else {
player_to_team = [0, 1, 2, 3];
}
}
let player_status_text = PlayerStatusText {
shield: None,
armor: None,
health: None,
points: None,
lives: None,
};
for player_index in 0..max_players {
let vehicle_stats;
{
let fetched_game_vehicle_setup = world.try_fetch::<GameVehicleSetup>();
if let Some(game_vehicle_setup) = fetched_game_vehicle_setup {
vehicle_stats = game_vehicle_setup.stats[player_index].clone();
} else {
vehicle_stats = get_none_vehicle();
}
}
let is_bot = player_index >= max_players - bot_players;
let player = intialize_player(
world,
self.sprite_sheet_handle.clone().unwrap(),
player_index,
weapon_fire_resource.clone(),
player_to_team[player_index],
is_bot,
player_status_text.clone(),
vehicle_stats,
);
if PLAYER_CAMERA && !is_bot {
initialize_camera_to_player(world, &arena_properties, player);
}
}
if !PLAYER_CAMERA {
initialize_camera(world, &arena_properties);
}
// Create the `DispatcherBuilder` and register some `System`s that should only run for this `State`.
let mut dispatcher_builder = DispatcherBuilder::new();
dispatcher_builder.add(VehicleTrackingSystem, "vehicle_tracking_system", &[]);
dispatcher_builder.add(VehicleMoveSystem::default(), "vehicle_move_system", &[]);
dispatcher_builder.add(VehicleWeaponsSystem, "vehicle_weapons_system", &[]);
dispatcher_builder.add(
CollisionWeaponFireHitboxSystem::default(),
"collision_weapon_fire_hitbox_system",
&[],
);
dispatcher_builder.add(
MoveWeaponFireSystem::default(),
"move_weapon_fire_system",
&[],
);
dispatcher_builder.add(
CollisionVehToVehSystem,
"collision_vehicle_vehicle_system",
&[],
);
dispatcher_builder.add(
VehicleShieldArmorHealthSystem,
"vehicle_shield_armor_health_system",
&[],
);
dispatcher_builder.add(VehicleStatusSystem::default(), "vehicle_status_system", &[]);
dispatcher_builder.add(MoveParticlesSystem, "move_particles_system", &[]);
dispatcher_builder.add(PathingLinesSystem::default(), "pathing_lines_system", &[]);
dispatcher_builder.add(
CameraTrackingSystem::default(),
"camera_tracking_system",
&[],
);
// Build and setup the `Dispatcher`.
let mut dispatcher = dispatcher_builder.build();
dispatcher.setup(world);
self.dispatcher = Some(dispatcher);
}
fn on_pause(&mut self, _data: StateData<'_, GameData<'_, '_>>) {
self.paused = true;
}
fn on_resume(&mut self, _data: StateData<'_, GameData<'_, '_>>) {
self.paused = false;
}
fn on_stop(&mut self, data: StateData<'_, GameData<'_, '_>>) {
if let Some(root_entity) = self.ui_root {
data.world
.delete_entity(root_entity)
.expect("Failed to remove Game Screen");
}
let fetched_game_score = data.world.try_fetch::<GameScore>();
if let Some(game_score) = fetched_game_score {
if !game_score.game_ended {
exec_removal(&data.world.entities(), &data.world.read_storage(), 0 as u32);
}
} else {
exec_removal(&data.world.entities(), &data.world.read_storage(), 0 as u32);
}
self.player_ui_initialized = false;
self.ui_root = None;
self.fps_display = None;
}
fn handle_event(
&mut self,
_: StateData<'_, GameData<'_, '_>>,
event: StateEvent,
) -> SimpleTrans {
match &event {
StateEvent::Window(event) => {
if is_close_requested(&event) {
log::info!("[Trans::Quit] Quitting Application!");
Trans::Quit
} else if is_key_down(&event, VirtualKeyCode::Escape) {
log::info!("[Trans::Push] Pausing Game!");
Trans::Push(Box::new(PauseMenuState::default()))
} else {
Trans::None
}
}
StateEvent::Ui(_ui_event) => {
// log::info!(
// "[HANDLE_EVENT] You just interacted with a ui element: {:?}",
// ui_event
// );
Trans::None
}
StateEvent::Input(_input) => {
//log::info!("Input Event detected: {:?}.", input);
Trans::None
}
}
}
fn update(&mut self, data: &mut StateData<'_, GameData<'_, '_>>) -> SimpleTrans {
if let Some(dispatcher) = self.dispatcher.as_mut() {
dispatcher.dispatch(&data.world);
}
let world = &mut data.world;
// this cannot happen in 'on_start', as the entity might not be fully
// initialized/registered/created yet.
if self.fps_display.is_none() {
world.exec(|finder: UiFinder<'_>| {
if let Some(entity) = finder.find("fps") {
self.fps_display = Some(entity);
}
});
}
// it is important that the 'paused' field is actually pausing your game.
// Make sure to also pause your running systems.
if !self.paused {
let mut ui_text = world.write_storage::<UiText>();
if let Some(fps_display) = self.fps_display.and_then(|entity| ui_text.get_mut(entity)) {
if world.read_resource::<Time>().frame_number() % 20 == 0 && !self.paused {
let fps = world.read_resource::<FpsCounter>().sampled_fps();
fps_display.text = format!("FPS: {:.*}", 2, fps);
}
}
}
if !self.player_ui_initialized {
let connected_success = connect_players_to_ui(world);
if connected_success {
self.player_ui_initialized = true;
}
}
let fetched_game_score = world.try_fetch::<GameScore>();
if let Some(game_score) = fetched_game_score {
if game_score.game_ended {
return Trans::Switch(Box::new(ScoreScreen::default()));
}
}
Trans::None
}
}
pub fn load_sprite_sheet(world: &mut World, storage: String, store: String) -> Handle<SpriteSheet> {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(storage, ImageFormat::default(), (), &texture_storage)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
store, // Here we load the associated ron file
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum AxisBinding {
VehicleAccel(usize),
VehicleTurn(usize),
VehicleStrafe(usize),
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum ActionBinding {
VehiclePriFire(usize),
VehicleAltFire(usize),
VehicleRepair(usize),
}
impl Display for AxisBinding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Display for ActionBinding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
#[derive(Debug)]
pub struct MovementBindingTypes;
impl BindingTypes for MovementBindingTypes {
type Axis = AxisBinding;
type Action = ActionBinding;
}
| {
write!(f, "{:?}", self)
} | identifier_body |
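The binding enums at the bottom are what make multi-player local input work from one config: every variant carries a `usize` player index, so a single bindings file can map `VehicleAccel(0)` and `VehicleAccel(1)` to different sticks. A sketch of how a system might read them, assuming amethyst's `InputHandler` resource (the `player` parameter is illustrative):

use amethyst::input::InputHandler;

fn read_player_input(
    input: &InputHandler<MovementBindingTypes>,
    player: usize,
) -> (f32, bool) {
    // Axes come back as Option<f32>, actions as Option<bool>.
    let accel = input
        .axis_value(&AxisBinding::VehicleAccel(player))
        .unwrap_or(0.0);
    let firing = input
        .action_is_down(&ActionBinding::VehiclePriFire(player))
        .unwrap_or(false);
    (accel, firing)
}

The `Display` impls give each axis and action a readable string form, which amethyst's binding diagnostics and config round-tripping expect.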
rally.rs | use amethyst::{
assets::{AssetStorage, Handle, Loader},
core::Time,
ecs::prelude::{Dispatcher, DispatcherBuilder, Entity},
input::{is_close_requested, is_key_down, BindingTypes},
prelude::*,
renderer::{
debug_drawing::{DebugLines, DebugLinesComponent, DebugLinesParams},
ImageFormat, SpriteSheet, SpriteSheetFormat, Texture,
},
ui::{UiCreator, UiFinder, UiText, UiTransform},
utils::{
fps_counter::FpsCounter,
removal::{exec_removal, Removal},
},
winit::VirtualKeyCode,
};
use serde::{Deserialize, Serialize};
use std::fmt::{self, Display};
use crate::pause::PauseMenuState;
use crate::score_screen::ScoreScreen;
use crate::resources::{
initialize_weapon_fire_resource, ArenaNavMesh, ArenaNavMeshFinal, GameModeSetup, GameScore,
GameTeamSetup, GameVehicleSetup, WeaponFireResource,
};
use crate::entities::{
connect_players_to_ui, initialize_camera, initialize_camera_to_player, initialize_timer_ui,
intialize_arena, intialize_player, PlayerStatusText,
};
use crate::components::{
get_none_vehicle, ArenaElement, ArenaNames, ArenaProperties, ArenaStoreResource, Armor, Health,
Hitbox, Particles, Player, PlayerWeaponIcon, Repair, Shield, Vehicle, WeaponArray, WeaponFire,
};
use crate::systems::{
CameraTrackingSystem, CollisionVehToVehSystem, CollisionWeaponFireHitboxSystem,
MoveParticlesSystem, MoveWeaponFireSystem, PathingLinesSystem, VehicleMoveSystem,
VehicleShieldArmorHealthSystem, VehicleStatusSystem, VehicleTrackingSystem,
VehicleWeaponsSystem,
};
pub const PLAYER_CAMERA: bool = false;
pub const DEBUG_LINES: bool = false;
//cargo run --features sdl_controller
//Damage at speed of 100
pub const BASE_COLLISION_DAMAGE: f32 = 20.0;
pub const COLLISION_PIERCING_DAMAGE_PCT: f32 = 0.0;
pub const COLLISION_SHIELD_DAMAGE_PCT: f32 = 25.0;
pub const COLLISION_ARMOR_DAMAGE_PCT: f32 = 80.0;
pub const COLLISION_HEALTH_DAMAGE_PCT: f32 = 100.0;
#[derive(Default)]
pub struct GameplayState<'a, 'b> {
player_ui_initialized: bool,
// If the Game is paused or not
pub paused: bool,
// The UI root entity. Deleting this should remove the complete UI
ui_root: Option<Entity>,
// A reference to the FPS display, which we want to interact with
fps_display: Option<Entity>,
/// The `State` specific `Dispatcher`, containing `System`s only relevant for this `State`.
dispatcher: Option<Dispatcher<'a, 'b>>,
sprite_sheet_handle: Option<Handle<SpriteSheet>>, // Load the spritesheet necessary to render the graphics.
texture_sheet_handle: Option<Handle<SpriteSheet>>,
}
impl<'a, 'b> SimpleState for GameplayState<'a, 'b> {
fn on_start(&mut self, mut data: StateData<'_, GameData<'_, '_>>) {
self.player_ui_initialized = false;
let world = &mut data.world;
self.ui_root =
Some(world.exec(|mut creator: UiCreator<'_>| creator.create("ui/gameplay.ron", ())));
world.register::<UiText>();
world.register::<UiTransform>();
world.register::<Armor>();
world.register::<Health>();
world.register::<ArenaElement>();
world.register::<Hitbox>();
world.register::<Player>();
world.register::<Repair>();
world.register::<Shield>();
world.register::<Vehicle>();
world.register::<WeaponArray>();
world.register::<WeaponFire>();
world.register::<Particles>();
world.register::<PlayerWeaponIcon>();
// Setup debug lines as a resource
world.insert(DebugLines::new());
// Configure width of lines. Optional step
world.insert(DebugLinesParams { line_width: 2.0 });
// Setup debug lines as a component and add lines to render axis&grid
let debug_lines_component = DebugLinesComponent::new();
world.create_entity().with(debug_lines_component).build();
world.register::<Removal<u32>>();
self.sprite_sheet_handle.replace(load_sprite_sheet(
world,
"texture/rally_spritesheet.png".to_string(),
"texture/rally_spritesheet.ron".to_string(),
));
self.texture_sheet_handle.replace(load_sprite_sheet(
world,
"texture/rally_texture_sheet.png".to_string(),
"texture/rally_texture_sheet.ron".to_string(),
));
let weapon_fire_resource: WeaponFireResource =
initialize_weapon_fire_resource(world, self.sprite_sheet_handle.clone().unwrap());
initialize_timer_ui(world);
world.insert(ArenaNavMesh {
vertices: Vec::new(),
triangles: Vec::new(),
});
world.insert(ArenaNavMeshFinal { mesh: None });
intialize_arena(
world,
self.sprite_sheet_handle.clone().unwrap(),
self.texture_sheet_handle.clone().unwrap(),
);
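// Read the configured game mode, falling back to a 4-player match with
// 3 bots on the default open map when no GameModeSetup resource exists.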
let max_players;
let bot_players;
let arena_name;
{
let fetched_game_mode_setup = world.try_fetch::<GameModeSetup>();
if let Some(game_mode_setup) = fetched_game_mode_setup {
max_players = game_mode_setup.max_players;
bot_players = game_mode_setup.bot_players;
arena_name = game_mode_setup.arena_name.clone();
} else {
max_players = 4;
bot_players = 3;
arena_name = ArenaNames::OpenEmptyMap;
}
}
let arena_properties;
{
let fetched_arena_store = world.try_fetch::<ArenaStoreResource>();
if let Some(arena_store) = fetched_arena_store {
arena_properties = match arena_store.properties.get(&arena_name) {
Some(arena_props_get) => (*arena_props_get).clone(),
_ => ArenaProperties::default(),
};
} else {
arena_properties = ArenaProperties::default();
}
}
let player_to_team;
{
let fetched_game_team_setup = world.try_fetch::<GameTeamSetup>();
if let Some(game_team_setup) = fetched_game_team_setup {
player_to_team = game_team_setup.teams.clone();
} else {
player_to_team = [0, 1, 2, 3];
}
}
let player_status_text = PlayerStatusText {
shield: None,
armor: None,
health: None,
points: None,
lives: None,
};
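// Spawn one vehicle per player slot; the last `bot_players` slots are AI-controlled.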
for player_index in 0..max_players {
let vehicle_stats;
{
let fetched_game_vehicle_setup = world.try_fetch::<GameVehicleSetup>();
if let Some(game_vehicle_setup) = fetched_game_vehicle_setup {
vehicle_stats = game_vehicle_setup.stats[player_index].clone();
} else {
vehicle_stats = get_none_vehicle();
}
}
let is_bot = player_index >= max_players - bot_players;
let player = intialize_player(
world,
self.sprite_sheet_handle.clone().unwrap(),
player_index,
weapon_fire_resource.clone(),
player_to_team[player_index],
is_bot,
player_status_text.clone(),
vehicle_stats,
);
if PLAYER_CAMERA && !is_bot {
initialize_camera_to_player(world, &arena_properties, player);
}
}
if !PLAYER_CAMERA {
initialize_camera(world, &arena_properties);
}
// Create the `DispatcherBuilder` and register some `System`s that should only run for this `State`.
let mut dispatcher_builder = DispatcherBuilder::new();
dispatcher_builder.add(VehicleTrackingSystem, "vehicle_tracking_system", &[]);
dispatcher_builder.add(VehicleMoveSystem::default(), "vehicle_move_system", &[]);
dispatcher_builder.add(VehicleWeaponsSystem, "vehicle_weapons_system", &[]);
dispatcher_builder.add(
CollisionWeaponFireHitboxSystem::default(),
"collision_weapon_fire_hitbox_system",
&[],
);
dispatcher_builder.add(
MoveWeaponFireSystem::default(),
"move_weapon_fire_system",
&[],
);
dispatcher_builder.add(
CollisionVehToVehSystem,
"collision_vehicle_vehicle_system",
&[],
);
dispatcher_builder.add(
VehicleShieldArmorHealthSystem,
"vehicle_shield_armor_health_system",
&[],
);
dispatcher_builder.add(VehicleStatusSystem::default(), "vehicle_status_system", &[]);
dispatcher_builder.add(MoveParticlesSystem, "move_particles_system", &[]);
dispatcher_builder.add(PathingLinesSystem::default(), "pathing_lines_system", &[]);
dispatcher_builder.add(
CameraTrackingSystem::default(),
"camera_tracking_system",
&[],
);
// Build and setup the `Dispatcher`.
let mut dispatcher = dispatcher_builder.build();
dispatcher.setup(world);
self.dispatcher = Some(dispatcher);
}
fn on_pause(&mut self, _data: StateData<'_, GameData<'_, '_>>) {
self.paused = true;
}
fn on_resume(&mut self, _data: StateData<'_, GameData<'_, '_>>) {
self.paused = false;
}
fn | (&mut self, data: StateData<'_, GameData<'_, '_>>) {
if let Some(root_entity) = self.ui_root {
data.world
.delete_entity(root_entity)
.expect("Failed to remove Game Screen");
}
let fetched_game_score = data.world.try_fetch::<GameScore>();
if let Some(game_score) = fetched_game_score {
if !game_score.game_ended {
exec_removal(&data.world.entities(), &data.world.read_storage(), 0 as u32);
}
} else {
exec_removal(&data.world.entities(), &data.world.read_storage(), 0 as u32);
}
self.player_ui_initialized = false;
self.ui_root = None;
self.fps_display = None;
}
fn handle_event(
&mut self,
_: StateData<'_, GameData<'_, '_>>,
event: StateEvent,
) -> SimpleTrans {
match &event {
StateEvent::Window(event) => {
if is_close_requested(&event) {
log::info!("[Trans::Quit] Quitting Application!");
Trans::Quit
} else if is_key_down(&event, VirtualKeyCode::Escape) {
log::info!("[Trans::Push] Pausing Game!");
Trans::Push(Box::new(PauseMenuState::default()))
} else {
Trans::None
}
}
StateEvent::Ui(_ui_event) => {
// log::info!(
// "[HANDLE_EVENT] You just interacted with a ui element: {:?}",
// ui_event
// );
Trans::None
}
StateEvent::Input(_input) => {
//log::info!("Input Event detected: {:?}.", input);
Trans::None
}
}
}
fn update(&mut self, data: &mut StateData<'_, GameData<'_, '_>>) -> SimpleTrans {
if let Some(dispatcher) = self.dispatcher.as_mut() {
dispatcher.dispatch(&data.world);
}
let world = &mut data.world;
// this cannot happen in 'on_start', as the entity might not be fully
// initialized/registered/created yet.
if self.fps_display.is_none() {
world.exec(|finder: UiFinder<'_>| {
if let Some(entity) = finder.find("fps") {
self.fps_display = Some(entity);
}
});
}
// it is important that the 'paused' field is actually pausing your game.
// Make sure to also pause your running systems.
if !self.paused {
let mut ui_text = world.write_storage::<UiText>();
if let Some(fps_display) = self.fps_display.and_then(|entity| ui_text.get_mut(entity)) {
if world.read_resource::<Time>().frame_number() % 20 == 0 && !self.paused {
let fps = world.read_resource::<FpsCounter>().sampled_fps();
fps_display.text = format!("FPS: {:.*}", 2, fps);
}
}
}
if !self.player_ui_initialized {
let connected_success = connect_players_to_ui(world);
if connected_success {
self.player_ui_initialized = true;
}
}
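// Once the score system flags the game as ended, switch to the score screen.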
let fetched_game_score = world.try_fetch::<GameScore>();
if let Some(game_score) = fetched_game_score {
if game_score.game_ended {
return Trans::Switch(Box::new(ScoreScreen::default()));
}
}
Trans::None
}
}
pub fn load_sprite_sheet(world: &mut World, storage: String, store: String) -> Handle<SpriteSheet> {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(storage, ImageFormat::default(), (), &texture_storage)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
store, // Here we load the associated ron file
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum AxisBinding {
VehicleAccel(usize),
VehicleTurn(usize),
VehicleStrafe(usize),
}
#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub enum ActionBinding {
VehiclePriFire(usize),
VehicleAltFire(usize),
VehicleRepair(usize),
}
impl Display for AxisBinding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Display for ActionBinding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
#[derive(Debug)]
pub struct MovementBindingTypes;
impl BindingTypes for MovementBindingTypes {
type Axis = AxisBinding;
type Action = ActionBinding;
}
| on_stop | identifier_name |
judger.rs | use crate::config::Config;
use crate::{WsMessage, WsStream};
use heng_utils::container::inject;
use heng_protocol::common::JudgeResult;
use heng_protocol::error::ErrorCode;
use heng_protocol::internal::{ConnectionSettings, ErrorInfo, PartialConnectionSettings};
use heng_protocol::internal::ws_json::{
CreateJudgeArgs, FinishJudgeArgs, Message as RpcMessage, ReportStatusArgs,
Request as RpcRequest, Response as RpcResponse, UpdateJudgeArgs,
};
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering::Relaxed};
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use chrono::Utc;
use dashmap::DashMap;
use futures::stream::SplitStream;
use futures::StreamExt;
use futures::TryFutureExt;
use serde::Serialize;
use serde_json::value::RawValue;
use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::{task, time};
use tokio_stream::wrappers::ReceiverStream;
use tokio_tungstenite::tungstenite;
use tracing::{debug, error, info, warn};
use tungstenite::protocol::frame::coding::CloseCode;
use tungstenite::protocol::CloseFrame;
pub struct Judger {
settings: Settings,
counter: Mutex<Counter>,
session: WsSession,
rpc_timeout: u64,
}
struct WsSession {
sender: mpsc::Sender<WsMessage>,
seq: AtomicU32,
callbacks: DashMap<u32, oneshot::Sender<RpcResponse>>,
}
struct Settings {
status_report_interval: AtomicU64,
}
#[derive(Debug, Clone)]
struct Counter {
pending: u64,
judging: u64,
finished: u64,
}
impl Judger {
pub async fn run(ws_stream: WsStream) -> Result<()> {
let config = inject::<Config>();
let (ws_sink, ws_stream) = ws_stream.split();
let (tx, rx) = mpsc::channel::<WsMessage>(4096);
task::spawn(
ReceiverStream::new(rx)
.map(Ok)
.forward(ws_sink)
.inspect_err(|err| error!(%err, "ws forward error")),
);
let judger = Arc::new(Self {
settings: Settings {
status_report_interval: AtomicU64::new(1000),
},
session: WsSession {
sender: tx,
seq: AtomicU32::new(0),
callbacks: DashMap::new(),
},
counter: Mutex::new(Counter {
pending: 0,
judging: 0,
finished: 0,
}),
rpc_timeout: config.judger.rpc_timeout,
});
task::spawn(judger.clone().report_status_loop());
judger.main_loop(ws_stream).await
}
async fn main_loop(self: Arc<Self>, mut ws_stream: SplitStream<WsStream>) -> Result<()> | };
let _ = self.session.sender.send(Close(Some(close_frame))).await;
return Err(err.into());
}
};
match rpc_msg {
RpcMessage::Request { seq, body, .. } => {
let this = self.clone();
task::spawn(async move {
let response = this.clone().handle_rpc_request(body).await;
let rpc_msg = RpcMessage::Response {
seq,
time: Utc::now(),
body: response,
};
let ws_msg =
WsMessage::text(serde_json::to_string(&rpc_msg).unwrap());
let _ = this.session.sender.send(ws_msg).await;
});
}
RpcMessage::Response { seq, body, .. } => {
match self.session.callbacks.remove(&seq) {
None => warn!(?seq, "no such callback"),
Some((_, cb)) => match cb.send(body) {
Ok(()) => {}
Err(_) => warn!(?seq, "the callback timed out"),
},
}
}
}
}
_ => {
warn!("drop ws message");
drop(frame);
}
}
}
Ok(())
}
async fn report_status_loop(self: Arc<Self>) -> Result<()> {
loop {
let delay = self.settings.status_report_interval.load(Relaxed);
time::sleep(Duration::from_millis(delay)).await;
let result = self
.wsrpc(RpcRequest::ReportStatus(ReportStatusArgs {
collect_time: Utc::now(),
next_report_time: Utc::now() + chrono::Duration::milliseconds(delay as i64),
report: None, // FIXME
}))
.await;
let cnt = self.count(|cnt| cnt.clone()).await;
match result {
Ok(RpcResponse::Output(None)) => {
debug!(interval=?delay, count=?cnt, "report status")
}
Ok(RpcResponse::Output(Some(value))) => warn!(?value, "unexpected response"),
Ok(RpcResponse::Error(err)) => warn!(%err, "report status"),
Err(_) => warn!("the request failed"),
}
}
}
async fn handle_rpc_request(self: Arc<Self>, req: RpcRequest) -> RpcResponse {
match req {
RpcRequest::CreateJudge(args) => to_null_response(self.create_judge(args).await),
RpcRequest::Control(args) => to_response(self.control(args).await),
_ => RpcResponse::Error(ErrorInfo {
code: ErrorCode::NotSupported,
message: None,
}),
}
}
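// Send a request over the websocket and await its response: each request is
// tagged with a fresh sequence number, and a oneshot sender is parked in the
// callbacks map until the matching response arrives or rpc_timeout elapses.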
async fn wsrpc(&self, req: RpcRequest) -> Result<RpcResponse> {
let session = &self.session;
let seq = session.seq.fetch_add(1, Relaxed).wrapping_add(1);
let (tx, rx) = oneshot::channel();
let rpc_msg = RpcMessage::Request {
seq,
time: Utc::now(),
body: req,
};
let ws_msg = WsMessage::text(serde_json::to_string(&rpc_msg).unwrap());
{
session.callbacks.insert(seq, tx);
session.sender.send(ws_msg).await.unwrap();
}
match time::timeout(Duration::from_millis(self.rpc_timeout), rx).await {
Ok(res) => Ok(res.unwrap()),
Err(err) => {
let _ = session.callbacks.remove(&seq);
return Err(anyhow::Error::new(err));
}
}
}
async fn count<T>(&self, f: impl FnOnce(&mut Counter) -> T) -> T {
let mut counter = self.counter.lock().await;
f(&mut counter)
}
async fn control(
&self,
settings: Option<PartialConnectionSettings>,
) -> Result<ConnectionSettings> {
if let Some(settings) = settings {
if let Some(interval) = settings.status_report_interval {
self.settings
.status_report_interval
.store(interval, Relaxed);
}
}
let current_settings = ConnectionSettings {
status_report_interval: self.settings.status_report_interval.load(Relaxed),
};
Ok(current_settings)
}
async fn create_judge(self: Arc<Self>, judge: CreateJudgeArgs) -> Result<()> {
task::spawn(async move {
self.count(|cnt| cnt.pending += 1).await;
self.count(|cnt| {
cnt.pending -= 1;
cnt.judging += 1;
})
.await;
let finish = FinishJudgeArgs {
id: judge.id.clone(),
result: JudgeResult {
cases: Vec::new(),
extra: None,
},
};
self.count(|cnt| {
cnt.judging -= 1;
cnt.finished += 1;
})
.await;
self.finish_judge(finish).await
});
Ok(())
}
async fn update_judge(&self, update: UpdateJudgeArgs) -> Result<()> {
let res = self.wsrpc(RpcRequest::UpdateJudge(update)).await?;
let output = to_anyhow(res)?;
if output.is_some() {
warn!(?output, "unexpected output")
}
Ok(())
}
async fn finish_judge(&self, finish: FinishJudgeArgs) -> Result<()> {
let res = self.wsrpc(RpcRequest::FinishJudge(finish)).await?;
let output = to_anyhow(res)?;
if output.is_some() {
warn!(?output, "unexpected output")
}
Ok(())
}
}
fn to_response<T: Serialize>(result: Result<T>) -> RpcResponse {
match result {
Ok(value) => {
let raw_value = RawValue::from_string(serde_json::to_string(&value).unwrap()).unwrap();
RpcResponse::Output(Some(raw_value))
}
Err(err) => RpcResponse::Error(ErrorInfo {
code: ErrorCode::UnknownError,
message: Some(err.to_string()),
}),
}
}
fn to_null_response(result: Result<()>) -> RpcResponse {
match result {
Ok(()) => RpcResponse::Output(None),
Err(err) => RpcResponse::Error(ErrorInfo {
code: ErrorCode::UnknownError,
message: Some(err.to_string()),
}),
}
}
fn to_anyhow(response: RpcResponse) -> Result<Option<Box<RawValue>>> {
match response {
RpcResponse::Output(output) => Ok(output),
RpcResponse::Error(err) => Err(anyhow::Error::from(err)),
}
}
| {
info!("starting main loop");
while let Some(frame) = ws_stream.next().await {
use tungstenite::Message::*;
let frame = frame?;
match frame {
Close(reason) => {
warn!(?reason, "ws session closed");
return Ok(());
}
Text(text) => {
let rpc_msg: RpcMessage = match serde_json::from_str(&text) {
Ok(m) => m,
Err(err) => {
error!(%err, "internal protocol: message format error:\n{:?}\n", text);
let close_frame = CloseFrame {
code: CloseCode::Invalid,
reason: "internal protocol message format error".into(), | identifier_body |
judger.rs | use crate::config::Config;
use crate::{WsMessage, WsStream};
use heng_utils::container::inject;
use heng_protocol::common::JudgeResult;
use heng_protocol::error::ErrorCode;
use heng_protocol::internal::{ConnectionSettings, ErrorInfo, PartialConnectionSettings};
use heng_protocol::internal::ws_json::{
CreateJudgeArgs, FinishJudgeArgs, Message as RpcMessage, ReportStatusArgs,
Request as RpcRequest, Response as RpcResponse, UpdateJudgeArgs,
};
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering::Relaxed};
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use chrono::Utc;
use dashmap::DashMap;
use futures::stream::SplitStream;
use futures::StreamExt;
use futures::TryFutureExt;
use serde::Serialize;
use serde_json::value::RawValue;
use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::{task, time};
use tokio_stream::wrappers::ReceiverStream;
use tokio_tungstenite::tungstenite;
use tracing::{debug, error, info, warn};
use tungstenite::protocol::frame::coding::CloseCode;
use tungstenite::protocol::CloseFrame;
pub struct Judger {
settings: Settings,
counter: Mutex<Counter>,
session: WsSession,
rpc_timeout: u64,
}
struct WsSession {
sender: mpsc::Sender<WsMessage>,
seq: AtomicU32,
callbacks: DashMap<u32, oneshot::Sender<RpcResponse>>,
}
struct Settings {
status_report_interval: AtomicU64,
}
#[derive(Debug, Clone)]
struct Counter {
pending: u64,
judging: u64,
finished: u64,
}
impl Judger {
pub async fn | (ws_stream: WsStream) -> Result<()> {
let config = inject::<Config>();
let (ws_sink, ws_stream) = ws_stream.split();
let (tx, rx) = mpsc::channel::<WsMessage>(4096);
task::spawn(
ReceiverStream::new(rx)
.map(Ok)
.forward(ws_sink)
.inspect_err(|err| error!(%err, "ws forward error")),
);
let judger = Arc::new(Self {
settings: Settings {
status_report_interval: AtomicU64::new(1000),
},
session: WsSession {
sender: tx,
seq: AtomicU32::new(0),
callbacks: DashMap::new(),
},
counter: Mutex::new(Counter {
pending: 0,
judging: 0,
finished: 0,
}),
rpc_timeout: config.judger.rpc_timeout,
});
task::spawn(judger.clone().report_status_loop());
judger.main_loop(ws_stream).await
}
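// Pump incoming websocket frames: text frames are decoded as RPC messages,
// requests are handled on spawned tasks, and responses are routed back to
// the caller waiting inside `wsrpc` via the callbacks map.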
async fn main_loop(self: Arc<Self>, mut ws_stream: SplitStream<WsStream>) -> Result<()> {
info!("starting main loop");
while let Some(frame) = ws_stream.next().await {
use tungstenite::Message::*;
let frame = frame?;
match frame {
Close(reason) => {
warn!(?reason, "ws session closed");
return Ok(());
}
Text(text) => {
let rpc_msg: RpcMessage = match serde_json::from_str(&text) {
Ok(m) => m,
Err(err) => {
error!(%err, "internal protocol: message format error:\n{:?}\n", text);
let close_frame = CloseFrame {
code: CloseCode::Invalid,
reason: "internal protocol message format error".into(),
};
let _ = self.session.sender.send(Close(Some(close_frame))).await;
return Err(err.into());
}
};
match rpc_msg {
RpcMessage::Request { seq, body, .. } => {
let this = self.clone();
task::spawn(async move {
let response = this.clone().handle_rpc_request(body).await;
let rpc_msg = RpcMessage::Response {
seq,
time: Utc::now(),
body: response,
};
let ws_msg =
WsMessage::text(serde_json::to_string(&rpc_msg).unwrap());
let _ = this.session.sender.send(ws_msg).await;
});
}
RpcMessage::Response { seq, body, .. } => {
match self.session.callbacks.remove(&seq) {
None => warn!(?seq, "no such callback"),
Some((_, cb)) => match cb.send(body) {
Ok(()) => {}
Err(_) => warn!(?seq, "the callback timed out"),
},
}
}
}
}
_ => {
warn!("drop ws message");
drop(frame);
}
}
}
Ok(())
}
async fn report_status_loop(self: Arc<Self>) -> Result<()> {
loop {
let delay = self.settings.status_report_interval.load(Relaxed);
time::sleep(Duration::from_millis(delay)).await;
let result = self
.wsrpc(RpcRequest::ReportStatus(ReportStatusArgs {
collect_time: Utc::now(),
next_report_time: Utc::now() + chrono::Duration::milliseconds(delay as i64),
report: None, // FIXME
}))
.await;
let cnt = self.count(|cnt| cnt.clone()).await;
match result {
Ok(RpcResponse::Output(None)) => {
debug!(interval=?delay, count=?cnt, "report status")
}
Ok(RpcResponse::Output(Some(value))) => warn!(?value, "unexpected response"),
Ok(RpcResponse::Error(err)) => warn!(%err, "report status"),
Err(_) => warn!("the request failed"),
}
}
}
async fn handle_rpc_request(self: Arc<Self>, req: RpcRequest) -> RpcResponse {
match req {
RpcRequest::CreateJudge(args) => to_null_response(self.create_judge(args).await),
RpcRequest::Control(args) => to_response(self.control(args).await),
_ => RpcResponse::Error(ErrorInfo {
code: ErrorCode::NotSupported,
message: None,
}),
}
}
async fn wsrpc(&self, req: RpcRequest) -> Result<RpcResponse> {
let session = &self.session;
let seq = session.seq.fetch_add(1, Relaxed).wrapping_add(1);
let (tx, rx) = oneshot::channel();
let rpc_msg = RpcMessage::Request {
seq,
time: Utc::now(),
body: req,
};
let ws_msg = WsMessage::text(serde_json::to_string(&rpc_msg).unwrap());
{
session.callbacks.insert(seq, tx);
session.sender.send(ws_msg).await.unwrap();
}
match time::timeout(Duration::from_millis(self.rpc_timeout), rx).await {
Ok(res) => Ok(res.unwrap()),
Err(err) => {
let _ = session.callbacks.remove(&seq);
return Err(anyhow::Error::new(err));
}
}
}
async fn count<T>(&self, f: impl FnOnce(&mut Counter) -> T) -> T {
let mut counter = self.counter.lock().await;
f(&mut counter)
}
async fn control(
&self,
settings: Option<PartialConnectionSettings>,
) -> Result<ConnectionSettings> {
if let Some(settings) = settings {
if let Some(interval) = settings.status_report_interval {
self.settings
.status_report_interval
.store(interval, Relaxed);
}
}
let current_settings = ConnectionSettings {
status_report_interval: self.settings.status_report_interval.load(Relaxed),
};
Ok(current_settings)
}
async fn create_judge(self: Arc<Self>, judge: CreateJudgeArgs) -> Result<()> {
task::spawn(async move {
self.count(|cnt| cnt.pending += 1).await;
self.count(|cnt| {
cnt.pending -= 1;
cnt.judging += 1;
})
.await;
let finish = FinishJudgeArgs {
id: judge.id.clone(),
result: JudgeResult {
cases: Vec::new(),
extra: None,
},
};
self.count(|cnt| {
cnt.judging -= 1;
cnt.finished += 1;
})
.await;
self.finish_judge(finish).await
});
Ok(())
}
async fn update_judge(&self, update: UpdateJudgeArgs) -> Result<()> {
let res = self.wsrpc(RpcRequest::UpdateJudge(update)).await?;
let output = to_anyhow(res)?;
if output.is_some() {
warn!(?output, "unexpected output")
}
Ok(())
}
async fn finish_judge(&self, finish: FinishJudgeArgs) -> Result<()> {
let res = self.wsrpc(RpcRequest::FinishJudge(finish)).await?;
let output = to_anyhow(res)?;
if output.is_some() {
warn!(?output, "unexpected output")
}
Ok(())
}
}
fn to_response<T: Serialize>(result: Result<T>) -> RpcResponse {
match result {
Ok(value) => {
let raw_value = RawValue::from_string(serde_json::to_string(&value).unwrap()).unwrap();
RpcResponse::Output(Some(raw_value))
}
Err(err) => RpcResponse::Error(ErrorInfo {
code: ErrorCode::UnknownError,
message: Some(err.to_string()),
}),
}
}
fn to_null_response(result: Result<()>) -> RpcResponse {
match result {
Ok(()) => RpcResponse::Output(None),
Err(err) => RpcResponse::Error(ErrorInfo {
code: ErrorCode::UnknownError,
message: Some(err.to_string()),
}),
}
}
fn to_anyhow(response: RpcResponse) -> Result<Option<Box<RawValue>>> {
match response {
RpcResponse::Output(output) => Ok(output),
RpcResponse::Error(err) => Err(anyhow::Error::from(err)),
}
}
| run | identifier_name |
judger.rs | use crate::config::Config;
use crate::{WsMessage, WsStream};
use heng_utils::container::inject;
use heng_protocol::common::JudgeResult;
use heng_protocol::error::ErrorCode;
use heng_protocol::internal::{ConnectionSettings, ErrorInfo, PartialConnectionSettings};
use heng_protocol::internal::ws_json::{
CreateJudgeArgs, FinishJudgeArgs, Message as RpcMessage, ReportStatusArgs,
Request as RpcRequest, Response as RpcResponse, UpdateJudgeArgs,
};
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering::Relaxed};
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use chrono::Utc;
use dashmap::DashMap;
use futures::stream::SplitStream;
use futures::StreamExt;
use futures::TryFutureExt;
use serde::Serialize;
use serde_json::value::RawValue;
use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::{task, time};
use tokio_stream::wrappers::ReceiverStream;
use tokio_tungstenite::tungstenite;
use tracing::{debug, error, info, warn};
use tungstenite::protocol::frame::coding::CloseCode;
use tungstenite::protocol::CloseFrame;
pub struct Judger {
settings: Settings,
counter: Mutex<Counter>,
session: WsSession,
rpc_timeout: u64,
}
struct WsSession {
sender: mpsc::Sender<WsMessage>,
seq: AtomicU32,
callbacks: DashMap<u32, oneshot::Sender<RpcResponse>>,
}
struct Settings {
status_report_interval: AtomicU64,
}
#[derive(Debug, Clone)]
struct Counter {
pending: u64,
judging: u64,
finished: u64,
}
impl Judger {
pub async fn run(ws_stream: WsStream) -> Result<()> {
let config = inject::<Config>();
let (ws_sink, ws_stream) = ws_stream.split();
let (tx, rx) = mpsc::channel::<WsMessage>(4096);
task::spawn( |
let judger = Arc::new(Self {
settings: Settings {
status_report_interval: AtomicU64::new(1000),
},
session: WsSession {
sender: tx,
seq: AtomicU32::new(0),
callbacks: DashMap::new(),
},
counter: Mutex::new(Counter {
pending: 0,
judging: 0,
finished: 0,
}),
rpc_timeout: config.judger.rpc_timeout,
});
task::spawn(judger.clone().report_status_loop());
judger.main_loop(ws_stream).await
}
async fn main_loop(self: Arc<Self>, mut ws_stream: SplitStream<WsStream>) -> Result<()> {
info!("starting main loop");
while let Some(frame) = ws_stream.next().await {
use tungstenite::Message::*;
let frame = frame?;
match frame {
Close(reason) => {
warn!(?reason, "ws session closed");
return Ok(());
}
Text(text) => {
let rpc_msg: RpcMessage = match serde_json::from_str(&text) {
Ok(m) => m,
Err(err) => {
error!(%err, "internal protocol: message format error:\n{:?}\n", text);
let close_frame = CloseFrame {
code: CloseCode::Invalid,
reason: "internal protocol message format error".into(),
};
let _ = self.session.sender.send(Close(Some(close_frame))).await;
return Err(err.into());
}
};
match rpc_msg {
RpcMessage::Request { seq, body, .. } => {
let this = self.clone();
task::spawn(async move {
let response = this.clone().handle_rpc_request(body).await;
let rpc_msg = RpcMessage::Response {
seq,
time: Utc::now(),
body: response,
};
let ws_msg =
WsMessage::text(serde_json::to_string(&rpc_msg).unwrap());
let _ = this.session.sender.send(ws_msg).await;
});
}
RpcMessage::Response { seq, body, .. } => {
match self.session.callbacks.remove(&seq) {
None => warn!(?seq, "no such callback"),
Some((_, cb)) => match cb.send(body) {
Ok(()) => {}
Err(_) => warn!(?seq, "the callback timed out"),
},
}
}
}
}
_ => {
warn!("drop ws message");
drop(frame);
}
}
}
Ok(())
}
async fn report_status_loop(self: Arc<Self>) -> Result<()> {
loop {
let delay = self.settings.status_report_interval.load(Relaxed);
time::sleep(Duration::from_millis(delay)).await;
let result = self
.wsrpc(RpcRequest::ReportStatus(ReportStatusArgs {
collect_time: Utc::now(),
next_report_time: Utc::now() + chrono::Duration::milliseconds(delay as i64),
report: None, // FIXME
}))
.await;
let cnt = self.count(|cnt| cnt.clone()).await;
match result {
Ok(RpcResponse::Output(None)) => {
debug!(interval=?delay, count=?cnt, "report status")
}
Ok(RpcResponse::Output(Some(value))) => warn!(?value, "unexpected response"),
Ok(RpcResponse::Error(err)) => warn!(%err, "report status"),
Err(_) => warn!("the request failed"),
}
}
}
async fn handle_rpc_request(self: Arc<Self>, req: RpcRequest) -> RpcResponse {
match req {
RpcRequest::CreateJudge(args) => to_null_response(self.create_judge(args).await),
RpcRequest::Control(args) => to_response(self.control(args).await),
_ => RpcResponse::Error(ErrorInfo {
code: ErrorCode::NotSupported,
message: None,
}),
}
}
async fn wsrpc(&self, req: RpcRequest) -> Result<RpcResponse> {
let session = &self.session;
let seq = session.seq.fetch_add(1, Relaxed).wrapping_add(1);
let (tx, rx) = oneshot::channel();
let rpc_msg = RpcMessage::Request {
seq,
time: Utc::now(),
body: req,
};
let ws_msg = WsMessage::text(serde_json::to_string(&rpc_msg).unwrap());
{
session.callbacks.insert(seq, tx);
session.sender.send(ws_msg).await.unwrap();
}
match time::timeout(Duration::from_millis(self.rpc_timeout), rx).await {
Ok(res) => Ok(res.unwrap()),
Err(err) => {
let _ = session.callbacks.remove(&seq);
return Err(anyhow::Error::new(err));
}
}
}
async fn count<T>(&self, f: impl FnOnce(&mut Counter) -> T) -> T {
let mut counter = self.counter.lock().await;
f(&mut counter)
}
async fn control(
&self,
settings: Option<PartialConnectionSettings>,
) -> Result<ConnectionSettings> {
if let Some(settings) = settings {
if let Some(interval) = settings.status_report_interval {
self.settings
.status_report_interval
.store(interval, Relaxed);
}
}
let current_settings = ConnectionSettings {
status_report_interval: self.settings.status_report_interval.load(Relaxed),
};
Ok(current_settings)
}
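// NOTE: actual judging is stubbed out here; the spawned task only walks the
// pending -> judging -> finished counters and reports an empty result.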
async fn create_judge(self: Arc<Self>, judge: CreateJudgeArgs) -> Result<()> {
task::spawn(async move {
self.count(|cnt| cnt.pending += 1).await;
self.count(|cnt| {
cnt.pending -= 1;
cnt.judging += 1;
})
.await;
let finish = FinishJudgeArgs {
id: judge.id.clone(),
result: JudgeResult {
cases: Vec::new(),
extra: None,
},
};
self.count(|cnt| {
cnt.judging -= 1;
cnt.finished += 1;
})
.await;
self.finish_judge(finish).await
});
Ok(())
}
async fn update_judge(&self, update: UpdateJudgeArgs) -> Result<()> {
let res = self.wsrpc(RpcRequest::UpdateJudge(update)).await?;
let output = to_anyhow(res)?;
if output.is_some() {
warn!(?output, "unexpected output")
}
Ok(())
}
async fn finish_judge(&self, finish: FinishJudgeArgs) -> Result<()> {
let res = self.wsrpc(RpcRequest::FinishJudge(finish)).await?;
let output = to_anyhow(res)?;
if output.is_some() {
warn!(?output, "unexpected output")
}
Ok(())
}
}
fn to_response<T: Serialize>(result: Result<T>) -> RpcResponse {
match result {
Ok(value) => {
let raw_value = RawValue::from_string(serde_json::to_string(&value).unwrap()).unwrap();
RpcResponse::Output(Some(raw_value))
}
Err(err) => RpcResponse::Error(ErrorInfo {
code: ErrorCode::UnknownError,
message: Some(err.to_string()),
}),
}
}
fn to_null_response(result: Result<()>) -> RpcResponse {
match result {
Ok(()) => RpcResponse::Output(None),
Err(err) => RpcResponse::Error(ErrorInfo {
code: ErrorCode::UnknownError,
message: Some(err.to_string()),
}),
}
}
fn to_anyhow(response: RpcResponse) -> Result<Option<Box<RawValue>>> {
match response {
RpcResponse::Output(output) => Ok(output),
RpcResponse::Error(err) => Err(anyhow::Error::from(err)),
}
} | ReceiverStream::new(rx)
.map(Ok)
.forward(ws_sink)
.inspect_err(|err| error!(%err, "ws forward error")),
); | random_line_split |
compile.rs | use std::fs;
use std::path::{Path, PathBuf};
use codespan_reporting::diagnostic::{Diagnostic, Label};
use codespan_reporting::term::{self, termcolor};
use termcolor::{ColorChoice, StandardStream};
use typst::diag::{bail, Severity, SourceDiagnostic, StrResult};
use typst::doc::Document;
use typst::eval::{eco_format, Tracer};
use typst::geom::Color;
use typst::syntax::{FileId, Source};
use typst::World;
use crate::args::{CompileCommand, DiagnosticFormat, OutputFormat};
use crate::watch::Status;
use crate::world::SystemWorld;
use crate::{color_stream, set_failed};
type CodespanResult<T> = Result<T, CodespanError>;
type CodespanError = codespan_reporting::files::Error;
impl CompileCommand {
/// The output path.
pub fn output(&self) -> PathBuf {
self.output
.clone()
.unwrap_or_else(|| self.common.input.with_extension("pdf"))
}
/// The format to use for generated output, either specified by the user or inferred from the extension.
///
/// Will return `Err` if the format was not specified and could not be inferred.
pub fn output_format(&self) -> StrResult<OutputFormat> {
Ok(if let Some(specified) = self.format {
specified
} else if let Some(output) = &self.output {
match output.extension() {
Some(ext) if ext.eq_ignore_ascii_case("pdf") => OutputFormat::Pdf,
Some(ext) if ext.eq_ignore_ascii_case("png") => OutputFormat::Png,
Some(ext) if ext.eq_ignore_ascii_case("svg") => OutputFormat::Svg,
_ => bail!("could not infer output format for path {}.\nconsider providing the format manually with `--format/-f`", output.display()),
}
} else {
OutputFormat::Pdf
})
}
}
/// Execute a compilation command.
pub fn compile(mut command: CompileCommand) -> StrResult<()> {
let mut world = SystemWorld::new(&command.common)?;
compile_once(&mut world, &mut command, false)?;
Ok(())
}
/// Compile a single time.
///
/// Returns whether it compiled without errors.
#[tracing::instrument(skip_all)]
pub fn compile_once(
world: &mut SystemWorld,
command: &mut CompileCommand,
watching: bool,
) -> StrResult<()> {
tracing::info!("Starting compilation");
let start = std::time::Instant::now();
if watching {
Status::Compiling.print(command).unwrap();
}
// Reset everything and ensure that the main file is present.
world.reset();
world.source(world.main()).map_err(|err| err.to_string())?;
let mut tracer = Tracer::default();
let result = typst::compile(world, &mut tracer);
let warnings = tracer.warnings();
match result {
// Export the PDF / PNG.
Ok(document) => {
export(&document, command)?;
let duration = start.elapsed();
tracing::info!("Compilation succeeded in {duration:?}");
if watching {
if warnings.is_empty() {
Status::Success(duration).print(command).unwrap();
} else {
Status::PartialSuccess(duration).print(command).unwrap();
}
}
print_diagnostics(world, &[], &warnings, command.common.diagnostic_format)
.map_err(|_| "failed to print diagnostics")?;
if let Some(open) = command.open.take() {
open_file(open.as_deref(), &command.output())?;
}
}
// Print diagnostics.
Err(errors) => {
set_failed();
tracing::info!("Compilation failed");
if watching {
Status::Error.print(command).unwrap();
}
print_diagnostics(
world,
&errors,
&warnings,
command.common.diagnostic_format,
)
.map_err(|_| "failed to print diagnostics")?;
}
}
Ok(())
}
/// Export into the target format.
fn export(document: &Document, command: &CompileCommand) -> StrResult<()> {
match command.output_format()? {
OutputFormat::Png => export_image(document, command, ImageExportFormat::Png),
OutputFormat::Svg => export_image(document, command, ImageExportFormat::Svg),
OutputFormat::Pdf => export_pdf(document, command),
}
}
/// Export to a PDF.
fn export_pdf(document: &Document, command: &CompileCommand) -> StrResult<()> {
let output = command.output();
let buffer = typst::export::pdf(document);
fs::write(output, buffer).map_err(|_| "failed to write PDF file")?;
Ok(())
}
/// An image format to export in.
enum ImageExportFormat {
Png,
Svg,
}
/// Export to one or multiple PNGs.
fn export_image(
document: &Document,
command: &CompileCommand,
fmt: ImageExportFormat,
) -> StrResult<()> {
// Determine whether we have a `{n}` numbering.
let output = command.output();
let string = output.to_str().unwrap_or_default();
let numbered = string.contains("{n}");
if !numbered && document.pages.len() > 1 {
bail!("cannot export multiple images without `{{n}}` in output path");
}
// Find a number width that accommodates all pages. For instance, the
// first page should be numbered "001" if there are between 100 and
// 999 pages.
let width = 1 + document.pages.len().checked_ilog10().unwrap_or(0) as usize;
let mut storage;
for (i, frame) in document.pages.iter().enumerate() {
let path = if numbered {
storage = string.replace("{n}", &format!("{:0width$}", i + 1));
Path::new(&storage)
} else {
output.as_path()
};
match fmt {
ImageExportFormat::Png => {
let pixmap =
typst::export::render(frame, command.ppi / 72.0, Color::WHITE);
pixmap.save_png(path).map_err(|_| "failed to write PNG file")?;
}
ImageExportFormat::Svg => {
let svg = typst::export::svg(frame);
fs::write(path, svg).map_err(|_| "failed to write SVG file")?;
}
}
}
Ok(())
}
/// Opens the given file using:
/// - The default file viewer if `open` is `None`.
/// - The given viewer provided by `open` if it is `Some`.
fn open_file(open: Option<&str>, path: &Path) -> StrResult<()> {
if let Some(app) = open {
open::with_in_background(path, app);
} else {
open::that_in_background(path);
}
Ok(())
}
/// Print diagnostic messages to the terminal.
pub fn print_diagnostics(
world: &SystemWorld,
errors: &[SourceDiagnostic],
warnings: &[SourceDiagnostic],
diagnostic_format: DiagnosticFormat,
) -> Result<(), codespan_reporting::files::Error> {
let mut w = match diagnostic_format {
DiagnosticFormat::Human => color_stream(),
DiagnosticFormat::Short => StandardStream::stderr(ColorChoice::Never),
};
let mut config = term::Config { tab_width: 2, ..Default::default() };
if diagnostic_format == DiagnosticFormat::Short {
config.display_style = term::DisplayStyle::Short;
} | Severity::Warning => Diagnostic::warning(),
}
.with_message(diagnostic.message.clone())
.with_notes(
diagnostic
.hints
.iter()
.map(|e| (eco_format!("hint: {e}")).into())
.collect(),
)
.with_labels(vec![Label::primary(
diagnostic.span.id(),
world.range(diagnostic.span),
)]);
term::emit(&mut w, &config, world, &diag)?;
// Stacktrace-like helper diagnostics.
for point in &diagnostic.trace {
let message = point.v.to_string();
let help = Diagnostic::help().with_message(message).with_labels(vec![
Label::primary(point.span.id(), world.range(point.span)),
]);
term::emit(&mut w, &config, world, &help)?;
}
}
Ok(())
}
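// Bridge `SystemWorld` into codespan-reporting so emitted diagnostics can
// resolve file names, source text, and line/column positions.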
impl<'a> codespan_reporting::files::Files<'a> for SystemWorld {
type FileId = FileId;
type Name = String;
type Source = Source;
fn name(&'a self, id: FileId) -> CodespanResult<Self::Name> {
let vpath = id.vpath();
Ok(if let Some(package) = id.package() {
format!("{package}{}", vpath.as_rooted_path().display())
} else {
// Try to express the path relative to the working directory.
vpath
.resolve(self.root())
.and_then(|abs| pathdiff::diff_paths(&abs, self.workdir()))
.as_deref()
.unwrap_or_else(|| vpath.as_rootless_path())
.to_string_lossy()
.into()
})
}
fn source(&'a self, id: FileId) -> CodespanResult<Self::Source> {
Ok(self.lookup(id))
}
fn line_index(&'a self, id: FileId, given: usize) -> CodespanResult<usize> {
let source = self.lookup(id);
source
.byte_to_line(given)
.ok_or_else(|| CodespanError::IndexTooLarge {
given,
max: source.len_bytes(),
})
}
fn line_range(
&'a self,
id: FileId,
given: usize,
) -> CodespanResult<std::ops::Range<usize>> {
let source = self.lookup(id);
source
.line_to_range(given)
.ok_or_else(|| CodespanError::LineTooLarge { given, max: source.len_lines() })
}
fn column_number(
&'a self,
id: FileId,
_: usize,
given: usize,
) -> CodespanResult<usize> {
let source = self.lookup(id);
source.byte_to_column(given).ok_or_else(|| {
let max = source.len_bytes();
if given <= max {
CodespanError::InvalidCharBoundary { given }
} else {
CodespanError::IndexTooLarge { given, max }
}
})
}
} |
for diagnostic in warnings.iter().chain(errors.iter()) {
let diag = match diagnostic.severity {
Severity::Error => Diagnostic::error(), | random_line_split |
compile.rs | use std::fs;
use std::path::{Path, PathBuf};
use codespan_reporting::diagnostic::{Diagnostic, Label};
use codespan_reporting::term::{self, termcolor};
use termcolor::{ColorChoice, StandardStream};
use typst::diag::{bail, Severity, SourceDiagnostic, StrResult};
use typst::doc::Document;
use typst::eval::{eco_format, Tracer};
use typst::geom::Color;
use typst::syntax::{FileId, Source};
use typst::World;
use crate::args::{CompileCommand, DiagnosticFormat, OutputFormat};
use crate::watch::Status;
use crate::world::SystemWorld;
use crate::{color_stream, set_failed};
type CodespanResult<T> = Result<T, CodespanError>;
type CodespanError = codespan_reporting::files::Error;
impl CompileCommand {
/// The output path.
pub fn output(&self) -> PathBuf {
self.output
.clone()
.unwrap_or_else(|| self.common.input.with_extension("pdf"))
}
/// The format to use for generated output, either specified by the user or inferred from the extension.
///
/// Will return `Err` if the format was not specified and could not be inferred.
pub fn output_format(&self) -> StrResult<OutputFormat> {
Ok(if let Some(specified) = self.format {
specified
} else if let Some(output) = &self.output {
match output.extension() {
Some(ext) if ext.eq_ignore_ascii_case("pdf") => OutputFormat::Pdf,
Some(ext) if ext.eq_ignore_ascii_case("png") => OutputFormat::Png,
Some(ext) if ext.eq_ignore_ascii_case("svg") => OutputFormat::Svg,
_ => bail!("could not infer output format for path {}.\nconsider providing the format manually with `--format/-f`", output.display()),
}
} else {
OutputFormat::Pdf
})
}
}
/// Execute a compilation command.
pub fn compile(mut command: CompileCommand) -> StrResult<()> {
let mut world = SystemWorld::new(&command.common)?;
compile_once(&mut world, &mut command, false)?;
Ok(())
}
/// Compile a single time.
///
/// Returns whether it compiled without errors.
#[tracing::instrument(skip_all)]
pub fn compile_once(
world: &mut SystemWorld,
command: &mut CompileCommand,
watching: bool,
) -> StrResult<()> {
tracing::info!("Starting compilation");
let start = std::time::Instant::now();
if watching {
Status::Compiling.print(command).unwrap();
}
// Reset everything and ensure that the main file is present.
world.reset();
world.source(world.main()).map_err(|err| err.to_string())?;
let mut tracer = Tracer::default();
let result = typst::compile(world, &mut tracer);
let warnings = tracer.warnings();
match result {
// Export the PDF / PNG.
Ok(document) => {
export(&document, command)?;
let duration = start.elapsed();
tracing::info!("Compilation succeeded in {duration:?}");
if watching {
if warnings.is_empty() {
Status::Success(duration).print(command).unwrap();
} else {
Status::PartialSuccess(duration).print(command).unwrap();
}
}
print_diagnostics(world, &[], &warnings, command.common.diagnostic_format)
.map_err(|_| "failed to print diagnostics")?;
if let Some(open) = command.open.take() {
open_file(open.as_deref(), &command.output())?;
}
}
// Print diagnostics.
Err(errors) => {
set_failed();
tracing::info!("Compilation failed");
if watching {
Status::Error.print(command).unwrap();
}
print_diagnostics(
world,
&errors,
&warnings,
command.common.diagnostic_format,
)
.map_err(|_| "failed to print diagnostics")?;
}
}
Ok(())
}
/// Export into the target format.
fn export(document: &Document, command: &CompileCommand) -> StrResult<()> {
match command.output_format()? {
OutputFormat::Png => export_image(document, command, ImageExportFormat::Png),
OutputFormat::Svg => export_image(document, command, ImageExportFormat::Svg),
OutputFormat::Pdf => export_pdf(document, command),
}
}
/// Export to a PDF.
fn export_pdf(document: &Document, command: &CompileCommand) -> StrResult<()> {
let output = command.output();
let buffer = typst::export::pdf(document);
fs::write(output, buffer).map_err(|_| "failed to write PDF file")?;
Ok(())
}
/// An image format to export in.
enum ImageExportFormat {
Png,
Svg,
}
/// Export to one or multiple PNGs.
fn export_image(
document: &Document,
command: &CompileCommand,
fmt: ImageExportFormat,
) -> StrResult<()> {
// Determine whether we have a `{n}` numbering.
let output = command.output();
let string = output.to_str().unwrap_or_default();
let numbered = string.contains("{n}");
if !numbered && document.pages.len() > 1 {
bail!("cannot export multiple images without `{{n}}` in output path");
}
// Find a number width that accommodates all pages. For instance, the
// first page should be numbered "001" if there are between 100 and
// 999 pages.
let width = 1 + document.pages.len().checked_ilog10().unwrap_or(0) as usize;
let mut storage;
for (i, frame) in document.pages.iter().enumerate() {
let path = if numbered {
storage = string.replace("{n}", &format!("{:0width$}", i + 1));
Path::new(&storage)
} else {
output.as_path()
};
match fmt {
ImageExportFormat::Png => {
let pixmap =
typst::export::render(frame, command.ppi / 72.0, Color::WHITE);
pixmap.save_png(path).map_err(|_| "failed to write PNG file")?;
}
ImageExportFormat::Svg => {
let svg = typst::export::svg(frame);
fs::write(path, svg).map_err(|_| "failed to write SVG file")?;
}
}
}
Ok(())
}
/// Opens the given file using:
/// - The default file viewer if `open` is `None`.
/// - The given viewer provided by `open` if it is `Some`.
fn open_file(open: Option<&str>, path: &Path) -> StrResult<()> {
if let Some(app) = open {
open::with_in_background(path, app);
} else {
open::that_in_background(path);
}
Ok(())
}
/// Print diagnostic messages to the terminal.
pub fn print_diagnostics(
world: &SystemWorld,
errors: &[SourceDiagnostic],
warnings: &[SourceDiagnostic],
diagnostic_format: DiagnosticFormat,
) -> Result<(), codespan_reporting::files::Error> | .iter()
.map(|e| (eco_format!("hint: {e}")).into())
.collect(),
)
.with_labels(vec![Label::primary(
diagnostic.span.id(),
world.range(diagnostic.span),
)]);
term::emit(&mut w, &config, world, &diag)?;
// Stacktrace-like helper diagnostics.
for point in &diagnostic.trace {
let message = point.v.to_string();
let help = Diagnostic::help().with_message(message).with_labels(vec![
Label::primary(point.span.id(), world.range(point.span)),
]);
term::emit(&mut w, &config, world, &help)?;
}
}
Ok(())
}
impl<'a> codespan_reporting::files::Files<'a> for SystemWorld {
type FileId = FileId;
type Name = String;
type Source = Source;
fn name(&'a self, id: FileId) -> CodespanResult<Self::Name> {
let vpath = id.vpath();
Ok(if let Some(package) = id.package() {
format!("{package}{}", vpath.as_rooted_path().display())
} else {
// Try to express the path relative to the working directory.
vpath
.resolve(self.root())
.and_then(|abs| pathdiff::diff_paths(&abs, self.workdir()))
.as_deref()
.unwrap_or_else(|| vpath.as_rootless_path())
.to_string_lossy()
.into()
})
}
fn source(&'a self, id: FileId) -> CodespanResult<Self::Source> {
Ok(self.lookup(id))
}
fn line_index(&'a self, id: FileId, given: usize) -> CodespanResult<usize> {
let source = self.lookup(id);
source
.byte_to_line(given)
.ok_or_else(|| CodespanError::IndexTooLarge {
given,
max: source.len_bytes(),
})
}
fn line_range(
&'a self,
id: FileId,
given: usize,
) -> CodespanResult<std::ops::Range<usize>> {
let source = self.lookup(id);
source
.line_to_range(given)
.ok_or_else(|| CodespanError::LineTooLarge { given, max: source.len_lines() })
}
fn column_number(
&'a self,
id: FileId,
_: usize,
given: usize,
) -> CodespanResult<usize> {
let source = self.lookup(id);
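// `byte_to_column` fails either because the offset falls inside a multi-byte
// character (still in bounds) or because it lies past the end of the file;
// distinguish the two cases in the reported error.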
source.byte_to_column(given).ok_or_else(|| {
let max = source.len_bytes();
if given <= max {
CodespanError::InvalidCharBoundary { given }
} else {
CodespanError::IndexTooLarge { given, max }
}
})
}
}
| {
let mut w = match diagnostic_format {
DiagnosticFormat::Human => color_stream(),
DiagnosticFormat::Short => StandardStream::stderr(ColorChoice::Never),
};
let mut config = term::Config { tab_width: 2, ..Default::default() };
if diagnostic_format == DiagnosticFormat::Short {
config.display_style = term::DisplayStyle::Short;
}
for diagnostic in warnings.iter().chain(errors.iter()) {
let diag = match diagnostic.severity {
Severity::Error => Diagnostic::error(),
Severity::Warning => Diagnostic::warning(),
}
.with_message(diagnostic.message.clone())
.with_notes(
diagnostic
.hints | identifier_body |
compile.rs | use std::fs;
use std::path::{Path, PathBuf};
use codespan_reporting::diagnostic::{Diagnostic, Label};
use codespan_reporting::term::{self, termcolor};
use termcolor::{ColorChoice, StandardStream};
use typst::diag::{bail, Severity, SourceDiagnostic, StrResult};
use typst::doc::Document;
use typst::eval::{eco_format, Tracer};
use typst::geom::Color;
use typst::syntax::{FileId, Source};
use typst::World;
use crate::args::{CompileCommand, DiagnosticFormat, OutputFormat};
use crate::watch::Status;
use crate::world::SystemWorld;
use crate::{color_stream, set_failed};
type CodespanResult<T> = Result<T, CodespanError>;
type CodespanError = codespan_reporting::files::Error;
impl CompileCommand {
/// The output path.
pub fn output(&self) -> PathBuf {
self.output
.clone()
.unwrap_or_else(|| self.common.input.with_extension("pdf"))
}
/// The format to use for generated output, either specified by the user or inferred from the extension.
///
/// Will return `Err` if the format was not specified and could not be inferred.
pub fn output_format(&self) -> StrResult<OutputFormat> {
Ok(if let Some(specified) = self.format {
specified
} else if let Some(output) = &self.output {
match output.extension() {
Some(ext) if ext.eq_ignore_ascii_case("pdf") => OutputFormat::Pdf,
Some(ext) if ext.eq_ignore_ascii_case("png") => OutputFormat::Png,
Some(ext) if ext.eq_ignore_ascii_case("svg") => OutputFormat::Svg,
_ => bail!("could not infer output format for path {}.\nconsider providing the format manually with `--format/-f`", output.display()),
}
} else {
OutputFormat::Pdf
})
}
}
/// Execute a compilation command.
pub fn compile(mut command: CompileCommand) -> StrResult<()> {
let mut world = SystemWorld::new(&command.common)?;
compile_once(&mut world, &mut command, false)?;
Ok(())
}
/// Compile a single time.
///
/// Returns whether it compiled without errors.
#[tracing::instrument(skip_all)]
pub fn compile_once(
world: &mut SystemWorld,
command: &mut CompileCommand,
watching: bool,
) -> StrResult<()> {
tracing::info!("Starting compilation");
let start = std::time::Instant::now();
if watching {
Status::Compiling.print(command).unwrap();
}
// Reset everything and ensure that the main file is present.
world.reset();
world.source(world.main()).map_err(|err| err.to_string())?;
let mut tracer = Tracer::default();
let result = typst::compile(world, &mut tracer);
let warnings = tracer.warnings();
match result {
// Export the PDF / PNG.
Ok(document) => {
export(&document, command)?;
let duration = start.elapsed();
tracing::info!("Compilation succeeded in {duration:?}");
if watching {
if warnings.is_empty() {
Status::Success(duration).print(command).unwrap();
} else {
Status::PartialSuccess(duration).print(command).unwrap();
}
}
print_diagnostics(world, &[], &warnings, command.common.diagnostic_format)
.map_err(|_| "failed to print diagnostics")?;
if let Some(open) = command.open.take() {
open_file(open.as_deref(), &command.output())?;
}
}
// Print diagnostics.
Err(errors) => {
set_failed();
tracing::info!("Compilation failed");
if watching {
Status::Error.print(command).unwrap();
}
print_diagnostics(
world,
&errors,
&warnings,
command.common.diagnostic_format,
)
.map_err(|_| "failed to print diagnostics")?;
}
}
Ok(())
}
/// Export into the target format.
fn export(document: &Document, command: &CompileCommand) -> StrResult<()> {
match command.output_format()? {
OutputFormat::Png => export_image(document, command, ImageExportFormat::Png),
OutputFormat::Svg => export_image(document, command, ImageExportFormat::Svg),
OutputFormat::Pdf => export_pdf(document, command),
}
}
/// Export to a PDF.
fn export_pdf(document: &Document, command: &CompileCommand) -> StrResult<()> {
let output = command.output();
let buffer = typst::export::pdf(document);
fs::write(output, buffer).map_err(|_| "failed to write PDF file")?;
Ok(())
}
/// An image format to export in.
enum ImageExportFormat {
Png,
Svg,
}
/// Export to one or multiple PNGs.
fn export_image(
document: &Document,
command: &CompileCommand,
fmt: ImageExportFormat,
) -> StrResult<()> {
// Determine whether we have a `{n}` numbering.
let output = command.output();
let string = output.to_str().unwrap_or_default();
let numbered = string.contains("{n}");
if !numbered && document.pages.len() > 1 {
bail!("cannot export multiple images without `{{n}}` in output path");
}
// Find a number width that accommodates all pages. For instance, the
// first page should be numbered "001" if there are between 100 and
// 999 pages.
let width = 1 + document.pages.len().checked_ilog10().unwrap_or(0) as usize;
let mut storage;
for (i, frame) in document.pages.iter().enumerate() {
let path = if numbered {
storage = string.replace("{n}", &format!("{:0width$}", i + 1));
Path::new(&storage)
} else {
output.as_path()
};
match fmt {
ImageExportFormat::Png => {
let pixmap =
typst::export::render(frame, command.ppi / 72.0, Color::WHITE);
pixmap.save_png(path).map_err(|_| "failed to write PNG file")?;
}
ImageExportFormat::Svg => {
let svg = typst::export::svg(frame);
fs::write(path, svg).map_err(|_| "failed to write SVG file")?;
}
}
}
Ok(())
}
/// Opens the given file using:
/// - The default file viewer if `open` is `None`.
/// - The given viewer provided by `open` if it is `Some`.
fn open_file(open: Option<&str>, path: &Path) -> StrResult<()> {
if let Some(app) = open {
open::with_in_background(path, app);
} else {
open::that_in_background(path);
}
Ok(())
}
/// Print diagnostic messages to the terminal.
pub fn print_diagnostics(
world: &SystemWorld,
errors: &[SourceDiagnostic],
warnings: &[SourceDiagnostic],
diagnostic_format: DiagnosticFormat,
) -> Result<(), codespan_reporting::files::Error> {
let mut w = match diagnostic_format {
DiagnosticFormat::Human => color_stream(),
DiagnosticFormat::Short => StandardStream::stderr(ColorChoice::Never),
};
let mut config = term::Config { tab_width: 2, ..Default::default() };
if diagnostic_format == DiagnosticFormat::Short {
config.display_style = term::DisplayStyle::Short;
}
for diagnostic in warnings.iter().chain(errors.iter()) {
let diag = match diagnostic.severity {
Severity::Error => Diagnostic::error(),
Severity::Warning => Diagnostic::warning(),
}
.with_message(diagnostic.message.clone())
.with_notes(
diagnostic
.hints
.iter()
.map(|e| (eco_format!("hint: {e}")).into())
.collect(),
)
.with_labels(vec![Label::primary(
diagnostic.span.id(),
world.range(diagnostic.span),
)]);
term::emit(&mut w, &config, world, &diag)?;
// Stacktrace-like helper diagnostics.
for point in &diagnostic.trace {
let message = point.v.to_string();
let help = Diagnostic::help().with_message(message).with_labels(vec![
Label::primary(point.span.id(), world.range(point.span)),
]);
term::emit(&mut w, &config, world, &help)?;
}
}
Ok(())
}
impl<'a> codespan_reporting::files::Files<'a> for SystemWorld {
type FileId = FileId;
type Name = String;
type Source = Source;
fn | (&'a self, id: FileId) -> CodespanResult<Self::Name> {
let vpath = id.vpath();
Ok(if let Some(package) = id.package() {
format!("{package}{}", vpath.as_rooted_path().display())
} else {
// Try to express the path relative to the working directory.
vpath
.resolve(self.root())
.and_then(|abs| pathdiff::diff_paths(&abs, self.workdir()))
.as_deref()
.unwrap_or_else(|| vpath.as_rootless_path())
.to_string_lossy()
.into()
})
}
fn source(&'a self, id: FileId) -> CodespanResult<Self::Source> {
Ok(self.lookup(id))
}
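// The remaining methods translate byte offsets and line numbers between
// typst's `Source` and the ranges codespan-reporting expects, surfacing
// out-of-bounds inputs as errors.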
fn line_index(&'a self, id: FileId, given: usize) -> CodespanResult<usize> {
let source = self.lookup(id);
source
.byte_to_line(given)
.ok_or_else(|| CodespanError::IndexTooLarge {
given,
max: source.len_bytes(),
})
}
fn line_range(
&'a self,
id: FileId,
given: usize,
) -> CodespanResult<std::ops::Range<usize>> {
let source = self.lookup(id);
source
.line_to_range(given)
.ok_or_else(|| CodespanError::LineTooLarge { given, max: source.len_lines() })
}
fn column_number(
&'a self,
id: FileId,
_: usize,
given: usize,
) -> CodespanResult<usize> {
let source = self.lookup(id);
source.byte_to_column(given).ok_or_else(|| {
let max = source.len_bytes();
if given <= max {
CodespanError::InvalidCharBoundary { given }
} else {
CodespanError::IndexTooLarge { given, max }
}
})
}
}
| name | identifier_name |
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
#[macro_use]
extern crate error_chain;
use std::cell::{Cell, RefCell};
use std::f32;
use std::path::PathBuf;
use std::rc::Rc;
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use directories::ProjectDirs;
use lazy_static::lazy_static;
use serde_derive::Deserialize;
use gelatin::glium::glutin::{
dpi::{PhysicalPosition, PhysicalSize},
event::WindowEvent,
window::Icon,
};
use gelatin::{
application::*,
button::*,
image,
label::*,
line_layout_container::*,
misc::*,
picture::*,
slider::*,
window::{Window, WindowDescriptorBuilder},
NextUpdate, Widget,
};
use crate::configuration::{Cache, Configuration};
use crate::help_screen::*;
use crate::picture_widget::*;
#[cfg(feature = "networking")]
use crate::version::Version;
mod configuration;
mod handle_panic;
mod help_screen;
mod image_cache;
mod picture_widget;
mod playback_manager;
mod shaders;
mod utils;
#[cfg(feature = "networking")]
mod version;
lazy_static! {
pub static ref PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from("", "", "emulsion");
}
// ========================================================
// Not-so glorious main function
// ========================================================
fn main() {
std::panic::set_hook(Box::new(handle_panic::handle_panic));
let img = image::load_from_memory(include_bytes!("../resource/emulsion48.png")).unwrap();
let rgba = img.into_rgba();
let (w, h) = rgba.dimensions();
let icon = Icon::from_rgba(rgba.into_raw(), w, h).unwrap();
// Load configuration and cache files
let (config_path, cache_path) = get_config_and_cache_paths();
let cache = Cache::load(&cache_path);
let config = Configuration::load(&config_path);
let first_launch = cache.is_err();
let cache = Arc::new(Mutex::new(cache.unwrap_or_default()));
let config = Rc::new(RefCell::new(config.unwrap_or_default()));
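// The cache is mutated from the window's global event handler below, so it
// is shared behind Arc<Mutex>; the configuration is only touched on this
// thread and stays in Rc<RefCell>.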
let mut application = Application::new();
let window: Rc<Window>;
{
let cache = cache.lock().unwrap();
let window_desc = WindowDescriptorBuilder::default()
.icon(Some(icon))
.size(PhysicalSize::new(cache.window.win_w, cache.window.win_h))
.position(Some(PhysicalPosition::new(cache.window.win_x, cache.window.win_y)))
.build()
.unwrap();
window = Window::new(&mut application, window_desc);
}
{
let cache = cache.clone();
window.add_global_event_handler(move |event| match event {
WindowEvent::Resized(new_size) => {
let mut cache = cache.lock().unwrap();
cache.window.win_w = new_size.width;
cache.window.win_h = new_size.height;
}
WindowEvent::Moved(new_pos) => {
let mut cache = cache.lock().unwrap();
cache.window.win_x = new_pos.x;
cache.window.win_y = new_pos.y;
}
_ => (),
});
}
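// Build the static widget tree: stretch containers fill the window, with
// the picture area on top and a fixed-height bottom bar for the controls.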
let vertical_container = Rc::new(VerticalLayoutContainer::new());
vertical_container.set_margin_all(0.0);
vertical_container.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
vertical_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let picture_area_container = Rc::new(VerticalLayoutContainer::new());
picture_area_container.set_margin_all(0.0);
picture_area_container.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
picture_area_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let update_notification = Rc::new(HorizontalLayoutContainer::new());
let update_label = Rc::new(Label::new());
let update_label_image = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/new-version-available.png"
)));
let update_label_image_light = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/new-version-available-light.png"
)));
{
update_notification.set_vertical_align(Alignment::End);
update_notification.set_horizontal_align(Alignment::Start);
update_notification.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
update_notification.set_height(Length::Fixed(32.0));
update_label.set_icon(Some(update_label_image.clone()));
update_label.set_margin_top(4.0);
update_label.set_margin_bottom(4.0);
update_label.set_fixed_size(LogicalVector::new(200.0, 24.0));
update_label.set_horizontal_align(Alignment::Center);
let update_button = Rc::new(Button::new());
let button_image =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/visit-site.png")));
update_button.set_icon(Some(button_image));
update_button.set_margin_top(4.0);
update_button.set_margin_bottom(4.0);
update_button.set_fixed_size(LogicalVector::new(100.0, 24.0));
update_button.set_horizontal_align(Alignment::Center);
update_button.set_on_click(|| {
open::that("https://arturkovacs.github.io/emulsion-website/").unwrap();
});
update_notification.add_child(update_label.clone());
update_notification.add_child(update_button);
}
let usage_img = Picture::from_encoded_bytes(include_bytes!("../resource/usage.png"));
let help_screen = Rc::new(HelpScreen::new(usage_img));
let bottom_container = Rc::new(HorizontalLayoutContainer::new());
//bottom_container.set_margin_top(4.0);
//bottom_container.set_margin_bottom(4.0);
bottom_container.set_margin_left(0.0);
bottom_container.set_margin_right(0.0);
bottom_container.set_height(Length::Fixed(32.0));
bottom_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let moon_img = Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/moon.png")));
let light_img = Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/light.png")));
let theme_button = Rc::new(Button::new());
theme_button.set_margin_top(5.0);
theme_button.set_margin_left(28.0);
theme_button.set_margin_right(4.0);
theme_button.set_height(Length::Fixed(24.0));
theme_button.set_width(Length::Fixed(24.0));
theme_button.set_horizontal_align(Alignment::Center);
theme_button.set_icon(Some(moon_img.clone()));
let question =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question_button.png")));
let question_light = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/question_button_light.png"
)));
let question_noti =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question-noti.png")));
let question_light_noti =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question-light-noti.png")));
let help_button = Rc::new(Button::new());
help_button.set_margin_top(5.0);
help_button.set_margin_left(4.0);
help_button.set_margin_right(28.0);
help_button.set_height(Length::Fixed(24.0));
help_button.set_width(Length::Fixed(24.0));
help_button.set_horizontal_align(Alignment::Center);
help_button.set_icon(Some(question.clone()));
let slider = Rc::new(Slider::new());
slider.set_margin_top(5.0);
slider.set_margin_left(4.0);
slider.set_margin_right(4.0);
slider.set_height(Length::Fixed(24.0));
slider.set_width(Length::Stretch { min: 0.0, max: 600.0 });
slider.set_horizontal_align(Alignment::Center);
slider.set_steps(6, 1);
let picture_widget = Rc::new(PictureWidget::new(
&window.display_mut(),
&window,
slider.clone(),
bottom_container.clone(),
config.clone(),
));
picture_widget.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
picture_widget.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
if let Some(file_path) = std::env::args().nth(1) {
picture_widget.jump_to_path(file_path);
}
bottom_container.add_child(theme_button.clone());
bottom_container.add_child(slider.clone());
bottom_container.add_child(help_button.clone());
picture_area_container.add_child(picture_widget.clone());
picture_area_container.add_child(help_screen.clone());
picture_area_container.add_child(update_notification.clone());
vertical_container.add_child(picture_area_container);
vertical_container.add_child(bottom_container.clone());
let update_available = Arc::new(AtomicBool::new(false));
let update_check_done = Arc::new(AtomicBool::new(false));
let light_theme = Rc::new(Cell::new(!cache.lock().unwrap().window.dark));
let theme_button_clone = theme_button.clone();
let help_button_clone = help_button.clone();
let update_label_clone = update_label;
let picture_widget_clone = picture_widget.clone();
let bottom_container_clone = bottom_container;
let update_notification_clone = update_notification.clone();
let slider_clone = slider.clone();
let window_clone = window.clone();
let light_theme_clone = light_theme.clone();
let update_available_clone = update_available.clone();
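// `set_theme` only captures clones/handles and re-reads the `light_theme` flag and the
// `update_available` atomic on every call, so it can be re-invoked later (e.g. once the
// background update check completes).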
let set_theme = Rc::new(move || {
if light_theme_clone.get() {
picture_widget_clone.set_bright_shade(0.96);
bottom_container_clone.set_bg_color([1.0, 1.0, 1.0, 1.0]);
slider_clone.set_shadow_color([0.0, 0.0, 0.0]);
window_clone.set_bg_color([0.85, 0.85, 0.85, 1.0]);
theme_button_clone.set_icon(Some(moon_img.clone()));
update_notification_clone.set_bg_color([0.06, 0.06, 0.06, 1.0]);
update_label_clone.set_icon(Some(update_label_image_light.clone()));
if update_available_clone.load(Ordering::SeqCst) {
help_button_clone.set_icon(Some(question_noti.clone()));
} else {
help_button_clone.set_icon(Some(question.clone()));
}
} else {
picture_widget_clone.set_bright_shade(0.11);
bottom_container_clone.set_bg_color([0.08, 0.08, 0.08, 1.0]);
slider_clone.set_shadow_color([0.0, 0.0, 0.0]);
window_clone.set_bg_color([0.03, 0.03, 0.03, 1.0]);
theme_button_clone.set_icon(Some(light_img.clone()));
update_notification_clone.set_bg_color([0.85, 0.85, 0.85, 1.0]);
update_label_clone.set_icon(Some(update_label_image.clone()));
if update_available_clone.load(Ordering::SeqCst) {
help_button_clone.set_icon(Some(question_light_noti.clone()));
} else {
help_button_clone.set_icon(Some(question_light.clone()));
}
}
});
set_theme();
{
let cache = cache.clone();
let set_theme = set_theme.clone();
theme_button.set_on_click(move || {
light_theme.set(!light_theme.get());
cache.lock().unwrap().window.dark = !light_theme.get();
set_theme();
});
}
let slider_clone2 = slider.clone();
let image_widget_clone = picture_widget;
slider.set_on_value_change(move || {
image_widget_clone.jump_to_index(slider_clone2.value());
});
let help_visible = Cell::new(first_launch);
help_screen.set_visible(help_visible.get());
let update_available_clone = update_available.clone();
let help_screen_clone = help_screen.clone();
let update_notification_clone = update_notification.clone();
update_notification
.set_visible(help_visible.get() && update_available_clone.load(Ordering::SeqCst));
help_button.set_on_click(move || {
help_visible.set(!help_visible.get());
help_screen_clone.set_visible(help_visible.get());
update_notification_clone
.set_visible(help_visible.get() && update_available_clone.load(Ordering::SeqCst));
});
window.set_root(vertical_container);
let check_updates_enabled = match &config.borrow().updates {
Some(u) if !u.check_updates => false,
_ => true,
};
let update_checker_join_handle = {
let updates = &mut cache.lock().unwrap().updates;
let cache = cache.clone();
let update_available = update_available.clone();
let update_check_done = update_check_done.clone();
if check_updates_enabled && updates.update_check_needed() {
// kick off a thread that will check for an update in the background
Some(std::thread::spawn(move || {
let has_update = check_for_updates();
update_available.store(has_update, Ordering::SeqCst);
update_check_done.store(true, Ordering::SeqCst);
if !has_update {
cache.lock().unwrap().updates.set_update_check_time();
}
}))
} else {
None
}
};
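// Poll roughly once per second until the background update check has finished; then
// re-apply the theme (so the help button can show its notification variant) and stop
// requesting further wakeups.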
let mut nothing_to_do = false;
application.add_global_event_handler(move |_| {
if nothing_to_do {
return NextUpdate::Latest;
}
if update_check_done.load(Ordering::SeqCst) {
nothing_to_do = true;
set_theme();
if help_screen.visible() && update_available.load(Ordering::SeqCst) {
update_notification.set_visible(true);
}
}
NextUpdate::WaitUntil(Instant::now() + Duration::from_secs(1))
});
application.set_at_exit(Some(move || {
cache.lock().unwrap().save(cache_path).unwrap();
if let Some(h) = update_checker_join_handle {
h.join().unwrap();
}
}));
application.start_event_loop();
}
// ========================================================
#[derive(Deserialize)]
struct ReleaseInfoJson {
tag_name: String,
}
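/// Returns the paths of the configuration file and the cache file. Prefers the
/// platform-specific project directories and falls back to the executable's folder;
/// the directories are created if they don't exist yet.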
fn get_config_and_cache_paths() -> (PathBuf, PathBuf) {
let config_folder;
let cache_folder;
if let Some(ref project_dirs) = *PROJECT_DIRS {
config_folder = project_dirs.config_dir().to_owned();
cache_folder = project_dirs.cache_dir().to_owned();
} else {
let exe_path = std::env::current_exe().unwrap();
let exe_folder = exe_path.parent().unwrap();
config_folder = exe_folder.to_owned();
cache_folder = exe_folder.to_owned();
}
if !config_folder.exists() {
std::fs::create_dir_all(&config_folder).unwrap();
}
if !cache_folder.exists() {
std::fs::create_dir_all(&cache_folder).unwrap();
}
(config_folder.join("cfg.toml"), cache_folder.join("cache.toml"))
} |
#[cfg(not(feature = "networking"))]
/// Always returns false without the `networking` feature.
fn check_for_updates() -> bool {
false
}
#[cfg(feature = "networking")]
/// Returns true if updates are available.
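///
/// Queries the GitHub releases API for the latest release tag and compares it against
/// the current crate version.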
fn check_for_updates() -> bool {
let client = match reqwest::blocking::Client::builder().user_agent("emulsion").build() {
Ok(c) => c,
Err(e) => {
println!("Could not build client for version request: {}", e);
return false;
}
};
let response =
client.get("https://api.github.com/repos/ArturKovacs/emulsion/releases/latest").send();
match response {
Ok(response) => match response.json::<ReleaseInfoJson>() {
Ok(info) => {
println!("Found latest version tag {}", info.tag_name);
let current = Version::cargo_pkg_version();
println!("Current version is '{}'", current);
match Version::from_str(&info.tag_name) {
Ok(latest) => {
println!("Parsed latest version is '{}'", latest);
if latest > current {
return true;
}
}
Err(error) => {
println!("Error parsing version: {}", error.to_string());
}
}
}
Err(e) => println!("Failed to create json from response: {}", e),
},
Err(e) => println!("Failed to get latest version info: {}", e),
}
false
} | random_line_split |
|
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
#[macro_use]
extern crate error_chain;
use std::cell::{Cell, RefCell};
use std::f32;
use std::path::PathBuf;
use std::rc::Rc;
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use directories::ProjectDirs;
use lazy_static::lazy_static;
use serde_derive::Deserialize;
use gelatin::glium::glutin::{
dpi::{PhysicalPosition, PhysicalSize},
event::WindowEvent,
window::Icon,
};
use gelatin::{
application::*,
button::*,
image,
label::*,
line_layout_container::*,
misc::*,
picture::*,
slider::*,
window::{Window, WindowDescriptorBuilder},
NextUpdate, Widget,
};
use crate::configuration::{Cache, Configuration};
use crate::help_screen::*;
use crate::picture_widget::*;
#[cfg(feature = "networking")]
use crate::version::Version;
mod configuration;
mod handle_panic;
mod help_screen;
mod image_cache;
mod picture_widget;
mod playback_manager;
mod shaders;
mod utils;
#[cfg(feature = "networking")]
mod version;
lazy_static! {
pub static ref PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from("", "", "emulsion");
}
// ========================================================
// Not-so glorious main function
// ========================================================
fn main() {
std::panic::set_hook(Box::new(handle_panic::handle_panic));
let img = image::load_from_memory(include_bytes!("../resource/emulsion48.png")).unwrap();
let rgba = img.into_rgba();
let (w, h) = rgba.dimensions();
let icon = Icon::from_rgba(rgba.into_raw(), w, h).unwrap();
// Load configuration and cache files
let (config_path, cache_path) = get_config_and_cache_paths();
let cache = Cache::load(&cache_path);
let config = Configuration::load(&config_path);
let first_launch = cache.is_err();
let cache = Arc::new(Mutex::new(cache.unwrap_or_default()));
let config = Rc::new(RefCell::new(config.unwrap_or_default()));
let mut application = Application::new();
let window: Rc<Window>;
{
let cache = cache.lock().unwrap();
let window_desc = WindowDescriptorBuilder::default()
.icon(Some(icon))
.size(PhysicalSize::new(cache.window.win_w, cache.window.win_h))
.position(Some(PhysicalPosition::new(cache.window.win_x, cache.window.win_y)))
.build()
.unwrap();
window = Window::new(&mut application, window_desc);
}
{
let cache = cache.clone();
window.add_global_event_handler(move |event| match event {
WindowEvent::Resized(new_size) => {
let mut cache = cache.lock().unwrap();
cache.window.win_w = new_size.width;
cache.window.win_h = new_size.height;
}
WindowEvent::Moved(new_pos) => {
let mut cache = cache.lock().unwrap();
cache.window.win_x = new_pos.x;
cache.window.win_y = new_pos.y;
}
_ => (),
});
}
let vertical_container = Rc::new(VerticalLayoutContainer::new());
vertical_container.set_margin_all(0.0);
vertical_container.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
vertical_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let picture_area_container = Rc::new(VerticalLayoutContainer::new());
picture_area_container.set_margin_all(0.0);
picture_area_container.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
picture_area_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let update_notification = Rc::new(HorizontalLayoutContainer::new());
let update_label = Rc::new(Label::new());
let update_label_image = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/new-version-available.png"
)));
let update_label_image_light = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/new-version-available-light.png"
)));
{
update_notification.set_vertical_align(Alignment::End);
update_notification.set_horizontal_align(Alignment::Start);
update_notification.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
update_notification.set_height(Length::Fixed(32.0));
update_label.set_icon(Some(update_label_image.clone()));
update_label.set_margin_top(4.0);
update_label.set_margin_bottom(4.0);
update_label.set_fixed_size(LogicalVector::new(200.0, 24.0));
update_label.set_horizontal_align(Alignment::Center);
let update_button = Rc::new(Button::new());
let button_image =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/visit-site.png")));
update_button.set_icon(Some(button_image));
update_button.set_margin_top(4.0);
update_button.set_margin_bottom(4.0);
update_button.set_fixed_size(LogicalVector::new(100.0, 24.0));
update_button.set_horizontal_align(Alignment::Center);
update_button.set_on_click(|| {
open::that("https://arturkovacs.github.io/emulsion-website/").unwrap();
});
update_notification.add_child(update_label.clone());
update_notification.add_child(update_button);
}
let usage_img = Picture::from_encoded_bytes(include_bytes!("../resource/usage.png"));
let help_screen = Rc::new(HelpScreen::new(usage_img));
let bottom_container = Rc::new(HorizontalLayoutContainer::new());
//bottom_container.set_margin_top(4.0);
//bottom_container.set_margin_bottom(4.0);
bottom_container.set_margin_left(0.0);
bottom_container.set_margin_right(0.0);
bottom_container.set_height(Length::Fixed(32.0));
bottom_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let moon_img = Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/moon.png")));
let light_img = Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/light.png")));
let theme_button = Rc::new(Button::new());
theme_button.set_margin_top(5.0);
theme_button.set_margin_left(28.0);
theme_button.set_margin_right(4.0);
theme_button.set_height(Length::Fixed(24.0));
theme_button.set_width(Length::Fixed(24.0));
theme_button.set_horizontal_align(Alignment::Center);
theme_button.set_icon(Some(moon_img.clone()));
let question =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question_button.png")));
let question_light = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/question_button_light.png"
)));
let question_noti =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question-noti.png")));
let question_light_noti =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question-light-noti.png")));
let help_button = Rc::new(Button::new());
help_button.set_margin_top(5.0);
help_button.set_margin_left(4.0);
help_button.set_margin_right(28.0);
help_button.set_height(Length::Fixed(24.0));
help_button.set_width(Length::Fixed(24.0));
help_button.set_horizontal_align(Alignment::Center);
help_button.set_icon(Some(question.clone()));
let slider = Rc::new(Slider::new());
slider.set_margin_top(5.0);
slider.set_margin_left(4.0);
slider.set_margin_right(4.0);
slider.set_height(Length::Fixed(24.0));
slider.set_width(Length::Stretch { min: 0.0, max: 600.0 });
slider.set_horizontal_align(Alignment::Center);
slider.set_steps(6, 1);
let picture_widget = Rc::new(PictureWidget::new(
&window.display_mut(),
&window,
slider.clone(),
bottom_container.clone(),
config.clone(),
));
picture_widget.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
picture_widget.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
if let Some(file_path) = std::env::args().nth(1) {
picture_widget.jump_to_path(file_path);
}
bottom_container.add_child(theme_button.clone());
bottom_container.add_child(slider.clone());
bottom_container.add_child(help_button.clone());
picture_area_container.add_child(picture_widget.clone());
picture_area_container.add_child(help_screen.clone());
picture_area_container.add_child(update_notification.clone());
vertical_container.add_child(picture_area_container);
vertical_container.add_child(bottom_container.clone());
let update_available = Arc::new(AtomicBool::new(false));
let update_check_done = Arc::new(AtomicBool::new(false));
let light_theme = Rc::new(Cell::new(!cache.lock().unwrap().window.dark));
let theme_button_clone = theme_button.clone();
let help_button_clone = help_button.clone();
let update_label_clone = update_label;
let picture_widget_clone = picture_widget.clone();
let bottom_container_clone = bottom_container;
let update_notification_clone = update_notification.clone();
let slider_clone = slider.clone();
let window_clone = window.clone();
let light_theme_clone = light_theme.clone();
let update_available_clone = update_available.clone();
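// `set_theme` only captures clones/handles and re-reads the `light_theme` flag and the
// `update_available` atomic on every call, so it can be re-invoked later (e.g. once the
// background update check completes).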
let set_theme = Rc::new(move || {
if light_theme_clone.get() {
picture_widget_clone.set_bright_shade(0.96);
bottom_container_clone.set_bg_color([1.0, 1.0, 1.0, 1.0]);
slider_clone.set_shadow_color([0.0, 0.0, 0.0]);
window_clone.set_bg_color([0.85, 0.85, 0.85, 1.0]);
theme_button_clone.set_icon(Some(moon_img.clone()));
update_notification_clone.set_bg_color([0.06, 0.06, 0.06, 1.0]);
update_label_clone.set_icon(Some(update_label_image_light.clone()));
if update_available_clone.load(Ordering::SeqCst) {
help_button_clone.set_icon(Some(question_noti.clone()));
} else {
help_button_clone.set_icon(Some(question.clone()));
}
} else {
picture_widget_clone.set_bright_shade(0.11);
bottom_container_clone.set_bg_color([0.08, 0.08, 0.08, 1.0]);
slider_clone.set_shadow_color([0.0, 0.0, 0.0]);
window_clone.set_bg_color([0.03, 0.03, 0.03, 1.0]);
theme_button_clone.set_icon(Some(light_img.clone()));
update_notification_clone.set_bg_color([0.85, 0.85, 0.85, 1.0]);
update_label_clone.set_icon(Some(update_label_image.clone()));
if update_available_clone.load(Ordering::SeqCst) {
help_button_clone.set_icon(Some(question_light_noti.clone()));
} else {
help_button_clone.set_icon(Some(question_light.clone()));
}
}
});
set_theme();
{
let cache = cache.clone();
let set_theme = set_theme.clone();
theme_button.set_on_click(move || {
light_theme.set(!light_theme.get());
cache.lock().unwrap().window.dark = !light_theme.get();
set_theme();
});
}
let slider_clone2 = slider.clone();
let image_widget_clone = picture_widget;
slider.set_on_value_change(move || {
image_widget_clone.jump_to_index(slider_clone2.value());
});
let help_visible = Cell::new(first_launch);
help_screen.set_visible(help_visible.get());
let update_available_clone = update_available.clone();
let help_screen_clone = help_screen.clone();
let update_notification_clone = update_notification.clone();
update_notification
.set_visible(help_visible.get() && update_available_clone.load(Ordering::SeqCst));
help_button.set_on_click(move || {
help_visible.set(!help_visible.get());
help_screen_clone.set_visible(help_visible.get());
update_notification_clone
.set_visible(help_visible.get() && update_available_clone.load(Ordering::SeqCst));
});
window.set_root(vertical_container);
let check_updates_enabled = match &config.borrow().updates {
Some(u) if !u.check_updates => false,
_ => true,
};
let update_checker_join_handle = {
let updates = &mut cache.lock().unwrap().updates;
let cache = cache.clone();
let update_available = update_available.clone();
let update_check_done = update_check_done.clone();
if check_updates_enabled && updates.update_check_needed() {
// kick off a thread that will check for an update in the background
Some(std::thread::spawn(move || {
let has_update = check_for_updates();
update_available.store(has_update, Ordering::SeqCst);
update_check_done.store(true, Ordering::SeqCst);
if !has_update {
cache.lock().unwrap().updates.set_update_check_time();
}
}))
} else {
None
}
};
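// Poll roughly once per second until the background update check has finished; then
// re-apply the theme (so the help button can show its notification variant) and stop
// requesting further wakeups.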
let mut nothing_to_do = false;
application.add_global_event_handler(move |_| {
if nothing_to_do {
return NextUpdate::Latest;
}
if update_check_done.load(Ordering::SeqCst) {
nothing_to_do = true;
set_theme();
if help_screen.visible() && update_available.load(Ordering::SeqCst) {
update_notification.set_visible(true);
}
}
NextUpdate::WaitUntil(Instant::now() + Duration::from_secs(1))
});
application.set_at_exit(Some(move || {
cache.lock().unwrap().save(cache_path).unwrap();
if let Some(h) = update_checker_join_handle {
h.join().unwrap();
}
}));
application.start_event_loop();
}
// ========================================================
#[derive(Deserialize)]
struct ReleaseInfoJson {
tag_name: String,
}
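/// Returns the paths of the configuration file and the cache file. Prefers the
/// platform-specific project directories and falls back to the executable's folder;
/// the directories are created if they don't exist yet.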
fn get_config_and_cache_paths() -> (PathBuf, PathBuf) {
let config_folder;
let cache_folder;
if let Some(ref project_dirs) = *PROJECT_DIRS {
config_folder = project_dirs.config_dir().to_owned();
cache_folder = project_dirs.cache_dir().to_owned();
} else {
let exe_path = std::env::current_exe().unwrap();
let exe_folder = exe_path.parent().unwrap();
config_folder = exe_folder.to_owned();
cache_folder = exe_folder.to_owned();
}
if !config_folder.exists() {
std::fs::create_dir_all(&config_folder).unwrap();
}
if !cache_folder.exists() {
std::fs::create_dir_all(&cache_folder).unwrap();
}
(config_folder.join("cfg.toml"), cache_folder.join("cache.toml"))
}
#[cfg(not(feature = "networking"))]
/// Always returns false without the `networking` feature.
fn | () -> bool {
false
}
#[cfg(feature = "networking")]
/// Returns true if updates are available.
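///
/// Queries the GitHub releases API for the latest release tag and compares it against
/// the current crate version.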
fn check_for_updates() -> bool {
let client = match reqwest::blocking::Client::builder().user_agent("emulsion").build() {
Ok(c) => c,
Err(e) => {
println!("Could not build client for version request: {}", e);
return false;
}
};
let response =
client.get("https://api.github.com/repos/ArturKovacs/emulsion/releases/latest").send();
match response {
Ok(response) => match response.json::<ReleaseInfoJson>() {
Ok(info) => {
println!("Found latest version tag {}", info.tag_name);
let current = Version::cargo_pkg_version();
println!("Current version is '{}'", current);
match Version::from_str(&info.tag_name) {
Ok(latest) => {
println!("Parsed latest version is '{}'", latest);
if latest > current {
return true;
}
}
Err(error) => {
println!("Error parsing version: {}", error.to_string());
}
}
}
Err(e) => println!("Failed to create json from response: {}", e),
},
Err(e) => println!("Failed to get latest version info: {}", e),
}
false
}
| check_for_updates | identifier_name |
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
#[macro_use]
extern crate error_chain;
use std::cell::{Cell, RefCell};
use std::f32;
use std::path::PathBuf;
use std::rc::Rc;
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use directories::ProjectDirs;
use lazy_static::lazy_static;
use serde_derive::Deserialize;
use gelatin::glium::glutin::{
dpi::{PhysicalPosition, PhysicalSize},
event::WindowEvent,
window::Icon,
};
use gelatin::{
application::*,
button::*,
image,
label::*,
line_layout_container::*,
misc::*,
picture::*,
slider::*,
window::{Window, WindowDescriptorBuilder},
NextUpdate, Widget,
};
use crate::configuration::{Cache, Configuration};
use crate::help_screen::*;
use crate::picture_widget::*;
#[cfg(feature = "networking")]
use crate::version::Version;
mod configuration;
mod handle_panic;
mod help_screen;
mod image_cache;
mod picture_widget;
mod playback_manager;
mod shaders;
mod utils;
#[cfg(feature = "networking")]
mod version;
lazy_static! {
pub static ref PROJECT_DIRS: Option<ProjectDirs> = ProjectDirs::from("", "", "emulsion");
}
// ========================================================
// Not-so glorious main function
// ========================================================
fn main() {
std::panic::set_hook(Box::new(handle_panic::handle_panic));
let img = image::load_from_memory(include_bytes!("../resource/emulsion48.png")).unwrap();
let rgba = img.into_rgba();
let (w, h) = rgba.dimensions();
let icon = Icon::from_rgba(rgba.into_raw(), w, h).unwrap();
// Load configuration and cache files
let (config_path, cache_path) = get_config_and_cache_paths();
let cache = Cache::load(&cache_path);
let config = Configuration::load(&config_path);
let first_launch = cache.is_err();
let cache = Arc::new(Mutex::new(cache.unwrap_or_default()));
let config = Rc::new(RefCell::new(config.unwrap_or_default()));
let mut application = Application::new();
let window: Rc<Window>;
{
let cache = cache.lock().unwrap();
let window_desc = WindowDescriptorBuilder::default()
.icon(Some(icon))
.size(PhysicalSize::new(cache.window.win_w, cache.window.win_h))
.position(Some(PhysicalPosition::new(cache.window.win_x, cache.window.win_y)))
.build()
.unwrap();
window = Window::new(&mut application, window_desc);
}
{
let cache = cache.clone();
window.add_global_event_handler(move |event| match event {
WindowEvent::Resized(new_size) => {
let mut cache = cache.lock().unwrap();
cache.window.win_w = new_size.width;
cache.window.win_h = new_size.height;
}
WindowEvent::Moved(new_pos) => {
let mut cache = cache.lock().unwrap();
cache.window.win_x = new_pos.x;
cache.window.win_y = new_pos.y;
}
_ => (),
});
}
let vertical_container = Rc::new(VerticalLayoutContainer::new());
vertical_container.set_margin_all(0.0);
vertical_container.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
vertical_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let picture_area_container = Rc::new(VerticalLayoutContainer::new());
picture_area_container.set_margin_all(0.0);
picture_area_container.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
picture_area_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let update_notification = Rc::new(HorizontalLayoutContainer::new());
let update_label = Rc::new(Label::new());
let update_label_image = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/new-version-available.png"
)));
let update_label_image_light = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/new-version-available-light.png"
)));
{
update_notification.set_vertical_align(Alignment::End);
update_notification.set_horizontal_align(Alignment::Start);
update_notification.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
update_notification.set_height(Length::Fixed(32.0));
update_label.set_icon(Some(update_label_image.clone()));
update_label.set_margin_top(4.0);
update_label.set_margin_bottom(4.0);
update_label.set_fixed_size(LogicalVector::new(200.0, 24.0));
update_label.set_horizontal_align(Alignment::Center);
let update_button = Rc::new(Button::new());
let button_image =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/visit-site.png")));
update_button.set_icon(Some(button_image));
update_button.set_margin_top(4.0);
update_button.set_margin_bottom(4.0);
update_button.set_fixed_size(LogicalVector::new(100.0, 24.0));
update_button.set_horizontal_align(Alignment::Center);
update_button.set_on_click(|| {
open::that("https://arturkovacs.github.io/emulsion-website/").unwrap();
});
update_notification.add_child(update_label.clone());
update_notification.add_child(update_button);
}
let usage_img = Picture::from_encoded_bytes(include_bytes!("../resource/usage.png"));
let help_screen = Rc::new(HelpScreen::new(usage_img));
let bottom_container = Rc::new(HorizontalLayoutContainer::new());
//bottom_container.set_margin_top(4.0);
//bottom_container.set_margin_bottom(4.0);
bottom_container.set_margin_left(0.0);
bottom_container.set_margin_right(0.0);
bottom_container.set_height(Length::Fixed(32.0));
bottom_container.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
let moon_img = Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/moon.png")));
let light_img = Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/light.png")));
let theme_button = Rc::new(Button::new());
theme_button.set_margin_top(5.0);
theme_button.set_margin_left(28.0);
theme_button.set_margin_right(4.0);
theme_button.set_height(Length::Fixed(24.0));
theme_button.set_width(Length::Fixed(24.0));
theme_button.set_horizontal_align(Alignment::Center);
theme_button.set_icon(Some(moon_img.clone()));
let question =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question_button.png")));
let question_light = Rc::new(Picture::from_encoded_bytes(include_bytes!(
"../resource/question_button_light.png"
)));
let question_noti =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question-noti.png")));
let question_light_noti =
Rc::new(Picture::from_encoded_bytes(include_bytes!("../resource/question-light-noti.png")));
let help_button = Rc::new(Button::new());
help_button.set_margin_top(5.0);
help_button.set_margin_left(4.0);
help_button.set_margin_right(28.0);
help_button.set_height(Length::Fixed(24.0));
help_button.set_width(Length::Fixed(24.0));
help_button.set_horizontal_align(Alignment::Center);
help_button.set_icon(Some(question.clone()));
let slider = Rc::new(Slider::new());
slider.set_margin_top(5.0);
slider.set_margin_left(4.0);
slider.set_margin_right(4.0);
slider.set_height(Length::Fixed(24.0));
slider.set_width(Length::Stretch { min: 0.0, max: 600.0 });
slider.set_horizontal_align(Alignment::Center);
slider.set_steps(6, 1);
let picture_widget = Rc::new(PictureWidget::new(
&window.display_mut(),
&window,
slider.clone(),
bottom_container.clone(),
config.clone(),
));
picture_widget.set_height(Length::Stretch { min: 0.0, max: f32::INFINITY });
picture_widget.set_width(Length::Stretch { min: 0.0, max: f32::INFINITY });
if let Some(file_path) = std::env::args().nth(1) {
picture_widget.jump_to_path(file_path);
}
bottom_container.add_child(theme_button.clone());
bottom_container.add_child(slider.clone());
bottom_container.add_child(help_button.clone());
picture_area_container.add_child(picture_widget.clone());
picture_area_container.add_child(help_screen.clone());
picture_area_container.add_child(update_notification.clone());
vertical_container.add_child(picture_area_container);
vertical_container.add_child(bottom_container.clone());
let update_available = Arc::new(AtomicBool::new(false));
let update_check_done = Arc::new(AtomicBool::new(false));
let light_theme = Rc::new(Cell::new(!cache.lock().unwrap().window.dark));
let theme_button_clone = theme_button.clone();
let help_button_clone = help_button.clone();
let update_label_clone = update_label;
let picture_widget_clone = picture_widget.clone();
let bottom_container_clone = bottom_container;
let update_notification_clone = update_notification.clone();
let slider_clone = slider.clone();
let window_clone = window.clone();
let light_theme_clone = light_theme.clone();
let update_available_clone = update_available.clone();
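// `set_theme` only captures clones/handles and re-reads the `light_theme` flag and the
// `update_available` atomic on every call, so it can be re-invoked later (e.g. once the
// background update check completes).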
let set_theme = Rc::new(move || {
if light_theme_clone.get() {
picture_widget_clone.set_bright_shade(0.96);
bottom_container_clone.set_bg_color([1.0, 1.0, 1.0, 1.0]);
slider_clone.set_shadow_color([0.0, 0.0, 0.0]);
window_clone.set_bg_color([0.85, 0.85, 0.85, 1.0]);
theme_button_clone.set_icon(Some(moon_img.clone()));
update_notification_clone.set_bg_color([0.06, 0.06, 0.06, 1.0]);
update_label_clone.set_icon(Some(update_label_image_light.clone()));
if update_available_clone.load(Ordering::SeqCst) {
help_button_clone.set_icon(Some(question_noti.clone()));
} else {
help_button_clone.set_icon(Some(question.clone()));
}
} else {
picture_widget_clone.set_bright_shade(0.11);
bottom_container_clone.set_bg_color([0.08, 0.08, 0.08, 1.0]);
slider_clone.set_shadow_color([0.0, 0.0, 0.0]);
window_clone.set_bg_color([0.03, 0.03, 0.03, 1.0]);
theme_button_clone.set_icon(Some(light_img.clone()));
update_notification_clone.set_bg_color([0.85, 0.85, 0.85, 1.0]);
update_label_clone.set_icon(Some(update_label_image.clone()));
if update_available_clone.load(Ordering::SeqCst) {
help_button_clone.set_icon(Some(question_light_noti.clone()));
} else {
help_button_clone.set_icon(Some(question_light.clone()));
}
}
});
set_theme();
{
let cache = cache.clone();
let set_theme = set_theme.clone();
theme_button.set_on_click(move || {
light_theme.set(!light_theme.get());
cache.lock().unwrap().window.dark = !light_theme.get();
set_theme();
});
}
let slider_clone2 = slider.clone();
let image_widget_clone = picture_widget;
slider.set_on_value_change(move || {
image_widget_clone.jump_to_index(slider_clone2.value());
});
let help_visible = Cell::new(first_launch);
help_screen.set_visible(help_visible.get());
let update_available_clone = update_available.clone();
let help_screen_clone = help_screen.clone();
let update_notification_clone = update_notification.clone();
update_notification
.set_visible(help_visible.get() && update_available_clone.load(Ordering::SeqCst));
help_button.set_on_click(move || {
help_visible.set(!help_visible.get());
help_screen_clone.set_visible(help_visible.get());
update_notification_clone
.set_visible(help_visible.get() && update_available_clone.load(Ordering::SeqCst));
});
window.set_root(vertical_container);
let check_updates_enabled = match &config.borrow().updates {
Some(u) if !u.check_updates => false,
_ => true,
};
let update_checker_join_handle = {
let updates = &mut cache.lock().unwrap().updates;
let cache = cache.clone();
let update_available = update_available.clone();
let update_check_done = update_check_done.clone();
if check_updates_enabled && updates.update_check_needed() {
// kick off a thread that will check for an update in the background
Some(std::thread::spawn(move || {
let has_update = check_for_updates();
update_available.store(has_update, Ordering::SeqCst);
update_check_done.store(true, Ordering::SeqCst);
if !has_update {
cache.lock().unwrap().updates.set_update_check_time();
}
}))
} else {
None
}
};
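// Poll roughly once per second until the background update check has finished; then
// re-apply the theme (so the help button can show its notification variant) and stop
// requesting further wakeups.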
let mut nothing_to_do = false;
application.add_global_event_handler(move |_| {
if nothing_to_do {
return NextUpdate::Latest;
}
if update_check_done.load(Ordering::SeqCst) {
nothing_to_do = true;
set_theme();
if help_screen.visible() && update_available.load(Ordering::SeqCst) {
update_notification.set_visible(true);
}
}
NextUpdate::WaitUntil(Instant::now() + Duration::from_secs(1))
});
application.set_at_exit(Some(move || {
cache.lock().unwrap().save(cache_path).unwrap();
if let Some(h) = update_checker_join_handle {
h.join().unwrap();
}
}));
application.start_event_loop();
}
// ========================================================
#[derive(Deserialize)]
struct ReleaseInfoJson {
tag_name: String,
}
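/// Returns the paths of the configuration file and the cache file. Prefers the
/// platform-specific project directories and falls back to the executable's folder;
/// the directories are created if they don't exist yet.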
fn get_config_and_cache_paths() -> (PathBuf, PathBuf) {
let config_folder;
let cache_folder;
if let Some(ref project_dirs) = *PROJECT_DIRS {
config_folder = project_dirs.config_dir().to_owned();
cache_folder = project_dirs.cache_dir().to_owned();
} else {
let exe_path = std::env::current_exe().unwrap();
let exe_folder = exe_path.parent().unwrap();
config_folder = exe_folder.to_owned();
cache_folder = exe_folder.to_owned();
}
if !config_folder.exists() |
if !cache_folder.exists() {
std::fs::create_dir_all(&cache_folder).unwrap();
}
(config_folder.join("cfg.toml"), cache_folder.join("cache.toml"))
}
#[cfg(not(feature = "networking"))]
/// Always returns false without the `networking` feature.
fn check_for_updates() -> bool {
false
}
#[cfg(feature = "networking")]
/// Returns true if updates are available.
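///
/// Queries the GitHub releases API for the latest release tag and compares it against
/// the current crate version.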
fn check_for_updates() -> bool {
let client = match reqwest::blocking::Client::builder().user_agent("emulsion").build() {
Ok(c) => c,
Err(e) => {
println!("Could not build client for version request: {}", e);
return false;
}
};
let response =
client.get("https://api.github.com/repos/ArturKovacs/emulsion/releases/latest").send();
match response {
Ok(response) => match response.json::<ReleaseInfoJson>() {
Ok(info) => {
println!("Found latest version tag {}", info.tag_name);
let current = Version::cargo_pkg_version();
println!("Current version is '{}'", current);
match Version::from_str(&info.tag_name) {
Ok(latest) => {
println!("Parsed latest version is '{}'", latest);
if latest > current {
return true;
}
}
Err(error) => {
println!("Error parsing version: {}", error.to_string());
}
}
}
Err(e) => println!("Failed to create json from response: {}", e),
},
Err(e) => println!("Failed to get latest version info: {}", e),
}
false
}
| {
std::fs::create_dir_all(&config_folder).unwrap();
} | conditional_block |
mod.rs | //! The central Framework struct that ties everything together.
// Prefix and slash specific implementation details
mod prefix;
mod slash;
mod builder;
pub use builder::*;
use crate::serenity::client::{bridge::gateway::ShardManager, Client};
use crate::serenity_prelude as serenity;
use crate::*;
pub use prefix::dispatch_message;
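/// Checks whether the author of the invocation has the required permissions in the
/// channel the command was invoked in. Always passes in DMs; denies the invocation if
/// the guild or channel is missing from the cache.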
async fn check_permissions<U, E>(
ctx: crate::Context<'_, U, E>,
required_permissions: serenity::Permissions,
) -> bool {
if required_permissions.is_empty() {
return true;
}
let guild_id = match ctx.guild_id() {
Some(x) => x,
None => return true, // no permission checks in DMs
};
let guild = match ctx.discord().cache.guild(guild_id) {
Some(x) => x,
None => return false, // Guild not in cache
};
let channel = match guild.channels.get(&ctx.channel_id()) {
Some(serenity::Channel::Guild(channel)) => channel,
Some(_other_channel) => {
println!(
"Warning: guild message was supposedly sent in a non-guild channel. Denying invocation"
);
return false;
}
None => return false,
};
// If member not in cache (probably because presences intent is not enabled), retrieve via HTTP
let member = match guild.members.get(&ctx.author().id) {
Some(x) => x.clone(),
None => match ctx
.discord()
.http
.get_member(guild_id.0, ctx.author().id.0)
.await
{
Ok(member) => member,
Err(_) => return false,
},
};
match guild.user_permissions_in(channel, &member) {
Ok(perms) => perms.contains(required_permissions),
Err(_) => false,
}
}
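/// Combines the owners-only check with [`check_permissions`].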
async fn check_required_permissions_and_owners_only<U, E>(
ctx: crate::Context<'_, U, E>,
required_permissions: serenity::Permissions,
owners_only: bool,
) -> bool {
if owners_only && !ctx.framework().options().owners.contains(&ctx.author().id) {
return false;
}
if !check_permissions(ctx, required_permissions).await {
return false;
}
true
}
/// The main framework struct which stores all data and handles message and interaction dispatch.
pub struct Framework<U, E> {
user_data: once_cell::sync::OnceCell<U>,
bot_id: serenity::UserId,
// TODO: wrap in RwLock to allow changing framework options while running? Could also replace
// the edit tracking cache interior mutability
options: FrameworkOptions<U, E>,
application_id: serenity::ApplicationId,
// Will be initialized to Some on construction, and then taken out on startup
client: std::sync::Mutex<Option<serenity::Client>>,
// Initialized to Some during construction; so shouldn't be None at any observable point
shard_manager: std::sync::Mutex<Option<std::sync::Arc<tokio::sync::Mutex<ShardManager>>>>,
// Filled with Some on construction. Taken out and executed on first Ready gateway event
user_data_setup: std::sync::Mutex<
Option<
Box<
dyn Send
+ Sync
+ for<'a> FnOnce(
&'a serenity::Context,
&'a serenity::Ready,
&'a Self,
) -> BoxFuture<'a, Result<U, E>>,
>,
>,
>,
}
impl<U, E> Framework<U, E> {
/// Create a framework builder to configure, create and run a framework.
///
/// For more information, see [`FrameworkBuilder`]
pub fn build() -> FrameworkBuilder<U, E> {
FrameworkBuilder::default()
}
/// Setup a new [`Framework`]. For more ergonomic setup, please see [`FrameworkBuilder`]
///
/// This function is async and returns Result because it already initializes the Discord client.
///
/// The user data callback is invoked as soon as the bot is logged in. That way, bot data like
/// user ID or connected guilds can be made available to the user data setup function. If
/// the setup callback returns an error, it is handed to the framework's error handler
/// with [`ErrorContext::Setup`].
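///
/// A minimal sketch of a setup callback (the `MyData` type and the surrounding
/// variables are illustrative placeholders, not part of this crate):
/// ```rust,ignore
/// let framework = Framework::new(
///     application_id,
///     client_builder,
///     |_ctx, _ready, _framework| Box::pin(async move { Ok(MyData::default()) }),
///     options,
/// )
/// .await?;
/// ```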
pub async fn new<F>(
application_id: serenity::ApplicationId,
client_builder: serenity::ClientBuilder<'_>,
user_data_setup: F,
options: FrameworkOptions<U, E>,
) -> Result<std::sync::Arc<Self>, serenity::Error>
where
F: Send
+ Sync
+ 'static
+ for<'a> FnOnce(
&'a serenity::Context,
&'a serenity::Ready,
&'a Self,
) -> BoxFuture<'a, Result<U, E>>,
U: Send + Sync + 'static,
E: Send + 'static,
{
let self_1 = std::sync::Arc::new(Self {
user_data: once_cell::sync::OnceCell::new(),
user_data_setup: std::sync::Mutex::new(Some(Box::new(user_data_setup))),
bot_id: serenity::parse_token(client_builder.get_token().trim_start_matches("Bot "))
.expect("Invalid bot token")
.bot_user_id,
// To break up the circular dependency (framework setup -> client setup -> event handler
// -> framework), we initialize this with None and then immediately fill in once the
// client is created
client: std::sync::Mutex::new(None),
options,
application_id,
shard_manager: std::sync::Mutex::new(None),
});
let self_2 = self_1.clone();
let event_handler = EventWrapper(move |ctx, event| {
let self_2 = std::sync::Arc::clone(&self_2);
Box::pin(async move {
self_2.event(ctx, event).await;
}) as _
});
let client: Client = client_builder
.application_id(application_id.0)
.event_handler(event_handler)
.await?;
*self_1.shard_manager.lock().unwrap() = Some(client.shard_manager.clone());
*self_1.client.lock().unwrap() = Some(client);
Ok(self_1)
}
/// Start the framework.
///
/// Runs the Discord client that was prepared in [`Framework::new`] until the bot shuts
/// down, while a background task periodically purges the edit tracker cache.
pub async fn start(self: std::sync::Arc<Self>) -> Result<(), serenity::Error>
where
U: Send + Sync + 'static,
E: Send + 'static,
{
let mut client = self
.client
.lock()
.unwrap()
.take()
.expect("Prepared client is missing");
let edit_track_cache_purge_task = tokio::spawn(async move {
loop {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
edit_tracker.write().unwrap().purge();
}
// not sure if the purging interval should be configurable
tokio::time::sleep(std::time::Duration::from_secs(60)).await;
}
});
// This will run for as long as the bot is active
client.start().await?;
edit_track_cache_purge_task.abort();
Ok(())
}
/// Return the stored framework options, including commands.
pub fn options(&self) -> &FrameworkOptions<U, E> {
&self.options
}
/// Returns the application ID given to the framework on its creation.
pub fn application_id(&self) -> serenity::ApplicationId {
self.application_id
}
/// Returns the serenity's client shard manager.
pub fn shard_manager(&self) -> std::sync::Arc<tokio::sync::Mutex<ShardManager>> {
self.shard_manager
.lock()
.unwrap()
.clone()
.expect("fatal: shard manager not stored in framework initialization")
}
async fn get_user_data(&self) -> &U {
// We shouldn't get a Message event before a Ready event. But if we do, wait until
// the Ready event does come and the resulting data has arrived.
loop {
match self.user_data.get() {
Some(x) => break x,
None => tokio::time::sleep(std::time::Duration::from_millis(100)).await,
}
}
}
async fn event(&self, ctx: serenity::Context, event: Event<'_>)
where
U: Send + Sync,
{
match &event {
Event::Ready { data_about_bot } => {
let user_data_setup = Option::take(&mut *self.user_data_setup.lock().unwrap());
if let Some(user_data_setup) = user_data_setup {
match user_data_setup(&ctx, data_about_bot, self).await {
Ok(user_data) => {
let _: Result<_, _> = self.user_data.set(user_data);
}
Err(e) => (self.options.on_error)(e, ErrorContext::Setup).await,
}
} else {
// discarding duplicate Discord bot ready event
// (happens regularly when bot is online for long period of time)
}
}
Event::Message { new_message } => {
if let Err(Some((err, ctx))) =
prefix::dispatch_message(self, &ctx, new_message, false).await
{
if let Some(on_error) = ctx.command.options.on_error | else {
(self.options.on_error)(
err,
crate::ErrorContext::Command(crate::CommandErrorContext::Prefix(ctx)),
)
.await;
}
}
}
Event::MessageUpdate { event, .. } => {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
let msg = edit_tracker.write().unwrap().process_message_update(
event,
self.options().prefix_options.ignore_edit_tracker_cache,
);
if let Some(msg) = msg {
if let Err(Some((err, ctx))) =
prefix::dispatch_message(self, &ctx, &msg, true).await
{
(self.options.on_error)(
err,
crate::ErrorContext::Command(crate::CommandErrorContext::Prefix(
ctx,
)),
)
.await;
}
}
}
}
Event::MessageDelete {
deleted_message_id, ..
} => {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
let bot_response = edit_tracker
.write()
.unwrap()
.find_bot_response(*deleted_message_id)
.cloned();
if let Some(bot_response) = bot_response {
if let Err(e) = bot_response.delete(&ctx).await {
println!(
"Warning: couldn't delete bot response when user deleted message: {}",
e
);
}
}
}
}
Event::InteractionCreate {
interaction: serenity::Interaction::ApplicationCommand(interaction),
} => {
if let Err(Some((e, error_ctx))) = slash::dispatch_interaction(
self,
&ctx,
interaction,
&std::sync::atomic::AtomicBool::new(false),
)
.await
{
if let Some(on_error) = error_ctx.ctx.command.options().on_error {
on_error(e, error_ctx).await;
} else {
(self.options.on_error)(
e,
ErrorContext::Command(CommandErrorContext::Application(error_ctx)),
)
.await;
}
}
}
Event::InteractionCreate {
interaction: serenity::Interaction::Autocomplete(interaction),
} => {
if let Err(Some((e, error_ctx))) = slash::dispatch_autocomplete(
self,
&ctx,
interaction,
&std::sync::atomic::AtomicBool::new(false),
)
.await
{
if let Some(on_error) = error_ctx.ctx.command.options().on_error {
on_error(e, error_ctx).await;
} else {
(self.options.on_error)(e, ErrorContext::Autocomplete(error_ctx)).await;
}
}
}
_ => {}
}
// Do this after the framework's Ready handling, so that self.get_user_data() doesn't
// potentially block infinitely
if let Err(e) =
(self.options.listener)(&ctx, &event, self, self.get_user_data().await).await
{
(self.options.on_error)(e, ErrorContext::Listener(&event)).await;
}
}
}
| {
(on_error)(err, ctx).await;
} | conditional_block |
mod.rs | //! The central Framework struct that ties everything together.
// Prefix and slash specific implementation details
mod prefix;
mod slash;
mod builder;
pub use builder::*;
use crate::serenity::client::{bridge::gateway::ShardManager, Client};
use crate::serenity_prelude as serenity;
use crate::*;
pub use prefix::dispatch_message;
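/// Checks whether the author of the invocation has the required permissions in the
/// channel the command was invoked in. Always passes in DMs; denies the invocation if
/// the guild or channel is missing from the cache.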
async fn check_permissions<U, E>(
ctx: crate::Context<'_, U, E>,
required_permissions: serenity::Permissions,
) -> bool {
if required_permissions.is_empty() {
return true;
}
let guild_id = match ctx.guild_id() {
Some(x) => x,
None => return true, // no permission checks in DMs
};
let guild = match ctx.discord().cache.guild(guild_id) {
Some(x) => x,
None => return false, // Guild not in cache
};
let channel = match guild.channels.get(&ctx.channel_id()) {
Some(serenity::Channel::Guild(channel)) => channel,
Some(_other_channel) => {
println!(
"Warning: guild message was supposedly sent in a non-guild channel. Denying invocation"
);
return false;
}
None => return false,
};
// If member not in cache (probably because presences intent is not enabled), retrieve via HTTP
let member = match guild.members.get(&ctx.author().id) {
Some(x) => x.clone(),
None => match ctx
.discord()
.http
.get_member(guild_id.0, ctx.author().id.0)
.await
{
Ok(member) => member,
Err(_) => return false,
},
};
match guild.user_permissions_in(channel, &member) {
Ok(perms) => perms.contains(required_permissions),
Err(_) => false,
}
}
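/// Combines the owners-only check with [`check_permissions`].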
async fn check_required_permissions_and_owners_only<U, E>(
ctx: crate::Context<'_, U, E>,
required_permissions: serenity::Permissions,
owners_only: bool,
) -> bool {
if owners_only && !ctx.framework().options().owners.contains(&ctx.author().id) {
return false;
}
if !check_permissions(ctx, required_permissions).await {
return false;
}
true
}
/// The main framework struct which stores all data and handles message and interaction dispatch.
pub struct Framework<U, E> {
user_data: once_cell::sync::OnceCell<U>,
bot_id: serenity::UserId,
// TODO: wrap in RwLock to allow changing framework options while running? Could also replace
// the edit tracking cache interior mutability
options: FrameworkOptions<U, E>,
application_id: serenity::ApplicationId,
// Will be initialized to Some on construction, and then taken out on startup
client: std::sync::Mutex<Option<serenity::Client>>,
// Initialized to Some during construction; so shouldn't be None at any observable point
shard_manager: std::sync::Mutex<Option<std::sync::Arc<tokio::sync::Mutex<ShardManager>>>>,
// Filled with Some on construction. Taken out and executed on first Ready gateway event
user_data_setup: std::sync::Mutex<
Option<
Box<
dyn Send
+ Sync
+ for<'a> FnOnce(
&'a serenity::Context,
&'a serenity::Ready,
&'a Self,
) -> BoxFuture<'a, Result<U, E>>,
>,
>,
>,
}
impl<U, E> Framework<U, E> {
/// Create a framework builder to configure, create and run a framework.
///
/// For more information, see [`FrameworkBuilder`]
pub fn build() -> FrameworkBuilder<U, E> {
FrameworkBuilder::default()
}
/// Setup a new [`Framework`]. For more ergonomic setup, please see [`FrameworkBuilder`]
///
/// This function is async and returns Result because it already initializes the Discord client.
///
/// The user data callback is invoked as soon as the bot is logged in. That way, bot data like
/// user ID or connected guilds can be made available to the user data setup function. If
/// the setup callback returns an error, it is handed to the framework's error handler
/// with [`ErrorContext::Setup`].
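///
/// A minimal sketch of a setup callback (the `MyData` type and the surrounding
/// variables are illustrative placeholders, not part of this crate):
/// ```rust,ignore
/// let framework = Framework::new(
///     application_id,
///     client_builder,
///     |_ctx, _ready, _framework| Box::pin(async move { Ok(MyData::default()) }),
///     options,
/// )
/// .await?;
/// ```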
pub async fn new<F>(
application_id: serenity::ApplicationId,
client_builder: serenity::ClientBuilder<'_>,
user_data_setup: F,
options: FrameworkOptions<U, E>,
) -> Result<std::sync::Arc<Self>, serenity::Error>
where
F: Send
+ Sync
+ 'static
+ for<'a> FnOnce(
&'a serenity::Context,
&'a serenity::Ready,
&'a Self,
) -> BoxFuture<'a, Result<U, E>>,
U: Send + Sync + 'static,
E: Send + 'static,
{
let self_1 = std::sync::Arc::new(Self {
user_data: once_cell::sync::OnceCell::new(),
user_data_setup: std::sync::Mutex::new(Some(Box::new(user_data_setup))),
bot_id: serenity::parse_token(client_builder.get_token().trim_start_matches("Bot "))
.expect("Invalid bot token")
.bot_user_id,
// To break up the circular dependency (framework setup -> client setup -> event handler
// -> framework), we initialize this with None and then immediately fill in once the
// client is created
client: std::sync::Mutex::new(None),
options,
application_id,
shard_manager: std::sync::Mutex::new(None),
});
let self_2 = self_1.clone();
let event_handler = EventWrapper(move |ctx, event| {
let self_2 = std::sync::Arc::clone(&self_2);
Box::pin(async move {
self_2.event(ctx, event).await;
}) as _
});
let client: Client = client_builder
.application_id(application_id.0)
.event_handler(event_handler)
.await?;
*self_1.shard_manager.lock().unwrap() = Some(client.shard_manager.clone());
*self_1.client.lock().unwrap() = Some(client);
Ok(self_1)
}
/// Start the framework.
///
/// Runs the Discord client that was prepared in [`Framework::new`] until the bot shuts
/// down, while a background task periodically purges the edit tracker cache.
pub async fn start(self: std::sync::Arc<Self>) -> Result<(), serenity::Error>
where
U: Send + Sync + 'static,
E: Send + 'static,
{
let mut client = self
.client
.lock()
.unwrap()
.take()
.expect("Prepared client is missing");
let edit_track_cache_purge_task = tokio::spawn(async move {
loop {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
edit_tracker.write().unwrap().purge();
}
// not sure if the purging interval should be configurable
tokio::time::sleep(std::time::Duration::from_secs(60)).await;
}
});
// This will run for as long as the bot is active
client.start().await?;
edit_track_cache_purge_task.abort();
Ok(())
}
/// Return the stored framework options, including commands.
pub fn options(&self) -> &FrameworkOptions<U, E> {
&self.options
}
/// Returns the application ID given to the framework on its creation.
pub fn application_id(&self) -> serenity::ApplicationId {
self.application_id
}
/// Returns the serenity's client shard manager.
pub fn shard_manager(&self) -> std::sync::Arc<tokio::sync::Mutex<ShardManager>> {
self.shard_manager
.lock()
.unwrap()
.clone()
.expect("fatal: shard manager not stored in framework initialization")
}
async fn | (&self) -> &U {
// We shouldn't get a Message event before a Ready event. But if we do, wait until
// the Ready event does come and the resulting data has arrived.
loop {
match self.user_data.get() {
Some(x) => break x,
None => tokio::time::sleep(std::time::Duration::from_millis(100)).await,
}
}
}
async fn event(&self, ctx: serenity::Context, event: Event<'_>)
where
U: Send + Sync,
{
match &event {
Event::Ready { data_about_bot } => {
let user_data_setup = Option::take(&mut *self.user_data_setup.lock().unwrap());
if let Some(user_data_setup) = user_data_setup {
match user_data_setup(&ctx, data_about_bot, self).await {
Ok(user_data) => {
let _: Result<_, _> = self.user_data.set(user_data);
}
Err(e) => (self.options.on_error)(e, ErrorContext::Setup).await,
}
} else {
// discarding duplicate Discord bot ready event
// (happens regularly when bot is online for long period of time)
}
}
Event::Message { new_message } => {
if let Err(Some((err, ctx))) =
prefix::dispatch_message(self, &ctx, new_message, false).await
{
if let Some(on_error) = ctx.command.options.on_error {
(on_error)(err, ctx).await;
} else {
(self.options.on_error)(
err,
crate::ErrorContext::Command(crate::CommandErrorContext::Prefix(ctx)),
)
.await;
}
}
}
Event::MessageUpdate { event, .. } => {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
let msg = edit_tracker.write().unwrap().process_message_update(
event,
self.options().prefix_options.ignore_edit_tracker_cache,
);
if let Some(msg) = msg {
if let Err(Some((err, ctx))) =
prefix::dispatch_message(self, &ctx, &msg, true).await
{
(self.options.on_error)(
err,
crate::ErrorContext::Command(crate::CommandErrorContext::Prefix(
ctx,
)),
)
.await;
}
}
}
}
Event::MessageDelete {
deleted_message_id, ..
} => {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
let bot_response = edit_tracker
.write()
.unwrap()
.find_bot_response(*deleted_message_id)
.cloned();
if let Some(bot_response) = bot_response {
if let Err(e) = bot_response.delete(&ctx).await {
println!(
"Warning: couldn't delete bot response when user deleted message: {}",
e
);
}
}
}
}
Event::InteractionCreate {
interaction: serenity::Interaction::ApplicationCommand(interaction),
} => {
if let Err(Some((e, error_ctx))) = slash::dispatch_interaction(
self,
&ctx,
interaction,
&std::sync::atomic::AtomicBool::new(false),
)
.await
{
if let Some(on_error) = error_ctx.ctx.command.options().on_error {
on_error(e, error_ctx).await;
} else {
(self.options.on_error)(
e,
ErrorContext::Command(CommandErrorContext::Application(error_ctx)),
)
.await;
}
}
}
Event::InteractionCreate {
interaction: serenity::Interaction::Autocomplete(interaction),
} => {
if let Err(Some((e, error_ctx))) = slash::dispatch_autocomplete(
self,
&ctx,
interaction,
&std::sync::atomic::AtomicBool::new(false),
)
.await
{
if let Some(on_error) = error_ctx.ctx.command.options().on_error {
on_error(e, error_ctx).await;
} else {
(self.options.on_error)(e, ErrorContext::Autocomplete(error_ctx)).await;
}
}
}
_ => {}
}
// Do this after the framework's Ready handling, so that self.get_user_data() doesn't
// potentially block indefinitely
if let Err(e) =
(self.options.listener)(&ctx, &event, self, self.get_user_data().await).await
{
(self.options.on_error)(e, ErrorContext::Listener(&event)).await;
}
}
}
| get_user_data | identifier_name |
mod.rs | //! The central Framework struct that ties everything together.
// Prefix and slash specific implementation details
mod prefix;
mod slash;
mod builder;
pub use builder::*;
use crate::serenity::client::{bridge::gateway::ShardManager, Client};
use crate::serenity_prelude as serenity;
use crate::*;
pub use prefix::dispatch_message;
async fn check_permissions<U, E>(
ctx: crate::Context<'_, U, E>,
required_permissions: serenity::Permissions,
) -> bool | );
return false;
}
None => return false,
};
// If member not in cache (probably because presences intent is not enabled), retrieve via HTTP
let member = match guild.members.get(&ctx.author().id) {
Some(x) => x.clone(),
None => match ctx
.discord()
.http
.get_member(guild_id.0, ctx.author().id.0)
.await
{
Ok(member) => member,
Err(_) => return false,
},
};
match guild.user_permissions_in(channel, &member) {
Ok(perms) => perms.contains(required_permissions),
Err(_) => false,
}
}
async fn check_required_permissions_and_owners_only<U, E>(
ctx: crate::Context<'_, U, E>,
required_permissions: serenity::Permissions,
owners_only: bool,
) -> bool {
if owners_only && !ctx.framework().options().owners.contains(&ctx.author().id) {
return false;
}
if !check_permissions(ctx, required_permissions).await {
return false;
}
true
}
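// A sketch of how the combined check above might be invoked by a dispatcher;
// `cmd_permissions` and `cmd_owners_only` are illustrative placeholders, not
// names from this crate:
//
//     if !check_required_permissions_and_owners_only(ctx, cmd_permissions, cmd_owners_only).await {
//         return Ok(()); // silently skip the invocation, mirroring the checks' bool contract
//     }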
/// The main framework struct which stores all data and handles message and interaction dispatch.
pub struct Framework<U, E> {
user_data: once_cell::sync::OnceCell<U>,
bot_id: serenity::UserId,
// TODO: wrap in RwLock to allow changing framework options while running? Could also replace
// the edit tracking cache interior mutability
options: FrameworkOptions<U, E>,
application_id: serenity::ApplicationId,
// Will be initialized to Some on construction, and then taken out on startup
client: std::sync::Mutex<Option<serenity::Client>>,
// Initialized to Some during construction; so shouldn't be None at any observable point
shard_manager: std::sync::Mutex<Option<std::sync::Arc<tokio::sync::Mutex<ShardManager>>>>,
// Filled with Some on construction. Taken out and executed on first Ready gateway event
user_data_setup: std::sync::Mutex<
Option<
Box<
dyn Send
+ Sync
+ for<'a> FnOnce(
&'a serenity::Context,
&'a serenity::Ready,
&'a Self,
) -> BoxFuture<'a, Result<U, E>>,
>,
>,
>,
}
impl<U, E> Framework<U, E> {
/// Create a framework builder to configure, create and run a framework.
///
/// For more information, see [`FrameworkBuilder`]
pub fn build() -> FrameworkBuilder<U, E> {
FrameworkBuilder::default()
}
/// Set up a new [`Framework`]. For more ergonomic setup, please see [`FrameworkBuilder`]
///
/// This function is async and returns Result because it already initializes the Discord client.
///
/// The user data callback is invoked as soon as the bot is logged in. That way, bot data like
/// user ID or connected guilds can be made available to the user data setup function. The user
/// data setup is not allowed to return Result because there would be no reasonable
/// course of action on error.
pub async fn new<F>(
application_id: serenity::ApplicationId,
client_builder: serenity::ClientBuilder<'_>,
user_data_setup: F,
options: FrameworkOptions<U, E>,
) -> Result<std::sync::Arc<Self>, serenity::Error>
where
F: Send
+ Sync
+ 'static
+ for<'a> FnOnce(
&'a serenity::Context,
&'a serenity::Ready,
&'a Self,
) -> BoxFuture<'a, Result<U, E>>,
U: Send + Sync + 'static,
E: Send + 'static,
{
let self_1 = std::sync::Arc::new(Self {
user_data: once_cell::sync::OnceCell::new(),
user_data_setup: std::sync::Mutex::new(Some(Box::new(user_data_setup))),
bot_id: serenity::parse_token(client_builder.get_token().trim_start_matches("Bot "))
.expect("Invalid bot token")
.bot_user_id,
// To break up the circular dependency (framework setup -> client setup -> event handler
// -> framework), we initialize this with None and then immediately fill in once the
// client is created
client: std::sync::Mutex::new(None),
options,
application_id,
shard_manager: std::sync::Mutex::new(None),
});
let self_2 = self_1.clone();
let event_handler = EventWrapper(move |ctx, event| {
let self_2 = std::sync::Arc::clone(&self_2);
Box::pin(async move {
self_2.event(ctx, event).await;
}) as _
});
let client: Client = client_builder
.application_id(application_id.0)
.event_handler(event_handler)
.await?;
*self_1.shard_manager.lock().unwrap() = Some(client.shard_manager.clone());
*self_1.client.lock().unwrap() = Some(client);
Ok(self_1)
}
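// A minimal construction sketch, assuming a unit user-data type, a boxed
// error type, and that `FrameworkOptions` implements `Default`; the `token`
// and `application_id` variables are illustrative only:
//
//     let framework = Framework::new(
//         serenity::ApplicationId(application_id),
//         serenity::Client::builder(&token),
//         |_ctx, _ready, _framework| Box::pin(async move { Ok(()) }),
//         FrameworkOptions::default(),
//     )
//     .await?;
//     framework.start().await?;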
/// Start the framework.
///
/// Takes a `serenity::ClientBuilder`, in which you need to supply the bot token, as well as
/// any gateway intents.
pub async fn start(self: std::sync::Arc<Self>) -> Result<(), serenity::Error>
where
U: Send + Sync + 'static,
E: Send + 'static,
{
let mut client = self
.client
.lock()
.unwrap()
.take()
.expect("Prepared client is missing");
let edit_track_cache_purge_task = tokio::spawn(async move {
loop {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
edit_tracker.write().unwrap().purge();
}
// not sure if the purging interval should be configurable
tokio::time::sleep(std::time::Duration::from_secs(60)).await;
}
});
// This will run for as long as the bot is active
client.start().await?;
edit_track_cache_purge_task.abort();
Ok(())
}
/// Return the stored framework options, including commands.
pub fn options(&self) -> &FrameworkOptions<U, E> {
&self.options
}
/// Returns the application ID given to the framework on its creation.
pub fn application_id(&self) -> serenity::ApplicationId {
self.application_id
}
/// Returns serenity's client shard manager.
pub fn shard_manager(&self) -> std::sync::Arc<tokio::sync::Mutex<ShardManager>> {
self.shard_manager
.lock()
.unwrap()
.clone()
.expect("fatal: shard manager not stored in framework initialization")
}
async fn get_user_data(&self) -> &U {
// We shouldn't get a Message event before a Ready event. But if we do, wait until
// the Ready event does come and the resulting data has arrived.
loop {
match self.user_data.get() {
Some(x) => break x,
None => tokio::time::sleep(std::time::Duration::from_millis(100)).await,
}
}
}
async fn event(&self, ctx: serenity::Context, event: Event<'_>)
where
U: Send + Sync,
{
match &event {
Event::Ready { data_about_bot } => {
let user_data_setup = Option::take(&mut *self.user_data_setup.lock().unwrap());
if let Some(user_data_setup) = user_data_setup {
match user_data_setup(&ctx, data_about_bot, self).await {
Ok(user_data) => {
let _: Result<_, _> = self.user_data.set(user_data);
}
Err(e) => (self.options.on_error)(e, ErrorContext::Setup).await,
}
} else {
// discarding duplicate Discord bot ready event
// (happens regularly when the bot is online for a long period of time)
}
}
Event::Message { new_message } => {
if let Err(Some((err, ctx))) =
prefix::dispatch_message(self, &ctx, new_message, false).await
{
if let Some(on_error) = ctx.command.options.on_error {
(on_error)(err, ctx).await;
} else {
(self.options.on_error)(
err,
crate::ErrorContext::Command(crate::CommandErrorContext::Prefix(ctx)),
)
.await;
}
}
}
Event::MessageUpdate { event, .. } => {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
let msg = edit_tracker.write().unwrap().process_message_update(
event,
self.options().prefix_options.ignore_edit_tracker_cache,
);
if let Some(msg) = msg {
if let Err(Some((err, ctx))) =
prefix::dispatch_message(self, &ctx, &msg, true).await
{
(self.options.on_error)(
err,
crate::ErrorContext::Command(crate::CommandErrorContext::Prefix(
ctx,
)),
)
.await;
}
}
}
}
Event::MessageDelete {
deleted_message_id, ..
} => {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
let bot_response = edit_tracker
.write()
.unwrap()
.find_bot_response(*deleted_message_id)
.cloned();
if let Some(bot_response) = bot_response {
if let Err(e) = bot_response.delete(&ctx).await {
println!(
"Warning: couldn't delete bot response when user deleted message: {}",
e
);
}
}
}
}
Event::InteractionCreate {
interaction: serenity::Interaction::ApplicationCommand(interaction),
} => {
if let Err(Some((e, error_ctx))) = slash::dispatch_interaction(
self,
&ctx,
interaction,
&std::sync::atomic::AtomicBool::new(false),
)
.await
{
if let Some(on_error) = error_ctx.ctx.command.options().on_error {
on_error(e, error_ctx).await;
} else {
(self.options.on_error)(
e,
ErrorContext::Command(CommandErrorContext::Application(error_ctx)),
)
.await;
}
}
}
Event::InteractionCreate {
interaction: serenity::Interaction::Autocomplete(interaction),
} => {
if let Err(Some((e, error_ctx))) = slash::dispatch_autocomplete(
self,
&ctx,
interaction,
&std::sync::atomic::AtomicBool::new(false),
)
.await
{
if let Some(on_error) = error_ctx.ctx.command.options().on_error {
on_error(e, error_ctx).await;
} else {
(self.options.on_error)(e, ErrorContext::Autocomplete(error_ctx)).await;
}
}
}
_ => {}
}
// Do this after the framework's Ready handling, so that self.get_user_data() doesn't
// potentially block indefinitely
if let Err(e) =
(self.options.listener)(&ctx, &event, self, self.get_user_data().await).await
{
(self.options.on_error)(e, ErrorContext::Listener(&event)).await;
}
}
}
| {
if required_permissions.is_empty() {
return true;
}
let guild_id = match ctx.guild_id() {
Some(x) => x,
None => return true, // no permission checks in DMs
};
let guild = match ctx.discord().cache.guild(guild_id) {
Some(x) => x,
None => return false, // Guild not in cache
};
let channel = match guild.channels.get(&ctx.channel_id()) {
Some(serenity::Channel::Guild(channel)) => channel,
Some(_other_channel) => {
println!(
"Warning: guild message was supposedly sent in a non-guild channel. Denying invocation" | identifier_body |
mod.rs | //! The central Framework struct that ties everything together.
// Prefix and slash specific implementation details
mod prefix;
mod slash;
mod builder;
pub use builder::*;
use crate::serenity::client::{bridge::gateway::ShardManager, Client};
use crate::serenity_prelude as serenity;
use crate::*;
pub use prefix::dispatch_message;
async fn check_permissions<U, E>(
ctx: crate::Context<'_, U, E>,
required_permissions: serenity::Permissions,
) -> bool {
if required_permissions.is_empty() {
return true;
}
let guild_id = match ctx.guild_id() {
Some(x) => x,
None => return true, // no permission checks in DMs
};
let guild = match ctx.discord().cache.guild(guild_id) {
Some(x) => x,
None => return false, // Guild not in cache
};
let channel = match guild.channels.get(&ctx.channel_id()) {
Some(serenity::Channel::Guild(channel)) => channel,
Some(_other_channel) => {
println!(
"Warning: guild message was supposedly sent in a non-guild channel. Denying invocation"
);
return false;
}
None => return false,
};
// If member not in cache (probably because presences intent is not enabled), retrieve via HTTP
let member = match guild.members.get(&ctx.author().id) {
Some(x) => x.clone(),
None => match ctx
.discord()
.http
.get_member(guild_id.0, ctx.author().id.0)
.await
{
Ok(member) => member,
Err(_) => return false,
},
};
match guild.user_permissions_in(channel, &member) {
Ok(perms) => perms.contains(required_permissions),
Err(_) => false,
}
}
async fn check_required_permissions_and_owners_only<U, E>(
ctx: crate::Context<'_, U, E>,
required_permissions: serenity::Permissions,
owners_only: bool,
) -> bool {
if owners_only && !ctx.framework().options().owners.contains(&ctx.author().id) {
return false;
}
if !check_permissions(ctx, required_permissions).await {
return false;
}
true
}
/// The main framework struct which stores all data and handles message and interaction dispatch.
pub struct Framework<U, E> {
user_data: once_cell::sync::OnceCell<U>,
bot_id: serenity::UserId,
// TODO: wrap in RwLock to allow changing framework options while running? Could also replace
// the edit tracking cache interior mutability
options: FrameworkOptions<U, E>,
application_id: serenity::ApplicationId,
// Will be initialized to Some on construction, and then taken out on startup
client: std::sync::Mutex<Option<serenity::Client>>,
// Initialized to Some during construction; so shouldn't be None at any observable point
shard_manager: std::sync::Mutex<Option<std::sync::Arc<tokio::sync::Mutex<ShardManager>>>>,
// Filled with Some on construction. Taken out and executed on first Ready gateway event
user_data_setup: std::sync::Mutex<
Option<
Box<
dyn Send
+ Sync
+ for<'a> FnOnce(
&'a serenity::Context,
&'a serenity::Ready,
&'a Self,
) -> BoxFuture<'a, Result<U, E>>,
>,
>,
>,
}
impl<U, E> Framework<U, E> {
/// Create a framework builder to configure, create and run a framework.
///
/// For more information, see [`FrameworkBuilder`]
pub fn build() -> FrameworkBuilder<U, E> {
FrameworkBuilder::default()
}
/// Set up a new [`Framework`]. For more ergonomic setup, please see [`FrameworkBuilder`]
///
/// This function is async and returns Result because it already initializes the Discord client.
///
/// The user data callback is invoked as soon as the bot is logged in. That way, bot data like
/// user ID or connected guilds can be made available to the user data setup function. The user
/// data setup is not allowed to return Result because there would be no reasonable
/// course of action on error.
pub async fn new<F>(
application_id: serenity::ApplicationId,
client_builder: serenity::ClientBuilder<'_>,
user_data_setup: F,
options: FrameworkOptions<U, E>,
) -> Result<std::sync::Arc<Self>, serenity::Error>
where
F: Send
+ Sync
+ 'static
+ for<'a> FnOnce(
&'a serenity::Context,
&'a serenity::Ready,
&'a Self,
) -> BoxFuture<'a, Result<U, E>>,
U: Send + Sync + 'static,
E: Send + 'static,
{
let self_1 = std::sync::Arc::new(Self {
user_data: once_cell::sync::OnceCell::new(),
user_data_setup: std::sync::Mutex::new(Some(Box::new(user_data_setup))),
bot_id: serenity::parse_token(client_builder.get_token().trim_start_matches("Bot "))
.expect("Invalid bot token")
.bot_user_id,
// To break up the circular dependency (framework setup -> client setup -> event handler
// -> framework), we initialize this with None and then immediately fill in once the
// client is created
client: std::sync::Mutex::new(None),
options,
application_id,
shard_manager: std::sync::Mutex::new(None),
});
let self_2 = self_1.clone();
let event_handler = EventWrapper(move |ctx, event| {
let self_2 = std::sync::Arc::clone(&self_2);
Box::pin(async move {
self_2.event(ctx, event).await;
}) as _
});
let client: Client = client_builder
.application_id(application_id.0)
.event_handler(event_handler)
.await?;
*self_1.shard_manager.lock().unwrap() = Some(client.shard_manager.clone());
*self_1.client.lock().unwrap() = Some(client);
Ok(self_1)
}
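// The Mutex<Option<...>> late-initialization trick used above, shown as a
// self-contained sketch with plain std types (illustrative only):
//
//     struct Holder {
//         slot: std::sync::Mutex<Option<String>>,
//     }
//
//     let holder = std::sync::Arc::new(Holder { slot: std::sync::Mutex::new(None) });
//     // ... give clones of `holder` to anything that needs a back-reference ...
//     *holder.slot.lock().unwrap() = Some("filled in after construction".into());
//     let value = holder.slot.lock().unwrap().take().expect("slot was filled");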
/// Start the framework.
///
/// Takes a `serenity::ClientBuilder`, in which you need to supply the bot token, as well as
/// any gateway intents.
pub async fn start(self: std::sync::Arc<Self>) -> Result<(), serenity::Error>
where
U: Send + Sync + 'static,
E: Send + 'static,
{
let mut client = self
.client
.lock()
.unwrap()
.take()
.expect("Prepared client is missing");
let edit_track_cache_purge_task = tokio::spawn(async move {
loop {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
edit_tracker.write().unwrap().purge();
}
// not sure if the purging interval should be configurable
tokio::time::sleep(std::time::Duration::from_secs(60)).await;
}
});
// This will run for as long as the bot is active
client.start().await?;
edit_track_cache_purge_task.abort();
Ok(())
}
/// Return the stored framework options, including commands.
pub fn options(&self) -> &FrameworkOptions<U, E> { | pub fn application_id(&self) -> serenity::ApplicationId {
self.application_id
}
/// Returns serenity's client shard manager.
pub fn shard_manager(&self) -> std::sync::Arc<tokio::sync::Mutex<ShardManager>> {
self.shard_manager
.lock()
.unwrap()
.clone()
.expect("fatal: shard manager not stored in framework initialization")
}
async fn get_user_data(&self) -> &U {
// We shouldn't get a Message event before a Ready event. But if we do, wait until
// the Ready event does come and the resulting data has arrived.
loop {
match self.user_data.get() {
Some(x) => break x,
None => tokio::time::sleep(std::time::Duration::from_millis(100)).await,
}
}
}
async fn event(&self, ctx: serenity::Context, event: Event<'_>)
where
U: Send + Sync,
{
match &event {
Event::Ready { data_about_bot } => {
let user_data_setup = Option::take(&mut *self.user_data_setup.lock().unwrap());
if let Some(user_data_setup) = user_data_setup {
match user_data_setup(&ctx, data_about_bot, self).await {
Ok(user_data) => {
let _: Result<_, _> = self.user_data.set(user_data);
}
Err(e) => (self.options.on_error)(e, ErrorContext::Setup).await,
}
} else {
// discarding duplicate Discord bot ready event
// (happens regularly when the bot is online for a long period of time)
}
}
Event::Message { new_message } => {
if let Err(Some((err, ctx))) =
prefix::dispatch_message(self, &ctx, new_message, false).await
{
if let Some(on_error) = ctx.command.options.on_error {
(on_error)(err, ctx).await;
} else {
(self.options.on_error)(
err,
crate::ErrorContext::Command(crate::CommandErrorContext::Prefix(ctx)),
)
.await;
}
}
}
Event::MessageUpdate { event, .. } => {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
let msg = edit_tracker.write().unwrap().process_message_update(
event,
self.options().prefix_options.ignore_edit_tracker_cache,
);
if let Some(msg) = msg {
if let Err(Some((err, ctx))) =
prefix::dispatch_message(self, &ctx, &msg, true).await
{
(self.options.on_error)(
err,
crate::ErrorContext::Command(crate::CommandErrorContext::Prefix(
ctx,
)),
)
.await;
}
}
}
}
Event::MessageDelete {
deleted_message_id, ..
} => {
if let Some(edit_tracker) = &self.options.prefix_options.edit_tracker {
let bot_response = edit_tracker
.write()
.unwrap()
.find_bot_response(*deleted_message_id)
.cloned();
if let Some(bot_response) = bot_response {
if let Err(e) = bot_response.delete(&ctx).await {
println!(
"Warning: couldn't delete bot response when user deleted message: {}",
e
);
}
}
}
}
Event::InteractionCreate {
interaction: serenity::Interaction::ApplicationCommand(interaction),
} => {
if let Err(Some((e, error_ctx))) = slash::dispatch_interaction(
self,
&ctx,
interaction,
&std::sync::atomic::AtomicBool::new(false),
)
.await
{
if let Some(on_error) = error_ctx.ctx.command.options().on_error {
on_error(e, error_ctx).await;
} else {
(self.options.on_error)(
e,
ErrorContext::Command(CommandErrorContext::Application(error_ctx)),
)
.await;
}
}
}
Event::InteractionCreate {
interaction: serenity::Interaction::Autocomplete(interaction),
} => {
if let Err(Some((e, error_ctx))) = slash::dispatch_autocomplete(
self,
&ctx,
interaction,
&std::sync::atomic::AtomicBool::new(false),
)
.await
{
if let Some(on_error) = error_ctx.ctx.command.options().on_error {
on_error(e, error_ctx).await;
} else {
(self.options.on_error)(e, ErrorContext::Autocomplete(error_ctx)).await;
}
}
}
_ => {}
}
// Do this after the framework's Ready handling, so that self.get_user_data() doesn't
// potentially block indefinitely
if let Err(e) =
(self.options.listener)(&ctx, &event, self, self.get_user_data().await).await
{
(self.options.on_error)(e, ErrorContext::Listener(&event)).await;
}
}
} | &self.options
}
/// Returns the application ID given to the framework on its creation. | random_line_split |
render.rs | extern crate gl;
extern crate libc;
use vecmath::Vec2;
use gl::types::*;
use std::ffi::CString;
use libc::{c_char, c_int};
use std::mem::{uninitialized, transmute, size_of};
use std::ptr;
use std::slice;
use std::vec::Vec;
use assets;
macro_rules! check_error(
() => (
match gl::GetError() {
gl::NO_ERROR => {}
gl::INVALID_ENUM => panic!("Invalid enum!"),
gl::INVALID_VALUE => panic!("Invalid value!"),
gl::INVALID_OPERATION => panic!("Invalid operation!"),
gl::INVALID_FRAMEBUFFER_OPERATION => panic!("Invalid framebuffer operation?!"),
gl::OUT_OF_MEMORY => panic!("Out of memory bro!!!!!!!"),
_ => panic!("I DON'T KNOW. FULL BANANNACAKES.")
}
)
);
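// Intended usage, inferred from the macro's shape rather than stated in the
// original: invoke it immediately after a suspect GL call, e.g.
//
//     unsafe {
//         gl::BindTexture(gl::TEXTURE_2D, tex_id);
//         check_error!();
//     }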
extern "C" {
fn stbi_load(
filename: *const c_char,
x: *mut c_int,
y: *mut c_int,
components: *mut c_int,
force_components: c_int
) -> *const u8;
fn stbi_image_free(ptr: *const u8);
}
// Global GL game state
pub struct GLData {
pub vao: GLuint,
pub square_vbo: GLuint,
pub square_ebo: GLuint,
pub images: assets::Images,
pub shaders: assets::Shaders
}
// TODO all this stuff is no longer used and is managed by assets instead.
pub static ATTR_VERTEX_POS: u32 = 0;
pub static ATTR_POSITION: u32 = 1;
pub static ATTR_FRAME: u32 = 2;
pub static ATTR_FLIPPED: u32 = 3;
pub static FRAME_UNIFORM_MAX: i64 = 256;
pub static STANDARD_VERTEX: &'static str = "
#version 330 core
// Per vertex, normalized:
layout (location = 0) in vec2 vertex_pos;
// Per instance:
layout (location = 1) in vec2 position; // in pixels
layout (location = 2) in int frame;
layout (location = 3) in int flipped; // actually a bool
// NOTE up this if you run into problems
uniform vec2[256] frames;
uniform vec2 screen_size;
uniform vec2 cam_pos; // in pixels
uniform vec2 sprite_size; // in pixels
uniform float scale;
out vec2 texcoord;
const vec2 TEXCOORD_FROM_ID[4] = vec2[4](
vec2(1.0, 1.0), vec2(1.0, 0.0),
vec2(0.0, 0.0), vec2(0.0, 1.0)
);
vec2 from_pixel(vec2 pos)
{
return pos / screen_size;
}
int flipped_vertex_id()
{
return 3 - gl_VertexID;
}
void main()
{
vec2 pixel_screen_pos = (position - cam_pos) * 2;
gl_Position = vec4(
(vertex_pos * from_pixel(sprite_size) + from_pixel(pixel_screen_pos)) * scale,
0.0f, 1.0f
);
int index = flipped != 0 ? flipped_vertex_id() : gl_VertexID;
if (frame == -1)
texcoord = TEXCOORD_FROM_ID[index];
else
texcoord = frames[frame * 4 + index];
texcoord.y = 1 - texcoord.y;
}
";
pub static STANDARD_FRAGMENT: &'static str = "
#version 330 core
in vec2 texcoord;
out vec4 color;
uniform sampler2D tex;
void main()
{
color = texture(tex, texcoord);
}
";
macro_rules! check_log(
($typ:expr, $get_iv:ident | $get_log:ident $val:ident $status:ident $on_error:ident) => (
unsafe {
let mut status = 0;
gl::$get_iv($val, gl::$status, &mut status);
if status == 0 {
let mut len = 0;
gl::$get_iv($val, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = Vec::with_capacity(len as usize - 1);
for _ in (0..len-1) { buf.push(0); }
gl::$get_log($val, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
match String::from_utf8(buf) {
Ok(error_message) => $on_error!("{}", error_message),
Err(e) => $on_error!("Error parsing OpenGL error message: {}", e)
}
false
} else {
println!("I THINK THE {} COMPILED", $typ);
true
}
}
)
);
macro_rules! make_shader(
(($name:expr): $shader_type:ident) => (
unsafe {
let sh = gl::CreateShader(gl::$shader_type);
let shader_src_str = CString::new($name).unwrap();
gl::ShaderSource(sh, 1, &shader_src_str.as_ptr(), ptr::null());
gl::CompileShader(sh);
sh
}
)
);
pub struct Texcoords {
pub top_right: Vec2<GLfloat>,
pub bottom_right: Vec2<GLfloat>,
pub bottom_left: Vec2<GLfloat>,
pub top_left: Vec2<GLfloat>
}
impl Texcoords {
pub unsafe fn copy_to(&self, dest: *mut Texcoords) {
ptr::copy(self, dest, 1);
}
}
// Represents an animation frame; a square section of a Texture.
pub struct Frame {
pub position: Vec2<f32>,
pub size: Vec2<f32>,
// Texcoords are generated via #generate_texcoords.
pub texcoords: Texcoords
}
impl Frame {
pub fn generate_texcoords(&mut self, tex_width: f32, tex_height: f32) {
let ref position = self.position;
let ref size = self.size;
// TODO SIMD this son of a bitch
self.texcoords = Texcoords {
top_right: Vec2::new(
(position.x + size.x) / tex_width,
(position.y + size.y) / tex_height
),
bottom_right: Vec2::new(
(position.x + size.x) / tex_width,
(position.y) / tex_height
),
bottom_left: Vec2::new(
(position.x) / tex_width,
(position.y) / tex_height
),
top_left: Vec2::new(
(position.x) / tex_width,
(position.y + size.y) / tex_height
)
};
}
}
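// Worked example of the mapping above (values chosen for illustration): on a
// 64x64 texture, a 16x16 frame positioned at (16, 32) yields
//   top_right    = ((16 + 16) / 64, (32 + 16) / 64) = (0.5,  0.75)
//   bottom_right = ((16 + 16) / 64,  32       / 64) = (0.5,  0.5)
//   bottom_left  = ( 16       / 64,  32       / 64) = (0.25, 0.5)
//   top_left     = ( 16       / 64, (32 + 16) / 64) = (0.25, 0.75)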
// Represents an actual texture that is currently on the GPU.
#[allow(missing_copy_implementations)]
pub struct Texture {
pub id: GLuint,
pub width: i32,
pub height: i32,
pub filename: &'static str,
pub frame_texcoords_size: i64,
pub texcoords_space: *mut [Texcoords]
}
impl Texture {
pub fn set_full(&self, sampler_uniform: GLint, sprite_size_uniform: GLint) {
unsafe {
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, self.id);
gl::Uniform1i(sampler_uniform, 0);
gl::Uniform2f(sprite_size_uniform, self.width as f32, self.height as f32);
}
}
#[inline]
pub fn texcoords(&self) -> &[Texcoords] {
unsafe { transmute(self.texcoords_space) }
}
#[inline]
pub fn texcoords_mut(&mut self) -> &mut [Texcoords] {
unsafe { transmute(self.texcoords_space) }
}
// NOTE this expects #generate_texcoords_buffer to have been called
// if there are frames.
pub fn set(&self, sampler_uniform: GLint,
sprite_size_uniform: GLint,
frames_uniform: GLint,
width: f32, height: f32) {
unsafe {
assert!(self.frame_texcoords_size / 8 < FRAME_UNIFORM_MAX);
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, self.id);
gl::Uniform1i(sampler_uniform, 0);
gl::Uniform2f(sprite_size_uniform, width as f32, height as f32);
let frames_len = self.texcoords().len();
if frames_len > 0 {
gl::Uniform2fv(
frames_uniform,
frames_len as GLint * 4,
transmute(&(&*self.texcoords_space)[0])
);
}
}
}
/*
fn put_texcoord(&mut self, index: usize, texcoord: Texcoords) {
self.texcoords_mut()[index] = texcoord;
}
*/
// NOTE this should be properly merged with add_frames.
pub fn generate_texcoords_buffer(
&mut self, frame_width: usize, frame_height: usize, space: *mut [Texcoords]
) {
unsafe {
let frames_len = (*space).len();
let mut frames = Vec::<Frame>::with_capacity(frames_len);
self.add_frames(&mut frames, frame_width, frame_height);
assert_eq!(frames.len(), frames_len); // PLZ
self.texcoords_space = space;
for i in (0..frames_len) {
frames[i].texcoords.copy_to(&mut self.texcoords_mut()[i]);
}
}
}
// Fill the given slice with frames of the given width and height.
// So this is now called only by #generate_texcoords_buffer
pub fn add_frames(&mut self, space: &mut Vec<Frame>, uwidth: usize, uheight: usize) {
let count = space.capacity();
let tex_width = self.width as f32;
let tex_height = self.height as f32;
let width = uwidth as f32;
let height = uheight as f32;
{
let mut current_pos = Vec2::<f32>::new(0.0, tex_height - height);
for _ in (0..count) {
if current_pos.x + width > tex_width {
current_pos.x = 0.0;
current_pos.y -= height;
}
if current_pos.y < 0.0 {
panic!(
"Too many frames! Asked for {} {}x{} frames on a {}x{} texture.",
count, width, height, tex_width, tex_height
);
}
let mut frame = Frame {
position: current_pos,
size: Vec2::new(width, height),
texcoords: unsafe { uninitialized() }
};
frame.generate_texcoords(tex_width, tex_height);
space.push(frame);
current_pos.x += width;
}
}
self.frame_texcoords_size = size_of::<Texcoords>() as i64 * count as i64;
}
// TODO man, should this be a destructor?
// A: NO
pub fn unload(&mut self) {
unsafe {
gl::DeleteTextures(1, &self.id);
}
}
}
// NOTE don't instantiate these willy nilly!
pub struct ImageAsset {
// I don't like wasting space with the pointer here, but
// it's hard to pass gl_data to a method called on this
// because of the borrow checker...
pub gl_data: *const GLData,
pub filename: &'static str,
pub vbo: GLuint,
pub set_attributes: extern "Rust" fn(GLuint),
pub shader: extern "Rust" fn(&GLData) -> &assets::Shader,
pub attributes_size: usize,
pub texture: Texture,
pub frame_width: usize,
pub frame_height: usize,
pub texcoord_count: usize,
// The next texcoord_count * size_of::<Texcoords>() bytes
// should be free for this struct to use.
}
impl ImageAsset {
pub unsafe fn texcoords(&mut self) -> &mut [Texcoords] |
pub fn loaded(&self) -> bool { self.vbo != 0 }
pub unsafe fn load(&mut self) {
let mut texture = load_texture(self.filename);
texture.generate_texcoords_buffer(self.frame_width, self.frame_height, self.texcoords());
self.texture = texture;
gl::GenBuffers(1, &mut self.vbo);
}
pub unsafe fn empty_buffer_data(&mut self, count: i64, draw: GLenum) {
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
gl::BufferData(
gl::ARRAY_BUFFER,
count * self.attributes_size as GLsizeiptr,
ptr::null(),
draw
);
}
// Sets the texture, and the attributes
pub unsafe fn set(&mut self) {
let set_attributes = self.set_attributes;
let get_shader = self.shader;
let shader = get_shader(transmute(self.gl_data));
gl::UseProgram(shader.program);
self.texture.set(
shader.tex_uniform,
shader.sprite_size_uniform,
shader.frames_uniform,
self.frame_width as f32, self.frame_height as f32
);
set_attributes(self.vbo);
}
pub unsafe fn unload(&mut self) {
panic!("Unloading doesn't work yet hahahaha!");
}
}
// Load a texture from the given filename into the GPU
// memory, returning a struct holding the OpenGL ID and
// dimensions.
pub fn load_texture(filename: &'static str) -> Texture {
let mut width = 0; let mut height = 0; let mut comp = 0;
let mut tex_id: GLuint = 0;
unsafe {
let cfilename = CString::new(filename.to_string()).unwrap();
let img = stbi_load(cfilename.as_ptr(), &mut width, &mut height, &mut comp, 4);
assert_eq!(comp, 4);
gl::GenTextures(1, &mut tex_id);
gl::BindTexture(gl::TEXTURE_2D, tex_id);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::NEAREST as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::NEAREST as GLint);
println!("Sending {} to GPU. Width: {} Height: {}", filename, width, height);
gl::TexImage2D(
gl::TEXTURE_2D, 0, gl::RGBA as i32,
width, height, 0, gl::RGBA,
gl::UNSIGNED_BYTE, transmute(img)
);
stbi_image_free(img);
}
Texture {
id: tex_id,
width: width,
height: height,
filename: filename,
frame_texcoords_size: 0,
texcoords_space: &mut []
}
}
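// For example (the path is illustrative, not an asset shipped with this code):
//
//     let tex = load_texture("assets/spritesheet.png");
//     println!("uploaded texture {} ({}x{})", tex.id, tex.width, tex.height);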
pub fn create_program(vert: String, frag: String) -> Option<GLuint> {
let vert_id = make_shader!((vert): VERTEX_SHADER);
let vert_result: bool = check_log!(
"VERTEX SHADER",
GetShaderiv | GetShaderInfoLog
vert_id COMPILE_STATUS
println
);
if !vert_result {
unsafe { gl::DeleteShader(vert_id); }
return None;
}
let frag_id = make_shader!((frag): FRAGMENT_SHADER);
let frag_result: bool = check_log!(
"FRAGMENT SHADER",
GetShaderiv | GetShaderInfoLog
frag_id COMPILE_STATUS
println
);
if !frag_result {
unsafe { gl::DeleteShader(vert_id); }
unsafe { gl::DeleteShader(frag_id); }
return None;
}
let program_id = unsafe { gl::CreateProgram() };
unsafe {
gl::AttachShader(program_id, vert_id);
gl::AttachShader(program_id, frag_id);
gl::LinkProgram(program_id);
}
let link_result = check_log!(
"SHADER PROGRAM",
GetProgramiv | GetProgramInfoLog
program_id LINK_STATUS
println
);
if !link_result {
unsafe { gl::DeleteProgram(program_id); }
unsafe { gl::DeleteShader(vert_id); }
unsafe { gl::DeleteShader(frag_id); }
return None;
}
unsafe {
gl::DeleteShader(vert_id);
gl::DeleteShader(frag_id);
}
Some(program_id)
}
| {
let count_ptr: *mut usize = &mut self.texcoord_count;
slice::from_raw_parts_mut::<Texcoords>(
transmute(count_ptr.offset(1)),
self.texcoord_count
)
} | identifier_body |
render.rs | extern crate gl;
extern crate libc;
use vecmath::Vec2;
use gl::types::*;
use std::ffi::CString;
use libc::{c_char, c_int};
use std::mem::{uninitialized, transmute, size_of};
use std::ptr;
use std::slice;
use std::vec::Vec;
use assets;
macro_rules! check_error(
() => (
match gl::GetError() {
gl::NO_ERROR => {}
gl::INVALID_ENUM => panic!("Invalid enum!"),
gl::INVALID_VALUE => panic!("Invalid value!"),
gl::INVALID_OPERATION => panic!("Invalid operation!"),
gl::INVALID_FRAMEBUFFER_OPERATION => panic!("Invalid framebuffer operation?!"),
gl::OUT_OF_MEMORY => panic!("Out of memory bro!!!!!!!"),
_ => panic!("I DON'T KNOW. FULL BANANNACAKES.")
}
)
);
extern "C" {
fn stbi_load(
filename: *const c_char,
x: *mut c_int,
y: *mut c_int,
components: *mut c_int,
force_components: c_int
) -> *const u8;
fn stbi_image_free(ptr: *const u8);
}
// Global GL game state
pub struct GLData {
pub vao: GLuint,
pub square_vbo: GLuint,
pub square_ebo: GLuint,
pub images: assets::Images,
pub shaders: assets::Shaders
}
// TODO all this stuff is no longer used and is managed by assets instead.
pub static ATTR_VERTEX_POS: u32 = 0;
pub static ATTR_POSITION: u32 = 1;
pub static ATTR_FRAME: u32 = 2;
pub static ATTR_FLIPPED: u32 = 3;
pub static FRAME_UNIFORM_MAX: i64 = 256;
pub static STANDARD_VERTEX: &'static str = "
#version 330 core
// Per vertex, normalized:
layout (location = 0) in vec2 vertex_pos;
// Per instance:
layout (location = 1) in vec2 position; // in pixels
layout (location = 2) in int frame;
layout (location = 3) in int flipped; // actually a bool
// NOTE up this if you run into problems
uniform vec2[256] frames;
uniform vec2 screen_size;
uniform vec2 cam_pos; // in pixels
uniform vec2 sprite_size; // in pixels
uniform float scale;
out vec2 texcoord;
const vec2 TEXCOORD_FROM_ID[4] = vec2[4](
vec2(1.0, 1.0), vec2(1.0, 0.0),
vec2(0.0, 0.0), vec2(0.0, 1.0)
);
vec2 from_pixel(vec2 pos)
{
return pos / screen_size;
}
int flipped_vertex_id()
{
return 3 - gl_VertexID;
}
void main()
{
vec2 pixel_screen_pos = (position - cam_pos) * 2;
gl_Position = vec4(
(vertex_pos * from_pixel(sprite_size) + from_pixel(pixel_screen_pos)) * scale,
0.0f, 1.0f
);
int index = flipped != 0 ? flipped_vertex_id() : gl_VertexID;
if (frame == -1)
texcoord = TEXCOORD_FROM_ID[index];
else
texcoord = frames[frame * 4 + index];
texcoord.y = 1 - texcoord.y;
}
";
pub static STANDARD_FRAGMENT: &'static str = "
#version 330 core
in vec2 texcoord;
out vec4 color;
uniform sampler2D tex;
void main()
{
color = texture(tex, texcoord);
}
";
macro_rules! check_log(
($typ:expr, $get_iv:ident | $get_log:ident $val:ident $status:ident $on_error:ident) => (
unsafe {
let mut status = 0;
gl::$get_iv($val, gl::$status, &mut status);
if status == 0 {
let mut len = 0;
gl::$get_iv($val, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = Vec::with_capacity(len as usize - 1);
for _ in (0..len-1) { buf.push(0); }
gl::$get_log($val, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
match String::from_utf8(buf) {
Ok(error_message) => $on_error!("{}", error_message),
Err(e) => $on_error!("Error parsing OpenGL error message: {}", e)
}
false
} else {
println!("I THINK THE {} COMPILED", $typ);
true
}
}
)
);
macro_rules! make_shader(
(($name:expr): $shader_type:ident) => (
unsafe {
let sh = gl::CreateShader(gl::$shader_type);
let shader_src_str = CString::new($name).unwrap();
gl::ShaderSource(sh, 1, &shader_src_str.as_ptr(), ptr::null());
gl::CompileShader(sh);
sh
}
)
);
pub struct Texcoords {
pub top_right: Vec2<GLfloat>,
pub bottom_right: Vec2<GLfloat>,
pub bottom_left: Vec2<GLfloat>,
pub top_left: Vec2<GLfloat>
}
impl Texcoords {
pub unsafe fn copy_to(&self, dest: *mut Texcoords) {
ptr::copy(self, dest, 1);
}
}
// Represents an animation frame; a square section of a Texture.
pub struct Frame {
pub position: Vec2<f32>,
pub size: Vec2<f32>,
// Texcoords are generated via #generate_texcoords.
pub texcoords: Texcoords
}
impl Frame {
pub fn generate_texcoords(&mut self, tex_width: f32, tex_height: f32) {
let ref position = self.position;
let ref size = self.size;
// TODO SIMD this son of a bitch
self.texcoords = Texcoords {
top_right: Vec2::new(
(position.x + size.x) / tex_width,
(position.y + size.y) / tex_height
),
bottom_right: Vec2::new(
(position.x + size.x) / tex_width,
(position.y) / tex_height
),
bottom_left: Vec2::new(
(position.x) / tex_width,
(position.y) / tex_height
),
top_left: Vec2::new(
(position.x) / tex_width,
(position.y + size.y) / tex_height
)
};
}
}
// Represents an actual texture that is currently on the GPU.
#[allow(missing_copy_implementations)]
pub struct Texture {
pub id: GLuint,
pub width: i32,
pub height: i32,
pub filename: &'static str,
pub frame_texcoords_size: i64,
pub texcoords_space: *mut [Texcoords]
}
impl Texture {
pub fn set_full(&self, sampler_uniform: GLint, sprite_size_uniform: GLint) {
unsafe {
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, self.id);
gl::Uniform1i(sampler_uniform, 0);
gl::Uniform2f(sprite_size_uniform, self.width as f32, self.height as f32);
}
}
#[inline]
pub fn texcoords(&self) -> &[Texcoords] {
unsafe { transmute(self.texcoords_space) }
}
#[inline]
pub fn texcoords_mut(&mut self) -> &mut [Texcoords] {
unsafe { transmute(self.texcoords_space) }
}
// NOTE this expects #generate_texcoords_buffer to have been called
// if there are frames.
pub fn set(&self, sampler_uniform: GLint,
sprite_size_uniform: GLint,
frames_uniform: GLint,
width: f32, height: f32) {
unsafe {
assert!(self.frame_texcoords_size / 8 < FRAME_UNIFORM_MAX);
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, self.id);
gl::Uniform1i(sampler_uniform, 0);
gl::Uniform2f(sprite_size_uniform, width as f32, height as f32);
let frames_len = self.texcoords().len();
if frames_len > 0 |
}
}
/*
fn put_texcoord(&mut self, index: usize, texcoord: Texcoords) {
self.texcoords_mut()[index] = texcoord;
}
*/
// NOTE this should be properly merged with add_frames.
pub fn generate_texcoords_buffer(
&mut self, frame_width: usize, frame_height: usize, space: *mut [Texcoords]
) {
unsafe {
let frames_len = (*space).len();
let mut frames = Vec::<Frame>::with_capacity(frames_len);
self.add_frames(&mut frames, frame_width, frame_height);
assert_eq!(frames.len(), frames_len); // PLZ
self.texcoords_space = space;
for i in (0..frames_len) {
frames[i].texcoords.copy_to(&mut self.texcoords_mut()[i]);
}
}
}
// Fill the given slice with frames of the given width and height.
// So this is now called only by #generate_texcoords_buffer
pub fn add_frames(&mut self, space: &mut Vec<Frame>, uwidth: usize, uheight: usize) {
let count = space.capacity();
let tex_width = self.width as f32;
let tex_height = self.height as f32;
let width = uwidth as f32;
let height = uheight as f32;
{
let mut current_pos = Vec2::<f32>::new(0.0, tex_height - height);
for _ in (0..count) {
if current_pos.x + width > tex_width {
current_pos.x = 0.0;
current_pos.y -= height;
}
if current_pos.y < 0.0 {
panic!(
"Too many frames! Asked for {} {}x{} frames on a {}x{} texture.",
count, width, height, tex_width, tex_height
);
}
let mut frame = Frame {
position: current_pos,
size: Vec2::new(width, height),
texcoords: unsafe { uninitialized() }
};
frame.generate_texcoords(tex_width, tex_height);
space.push(frame);
current_pos.x += width;
}
}
self.frame_texcoords_size = size_of::<Texcoords>() as i64 * count as i64;
}
// TODO man, should this be a destructor?
// A: NO
pub fn unload(&mut self) {
unsafe {
gl::DeleteTextures(1, &self.id);
}
}
}
// NOTE don't instantiate these willy nilly!
pub struct ImageAsset {
// I don't like wasting space with the pointer here, but
// it's hard to pass gl_data to a method called on this
// because of the borrow checker...
pub gl_data: *const GLData,
pub filename: &'static str,
pub vbo: GLuint,
pub set_attributes: extern "Rust" fn(GLuint),
pub shader: extern "Rust" fn(&GLData) -> &assets::Shader,
pub attributes_size: usize,
pub texture: Texture,
pub frame_width: usize,
pub frame_height: usize,
pub texcoord_count: usize,
// The next texcoord_count * size_of::<Texcoords>() bytes
// should be free for this struct to use.
}
impl ImageAsset {
pub unsafe fn texcoords(&mut self) -> &mut [Texcoords] {
let count_ptr: *mut usize = &mut self.texcoord_count;
slice::from_raw_parts_mut::<Texcoords>(
transmute(count_ptr.offset(1)),
self.texcoord_count
)
}
pub fn loaded(&self) -> bool { self.vbo != 0 }
pub unsafe fn load(&mut self) {
let mut texture = load_texture(self.filename);
texture.generate_texcoords_buffer(self.frame_width, self.frame_height, self.texcoords());
self.texture = texture;
gl::GenBuffers(1, &mut self.vbo);
}
pub unsafe fn empty_buffer_data(&mut self, count: i64, draw: GLenum) {
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
gl::BufferData(
gl::ARRAY_BUFFER,
count * self.attributes_size as GLsizeiptr,
ptr::null(),
draw
);
}
// Sets the texture, and the attributes
pub unsafe fn set(&mut self) {
let set_attributes = self.set_attributes;
let get_shader = self.shader;
let shader = get_shader(transmute(self.gl_data));
gl::UseProgram(shader.program);
self.texture.set(
shader.tex_uniform,
shader.sprite_size_uniform,
shader.frames_uniform,
self.frame_width as f32, self.frame_height as f32
);
set_attributes(self.vbo);
}
pub unsafe fn unload(&mut self) {
panic!("Unloading doesn't work yet hahahaha!");
}
}
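// The expected call order for a mutable ImageAsset, pieced together from the
// methods above as a sketch (the instance count and draw hint are arbitrary):
//
//     unsafe {
//         asset.load();                                   // upload texture, create the VBO
//         asset.empty_buffer_data(128, gl::DYNAMIC_DRAW); // reserve per-instance attribute space
//         asset.set();                                    // bind texture, shader and attributes
//     }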
// Load a texture from the given filename into the GPU
// memory, returning a struct holding the OpenGL ID and
// dimensions.
pub fn load_texture(filename: &'static str) -> Texture {
let mut width = 0; let mut height = 0; let mut comp = 0;
let mut tex_id: GLuint = 0;
unsafe {
let cfilename = CString::new(filename.to_string()).unwrap();
let img = stbi_load(cfilename.as_ptr(), &mut width, &mut height, &mut comp, 4);
assert_eq!(comp, 4);
gl::GenTextures(1, &mut tex_id);
gl::BindTexture(gl::TEXTURE_2D, tex_id);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::NEAREST as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::NEAREST as GLint);
println!("Sending {} to GPU. Width: {} Height: {}", filename, width, height);
gl::TexImage2D(
gl::TEXTURE_2D, 0, gl::RGBA as i32,
width, height, 0, gl::RGBA,
gl::UNSIGNED_BYTE, transmute(img)
);
stbi_image_free(img);
}
Texture {
id: tex_id,
width: width,
height: height,
filename: filename,
frame_texcoords_size: 0,
texcoords_space: &mut []
}
}
pub fn create_program(vert: String, frag: String) -> Option<GLuint> {
let vert_id = make_shader!((vert): VERTEX_SHADER);
let vert_result: bool = check_log!(
"VERTEX SHADER",
GetShaderiv | GetShaderInfoLog
vert_id COMPILE_STATUS
println
);
if !vert_result {
unsafe { gl::DeleteShader(vert_id); }
return None;
}
let frag_id = make_shader!((frag): FRAGMENT_SHADER);
let frag_result: bool = check_log!(
"FRAGMENT SHADER",
GetShaderiv | GetShaderInfoLog
frag_id COMPILE_STATUS
println
);
if !frag_result {
unsafe { gl::DeleteShader(vert_id); }
unsafe { gl::DeleteShader(frag_id); }
return None;
}
let program_id = unsafe { gl::CreateProgram() };
unsafe {
gl::AttachShader(program_id, vert_id);
gl::AttachShader(program_id, frag_id);
gl::LinkProgram(program_id);
}
let link_result = check_log!(
"SHADER PROGRAM",
GetProgramiv | GetProgramInfoLog
program_id LINK_STATUS
println
);
if !link_result {
unsafe { gl::DeleteProgram(program_id); }
unsafe { gl::DeleteShader(vert_id); }
unsafe { gl::DeleteShader(frag_id); }
return None;
}
unsafe {
gl::DeleteShader(vert_id);
gl::DeleteShader(frag_id);
}
Some(program_id)
}
| {
gl::Uniform2fv(
frames_uniform,
frames_len as GLint * 4,
transmute(&(&*self.texcoords_space)[0])
);
} | conditional_block |
render.rs | extern crate gl;
extern crate libc;
use vecmath::Vec2;
use gl::types::*;
use std::ffi::CString;
use libc::{c_char, c_int};
use std::mem::{uninitialized, transmute, size_of};
use std::ptr;
use std::slice;
use std::vec::Vec;
use assets;
macro_rules! check_error(
() => (
match gl::GetError() {
gl::NO_ERROR => {}
gl::INVALID_ENUM => panic!("Invalid enum!"),
gl::INVALID_VALUE => panic!("Invalid value!"),
gl::INVALID_OPERATION => panic!("Invalid operation!"),
gl::INVALID_FRAMEBUFFER_OPERATION => panic!("Invalid framebuffer operation?!"),
gl::OUT_OF_MEMORY => panic!("Out of memory bro!!!!!!!"),
_ => panic!("I DON'T KNOW. FULL BANANNACAKES.")
}
)
);
extern "C" {
fn stbi_load(
filename: *const c_char,
x: *mut c_int,
y: *mut c_int,
components: *mut c_int,
force_components: c_int
) -> *const u8;
fn stbi_image_free(ptr: *const u8);
}
// Global GL game state
pub struct GLData {
pub vao: GLuint,
pub square_vbo: GLuint,
pub square_ebo: GLuint,
pub images: assets::Images,
pub shaders: assets::Shaders
}
// TODO all this stuff is no longer used and is managed by assets instead.
pub static ATTR_VERTEX_POS: u32 = 0;
pub static ATTR_POSITION: u32 = 1;
pub static ATTR_FRAME: u32 = 2;
pub static ATTR_FLIPPED: u32 = 3;
pub static FRAME_UNIFORM_MAX: i64 = 256;
pub static STANDARD_VERTEX: &'static str = "
#version 330 core
// Per vertex, normalized:
layout (location = 0) in vec2 vertex_pos;
// Per instance:
layout (location = 1) in vec2 position; // in pixels
layout (location = 2) in int frame;
layout (location = 3) in int flipped; // actually a bool
// NOTE up this if you run into problems
uniform vec2[256] frames;
uniform vec2 screen_size;
uniform vec2 cam_pos; // in pixels
uniform vec2 sprite_size; // in pixels
uniform float scale;
out vec2 texcoord;
const vec2 TEXCOORD_FROM_ID[4] = vec2[4](
vec2(1.0, 1.0), vec2(1.0, 0.0),
vec2(0.0, 0.0), vec2(0.0, 1.0)
);
vec2 from_pixel(vec2 pos)
{
return pos / screen_size;
}
int flipped_vertex_id()
{
return 3 - gl_VertexID;
}
void main()
{
vec2 pixel_screen_pos = (position - cam_pos) * 2;
gl_Position = vec4(
(vertex_pos * from_pixel(sprite_size) + from_pixel(pixel_screen_pos)) * scale,
0.0f, 1.0f
);
int index = flipped != 0 ? flipped_vertex_id() : gl_VertexID;
if (frame == -1)
texcoord = TEXCOORD_FROM_ID[index];
else
texcoord = frames[frame * 4 + index];
texcoord.y = 1 - texcoord.y;
}
";
pub static STANDARD_FRAGMENT: &'static str = "
#version 330 core
in vec2 texcoord;
out vec4 color;
uniform sampler2D tex;
void main()
{ |
macro_rules! check_log(
($typ:expr, $get_iv:ident | $get_log:ident $val:ident $status:ident $on_error:ident) => (
unsafe {
let mut status = 0;
gl::$get_iv($val, gl::$status, &mut status);
if status == 0 {
let mut len = 0;
gl::$get_iv($val, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = Vec::with_capacity(len as usize - 1);
for _ in (0..len-1) { buf.push(0); }
gl::$get_log($val, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
match String::from_utf8(buf) {
Ok(error_message) => $on_error!("{}", error_message),
Err(e) => $on_error!("Error parsing OpenGL error message: {}", e)
}
false
} else {
println!("I THINK THE {} COMPILED", $typ);
true
}
}
)
);
macro_rules! make_shader(
(($name:expr): $shader_type:ident) => (
unsafe {
let sh = gl::CreateShader(gl::$shader_type);
let shader_src_str = CString::new($name).unwrap();
gl::ShaderSource(sh, 1, &shader_src_str.as_ptr(), ptr::null());
gl::CompileShader(sh);
sh
}
)
);
pub struct Texcoords {
pub top_right: Vec2<GLfloat>,
pub bottom_right: Vec2<GLfloat>,
pub bottom_left: Vec2<GLfloat>,
pub top_left: Vec2<GLfloat>
}
impl Texcoords {
pub unsafe fn copy_to(&self, dest: *mut Texcoords) {
ptr::copy(self, dest, 1);
}
}
// Represents an animation frame; a square section of a Texture.
pub struct Frame {
pub position: Vec2<f32>,
pub size: Vec2<f32>,
// Texcoords are generated via #generate_texcoords.
pub texcoords: Texcoords
}
impl Frame {
pub fn generate_texcoords(&mut self, tex_width: f32, tex_height: f32) {
let ref position = self.position;
let ref size = self.size;
// TODO SIMD this son of a bitch
self.texcoords = Texcoords {
top_right: Vec2::new(
(position.x + size.x) / tex_width,
(position.y + size.y) / tex_height
),
bottom_right: Vec2::new(
(position.x + size.x) / tex_width,
(position.y) / tex_height
),
bottom_left: Vec2::new(
(position.x) / tex_width,
(position.y) / tex_height
),
top_left: Vec2::new(
(position.x) / tex_width,
(position.y + size.y) / tex_height
)
};
}
}
// Represents an actual texture that is currently on the GPU.
#[allow(missing_copy_implementations)]
pub struct Texture {
pub id: GLuint,
pub width: i32,
pub height: i32,
pub filename: &'static str,
pub frame_texcoords_size: i64,
pub texcoords_space: *mut [Texcoords]
}
impl Texture {
pub fn set_full(&self, sampler_uniform: GLint, sprite_size_uniform: GLint) {
unsafe {
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, self.id);
gl::Uniform1i(sampler_uniform, 0);
gl::Uniform2f(sprite_size_uniform, self.width as f32, self.height as f32);
}
}
#[inline]
pub fn texcoords(&self) -> &[Texcoords] {
unsafe { transmute(self.texcoords_space) }
}
#[inline]
pub fn texcoords_mut(&mut self) -> &mut [Texcoords] {
unsafe { transmute(self.texcoords_space) }
}
// NOTE this expects #generate_texcoords_buffer to have been called
// if there are frames.
pub fn set(&self, sampler_uniform: GLint,
sprite_size_uniform: GLint,
frames_uniform: GLint,
width: f32, height: f32) {
unsafe {
assert!(self.frame_texcoords_size / 8 < FRAME_UNIFORM_MAX);
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, self.id);
gl::Uniform1i(sampler_uniform, 0);
gl::Uniform2f(sprite_size_uniform, width as f32, height as f32);
let frames_len = self.texcoords().len();
if frames_len > 0 {
gl::Uniform2fv(
frames_uniform,
frames_len as GLint * 4,
transmute(&(&*self.texcoords_space)[0])
);
}
}
}
/*
fn put_texcoord(&mut self, index: usize, texcoord: Texcoords) {
self.texcoords_mut()[index] = texcoord;
}
*/
// NOTE this should be properly merged with add_frames.
pub fn generate_texcoords_buffer(
&mut self, frame_width: usize, frame_height: usize, space: *mut [Texcoords]
) {
unsafe {
let frames_len = (*space).len();
let mut frames = Vec::<Frame>::with_capacity(frames_len);
self.add_frames(&mut frames, frame_width, frame_height);
assert_eq!(frames.len(), frames_len); // PLZ
self.texcoords_space = space;
for i in (0..frames_len) {
frames[i].texcoords.copy_to(&mut self.texcoords_mut()[i]);
}
}
}
// Fill the given slice with frames of the given width and height.
// So this is now called only by #generate_texcoords_buffer
pub fn add_frames(&mut self, space: &mut Vec<Frame>, uwidth: usize, uheight: usize) {
let count = space.capacity();
let tex_width = self.width as f32;
let tex_height = self.height as f32;
let width = uwidth as f32;
let height = uheight as f32;
{
let mut current_pos = Vec2::<f32>::new(0.0, tex_height - height);
for _ in (0..count) {
if current_pos.x + width > tex_width {
current_pos.x = 0.0;
current_pos.y -= height;
}
if current_pos.y < 0.0 {
panic!(
"Too many frames! Asked for {} {}x{} frames on a {}x{} texture.",
count, width, height, tex_width, tex_height
);
}
let mut frame = Frame {
position: current_pos,
size: Vec2::new(width, height),
texcoords: unsafe { uninitialized() }
};
frame.generate_texcoords(tex_width, tex_height);
space.push(frame);
current_pos.x += width;
}
}
self.frame_texcoords_size = size_of::<Texcoords>() as i64 * count as i64;
}
// TODO man, should this be a destructor?
// A: NO
pub fn unload(&mut self) {
unsafe {
gl::DeleteTextures(1, &self.id);
}
}
}
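// The note above deliberately rejects a destructor; for contrast, the
// Drop-based alternative would be the following sketch (not used here):
//
//     impl Drop for Texture {
//         fn drop(&mut self) {
//             unsafe { gl::DeleteTextures(1, &self.id); }
//         }
//     }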
// NOTE don't instantiate these willy nilly!
pub struct ImageAsset {
// I don't like wasting space with the pointer here, but
// it's hard to pass gl_data to a method called on this
// because of the borrow checker...
pub gl_data: *const GLData,
pub filename: &'static str,
pub vbo: GLuint,
pub set_attributes: extern "Rust" fn(GLuint),
pub shader: extern "Rust" fn(&GLData) -> &assets::Shader,
pub attributes_size: usize,
pub texture: Texture,
pub frame_width: usize,
pub frame_height: usize,
pub texcoord_count: usize,
// The next texcoord_count * size_of::<Texcoords>() bytes
// should be free for this struct to use.
}
impl ImageAsset {
pub unsafe fn texcoords(&mut self) -> &mut [Texcoords] {
let count_ptr: *mut usize = &mut self.texcoord_count;
slice::from_raw_parts_mut::<Texcoords>(
transmute(count_ptr.offset(1)),
self.texcoord_count
)
}
pub fn loaded(&self) -> bool { self.vbo != 0 }
pub unsafe fn load(&mut self) {
let mut texture = load_texture(self.filename);
texture.generate_texcoords_buffer(self.frame_width, self.frame_height, self.texcoords());
self.texture = texture;
gl::GenBuffers(1, &mut self.vbo);
}
pub unsafe fn empty_buffer_data(&mut self, count: i64, draw: GLenum) {
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
gl::BufferData(
gl::ARRAY_BUFFER,
count * self.attributes_size as GLsizeiptr,
ptr::null(),
draw
);
}
// Sets the texture, and the attributes
pub unsafe fn set(&mut self) {
let set_attributes = self.set_attributes;
let get_shader = self.shader;
let shader = get_shader(transmute(self.gl_data));
gl::UseProgram(shader.program);
self.texture.set(
shader.tex_uniform,
shader.sprite_size_uniform,
shader.frames_uniform,
self.frame_width as f32, self.frame_height as f32
);
set_attributes(self.vbo);
}
pub unsafe fn unload(&mut self) {
panic!("Unloading doesn't work yet hahahaha!");
}
}
// Load a texture from the given filename into the GPU
// memory, returning a struct holding the OpenGL ID and
// dimensions.
pub fn load_texture(filename: &'static str) -> Texture {
let mut width = 0; let mut height = 0; let mut comp = 0;
let mut tex_id: GLuint = 0;
unsafe {
let cfilename = CString::new(filename.to_string()).unwrap();
let img = stbi_load(cfilename.as_ptr(), &mut width, &mut height, &mut comp, 4);
assert_eq!(comp, 4);
gl::GenTextures(1, &mut tex_id);
gl::BindTexture(gl::TEXTURE_2D, tex_id);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::NEAREST as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::NEAREST as GLint);
println!("Sending {} to GPU. Width: {} Height: {}", filename, width, height);
gl::TexImage2D(
gl::TEXTURE_2D, 0, gl::RGBA as i32,
width, height, 0, gl::RGBA,
gl::UNSIGNED_BYTE, transmute(img)
);
stbi_image_free(img);
}
Texture {
id: tex_id,
width: width,
height: height,
filename: filename,
frame_texcoords_size: 0,
texcoords_space: &mut []
}
}
pub fn create_program(vert: String, frag: String) -> Option<GLuint> {
let vert_id = make_shader!((vert): VERTEX_SHADER);
let vert_result: bool = check_log!(
"VERTEX SHADER",
GetShaderiv | GetShaderInfoLog
vert_id COMPILE_STATUS
println
);
if !vert_result {
unsafe { gl::DeleteShader(vert_id); }
return None;
}
let frag_id = make_shader!((frag): FRAGMENT_SHADER);
let frag_result: bool = check_log!(
"FRAGMENT SHADER",
GetShaderiv | GetShaderInfoLog
frag_id COMPILE_STATUS
println
);
if !frag_result {
unsafe { gl::DeleteShader(vert_id); }
unsafe { gl::DeleteShader(frag_id); }
return None;
}
let program_id = unsafe { gl::CreateProgram() };
unsafe {
gl::AttachShader(program_id, vert_id);
gl::AttachShader(program_id, frag_id);
gl::LinkProgram(program_id);
}
let link_result = check_log!(
"SHADER PROGRAM",
GetProgramiv | GetProgramInfoLog
program_id LINK_STATUS
println
);
if !link_result {
unsafe { gl::DeleteProgram(program_id); }
unsafe { gl::DeleteShader(vert_id); }
unsafe { gl::DeleteShader(frag_id); }
return None;
}
unsafe {
gl::DeleteShader(vert_id);
gl::DeleteShader(frag_id);
}
Some(program_id)
} | color = texture(tex, texcoord);
}
"; | random_line_split |
render.rs | extern crate gl;
extern crate libc;
use vecmath::Vec2;
use gl::types::*;
use std::ffi::CString;
use libc::{c_char, c_int};
use std::mem::{uninitialized, transmute, size_of};
use std::ptr;
use std::slice;
use std::vec::Vec;
use assets;
macro_rules! check_error(
() => (
match gl::GetError() {
gl::NO_ERROR => {}
gl::INVALID_ENUM => panic!("Invalid enum!"),
gl::INVALID_VALUE => panic!("Invalid value!"),
gl::INVALID_OPERATION => panic!("Invalid operation!"),
gl::INVALID_FRAMEBUFFER_OPERATION => panic!("Invalid framebuffer operation?!"),
gl::OUT_OF_MEMORY => panic!("Out of memory bro!!!!!!!"),
_ => panic!("I DON'T KNOW. FULL BANANNACAKES.")
}
)
);
extern "C" {
fn stbi_load(
filename: *const c_char,
x: *mut c_int,
y: *mut c_int,
components: *mut c_int,
force_components: c_int
) -> *const u8;
fn stbi_image_free(ptr: *const u8);
}
// Global GL game state
pub struct GLData {
pub vao: GLuint,
pub square_vbo: GLuint,
pub square_ebo: GLuint,
pub images: assets::Images,
pub shaders: assets::Shaders
}
// TODO all this stuff is no longer used and is managed by assets instead.
pub static ATTR_VERTEX_POS: u32 = 0;
pub static ATTR_POSITION: u32 = 1;
pub static ATTR_FRAME: u32 = 2;
pub static ATTR_FLIPPED: u32 = 3;
pub static FRAME_UNIFORM_MAX: i64 = 256;
pub static STANDARD_VERTEX: &'static str = "
#version 330 core
// Per vertex, normalized:
layout (location = 0) in vec2 vertex_pos;
// Per instance:
layout (location = 1) in vec2 position; // in pixels
layout (location = 2) in int frame;
layout (location = 3) in int flipped; // actually a bool
// NOTE up this if you run into problems
uniform vec2[256] frames;
uniform vec2 screen_size;
uniform vec2 cam_pos; // in pixels
uniform vec2 sprite_size; // in pixels
uniform float scale;
out vec2 texcoord;
const vec2 TEXCOORD_FROM_ID[4] = vec2[4](
vec2(1.0, 1.0), vec2(1.0, 0.0),
vec2(0.0, 0.0), vec2(0.0, 1.0)
);
vec2 from_pixel(vec2 pos)
{
return pos / screen_size;
}
int flipped_vertex_id()
{
return 3 - gl_VertexID;
}
void main()
{
vec2 pixel_screen_pos = (position - cam_pos) * 2;
gl_Position = vec4(
(vertex_pos * from_pixel(sprite_size) + from_pixel(pixel_screen_pos)) * scale,
0.0f, 1.0f
);
int index = flipped != 0 ? flipped_vertex_id() : gl_VertexID;
if (frame == -1)
texcoord = TEXCOORD_FROM_ID[index];
else
texcoord = frames[frame * 4 + index];
texcoord.y = 1 - texcoord.y;
}
";
pub static STANDARD_FRAGMENT: &'static str = "
#version 330 core
in vec2 texcoord;
out vec4 color;
uniform sampler2D tex;
void main()
{
color = texture(tex, texcoord);
}
";
macro_rules! check_log(
($typ:expr, $get_iv:ident | $get_log:ident $val:ident $status:ident $on_error:ident) => (
unsafe {
let mut status = 0;
gl::$get_iv($val, gl::$status, &mut status);
if status == 0 {
let mut len = 0;
gl::$get_iv($val, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = Vec::with_capacity(len as usize - 1);
for _ in 0..len - 1 { buf.push(0); }
gl::$get_log($val, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
match String::from_utf8(buf) {
Ok(error_message) => $on_error!("{}", error_message),
Err(e) => $on_error!("Error parsing OpenGL error message: {}", e)
}
false
} else {
println!("I THINK THE {} COMPILED", $typ);
true
}
}
)
);
macro_rules! make_shader(
(($name:expr): $shader_type:ident) => (
unsafe {
let sh = gl::CreateShader(gl::$shader_type);
let shader_src_str = CString::new($name).unwrap();
gl::ShaderSource(sh, 1, &shader_src_str.as_ptr(), ptr::null());
gl::CompileShader(sh);
sh
}
)
);
pub struct Texcoords {
pub top_right: Vec2<GLfloat>,
pub bottom_right: Vec2<GLfloat>,
pub bottom_left: Vec2<GLfloat>,
pub top_left: Vec2<GLfloat>
}
impl Texcoords {
pub unsafe fn copy_to(&self, dest: *mut Texcoords) {
ptr::copy(self, dest, 1);
}
}
// Represents an animation frame; a square section of a Texture.
pub struct Frame {
pub position: Vec2<f32>,
pub size: Vec2<f32>,
// Texcoords are generated via #generate_texcoords.
pub texcoords: Texcoords
}
impl Frame {
pub fn generate_texcoords(&mut self, tex_width: f32, tex_height: f32) {
let ref position = self.position;
let ref size = self.size;
// TODO SIMD this son of a bitch
self.texcoords = Texcoords {
top_right: Vec2::new(
(position.x + size.x) / tex_width,
(position.y + size.y) / tex_height
),
bottom_right: Vec2::new(
(position.x + size.x) / tex_width,
(position.y) / tex_height
),
bottom_left: Vec2::new(
(position.x) / tex_width,
(position.y) / tex_height
),
top_left: Vec2::new(
(position.x) / tex_width,
(position.y + size.y) / tex_height
)
};
}
}
// Represents an actual texture that is currently on the GPU.
#[allow(missing_copy_implementations)]
pub struct Texture {
pub id: GLuint,
pub width: i32,
pub height: i32,
pub filename: &'static str,
pub frame_texcoords_size: i64,
pub texcoords_space: *mut [Texcoords]
}
impl Texture {
pub fn set_full(&self, sampler_uniform: GLint, sprite_size_uniform: GLint) {
unsafe {
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, self.id);
gl::Uniform1i(sampler_uniform, 0);
gl::Uniform2f(sprite_size_uniform, self.width as f32, self.height as f32);
}
}
#[inline]
pub fn texcoords(&self) -> &[Texcoords] {
unsafe { transmute(self.texcoords_space) }
}
#[inline]
pub fn texcoords_mut(&mut self) -> &mut [Texcoords] {
unsafe { transmute(self.texcoords_space) }
}
// NOTE this expects #generate_texcoords_buffer to have been called
// if there are frames.
pub fn set(&self, sampler_uniform: GLint,
sprite_size_uniform: GLint,
frames_uniform: GLint,
width: f32, height: f32) {
unsafe {
assert!(self.frame_texcoords_size / 8 < FRAME_UNIFORM_MAX);
gl::ActiveTexture(gl::TEXTURE0);
gl::BindTexture(gl::TEXTURE_2D, self.id);
gl::Uniform1i(sampler_uniform, 0);
gl::Uniform2f(sprite_size_uniform, width as f32, height as f32);
let frames_len = self.texcoords().len();
if frames_len > 0 {
gl::Uniform2fv(
frames_uniform,
frames_len as GLint * 4,
transmute(&(&*self.texcoords_space)[0])
);
}
}
}
/*
fn put_texcoord(&mut self, index: usize, texcoord: Texcoords) {
self.texcoords_mut()[index] = texcoord;
}
*/
// NOTE this should be properly merged with add_frames.
pub fn generate_texcoords_buffer(
&mut self, frame_width: usize, frame_height: usize, space: *mut [Texcoords]
) {
unsafe {
let frames_len = (*space).len();
let mut frames = Vec::<Frame>::with_capacity(frames_len);
self.add_frames(&mut frames, frame_width, frame_height);
assert_eq!(frames.len(), frames_len); // PLZ
self.texcoords_space = space;
for i in 0..frames_len {
frames[i].texcoords.copy_to(&mut self.texcoords_mut()[i]);
}
}
}
// Fill the given slice with frames of the given width and height.
// So this is now called only by #generate_texcoords_buffer
pub fn add_frames(&mut self, space: &mut Vec<Frame>, uwidth: usize, uheight: usize) {
let count = space.capacity();
let tex_width = self.width as f32;
let tex_height = self.height as f32;
let width = uwidth as f32;
let height = uheight as f32;
{
let mut current_pos = Vec2::<f32>::new(0.0, tex_height - height);
for _ in 0..count {
if current_pos.x + width > tex_width {
current_pos.x = 0.0;
current_pos.y -= height;
}
if current_pos.y < 0.0 {
panic!(
"Too many frames! Asked for {} {}x{} frames on a {}x{} texture.",
count, width, height, tex_width, tex_height
);
}
let mut frame = Frame {
position: current_pos,
size: Vec2::new(width, height),
texcoords: unsafe { uninitialized() }
};
frame.generate_texcoords(tex_width, tex_height);
space.push(frame);
current_pos.x += width;
}
}
self.frame_texcoords_size = size_of::<Texcoords>() as i64 * count as i64;
}
// TODO man, should this be a destructor?
// A: NO
pub fn unload(&mut self) {
unsafe {
gl::DeleteTextures(1, &self.id);
}
}
}
// NOTE don't instantiate these willy nilly!
pub struct ImageAsset {
// I don't like wasting space with the pointer here, but
// it's hard to pass gl_data to a method called on this
// because of the borrow checker...
pub gl_data: *const GLData,
pub filename: &'static str,
pub vbo: GLuint,
pub set_attributes: extern "Rust" fn(GLuint),
pub shader: extern "Rust" fn(&GLData) -> &assets::Shader,
pub attributes_size: usize,
pub texture: Texture,
pub frame_width: usize,
pub frame_height: usize,
pub texcoord_count: usize,
// The next texcoord_count * size_of::<Texcoords>() bytes
// should be free for this struct to use.
}
impl ImageAsset {
pub unsafe fn texcoords(&mut self) -> &mut [Texcoords] {
let count_ptr: *mut usize = &mut self.texcoord_count;
slice::from_raw_parts_mut::<Texcoords>(
transmute(count_ptr.offset(1)),
self.texcoord_count
)
}
pub fn loaded(&self) -> bool { self.vbo != 0 }
pub unsafe fn load(&mut self) {
let mut texture = load_texture(self.filename);
texture.generate_texcoords_buffer(self.frame_width, self.frame_height, self.texcoords());
self.texture = texture;
gl::GenBuffers(1, &mut self.vbo);
}
pub unsafe fn empty_buffer_data(&mut self, count: i64, draw: GLenum) {
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo);
gl::BufferData(
gl::ARRAY_BUFFER,
count * self.attributes_size as GLsizeiptr,
ptr::null(),
draw
);
}
// Sets the texture, and the attributes
pub unsafe fn set(&mut self) {
let set_attributes = self.set_attributes;
let get_shader = self.shader;
let shader = get_shader(transmute(self.gl_data));
gl::UseProgram(shader.program);
self.texture.set(
shader.tex_uniform,
shader.sprite_size_uniform,
shader.frames_uniform,
self.frame_width as f32, self.frame_height as f32
);
set_attributes(self.vbo);
}
pub unsafe fn unload(&mut self) {
panic!("Unloading doesn't work yet hahahaha!");
}
}
// Load a texture from the given filename into the GPU
// memory, returning a struct holding the OpenGL ID and
// dimensions.
pub fn load_texture(filename: &'static str) -> Texture {
let mut width = 0; let mut height = 0; let mut comp = 0;
let mut tex_id: GLuint = 0;
unsafe {
let cfilename = CString::new(filename.to_string()).unwrap();
let img = stbi_load(cfilename.as_ptr(), &mut width, &mut height, &mut comp, 4);
assert_eq!(comp, 4);
gl::GenTextures(1, &mut tex_id);
gl::BindTexture(gl::TEXTURE_2D, tex_id);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::NEAREST as GLint);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::NEAREST as GLint);
println!("Sending {} to GPU. Width: {} Height: {}", filename, width, height);
gl::TexImage2D(
gl::TEXTURE_2D, 0, gl::RGBA as i32,
width, height, 0, gl::RGBA,
gl::UNSIGNED_BYTE, transmute(img)
);
stbi_image_free(img);
}
Texture {
id: tex_id,
width: width,
height: height,
filename: filename,
frame_texcoords_size: 0,
texcoords_space: &mut []
}
}
pub fn create_program(vert: String, frag: String) -> Option<GLuint> {
let vert_id = make_shader!((vert): VERTEX_SHADER);
let vert_result: bool = check_log!(
"VERTEX SHADER",
GetShaderiv | GetShaderInfoLog
vert_id COMPILE_STATUS
println
);
if !vert_result {
unsafe { gl::DeleteShader(vert_id); }
return None;
}
let frag_id = make_shader!((frag): FRAGMENT_SHADER);
let frag_result: bool = check_log!(
"FRAGMENT SHADER",
GetShaderiv | GetShaderInfoLog
frag_id COMPILE_STATUS
println
);
if !frag_result {
unsafe { gl::DeleteShader(vert_id); }
unsafe { gl::DeleteShader(frag_id); }
return None;
}
let program_id = unsafe { gl::CreateProgram() };
unsafe {
gl::AttachShader(program_id, vert_id);
gl::AttachShader(program_id, frag_id);
gl::LinkProgram(program_id);
}
let link_result = check_log!(
"SHADER PROGRAM",
GetProgramiv | GetProgramInfoLog
program_id LINK_STATUS
println
);
if !link_result {
unsafe { gl::DeleteProgram(program_id); }
unsafe { gl::DeleteShader(vert_id); }
unsafe { gl::DeleteShader(frag_id); }
return None;
}
unsafe {
gl::DeleteShader(vert_id);
gl::DeleteShader(frag_id);
}
Some(program_id)
}
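// Hedged usage sketch (editor addition, not in the original file): how
// `load_texture` and `create_program` might be combined at startup. Assumes a
// current OpenGL context; the function name and "player.png" are illustrative.
#[allow(dead_code)]
pub fn init_render_resources() -> Option<(Texture, GLuint)> {
// Upload the sprite sheet to the GPU (asserts that decoding yields RGBA).
let texture = load_texture("player.png");
// Compile and link the built-in sprite shaders; `None` means a compile or
// link error was already reported by `check_log!`.
let program = create_program(
STANDARD_VERTEX.to_string(),
STANDARD_FRAGMENT.to_string(),
)?;
Some((texture, program))
}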
// vault.rs
// ENV: https://www.vaultproject.io/docs/commands/#environment-variables
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt::Debug;
use log::{debug, info, warn};
use reqwest::{Client as HttpClient, ClientBuilder};
use serde::{Deserialize, Serialize};
/// Vault API Client
#[derive(Clone, Debug)]
pub struct Client {
token: crate::Secret,
address: String,
client: HttpClient,
revoke_self_on_drop: bool,
}
/// Generic Vault Response
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(untagged)]
#[allow(clippy::large_enum_variant)]
pub enum Response {
/// An error response
Error {
/// List of errors returned from Vault
errors: Vec<String>,
},
/// A successful response
Response(ResponseData),
}
/// Vault General Response Data
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct ResponseData {
/// Request UUID
request_id: String,
/// Lease ID for secrets
lease_id: String,
/// Renewable for secrets
renewable: bool,
/// Lease duration for secrets
lease_duration: u64,
/// Warnings, if any
#[serde(default)]
warnings: Option<Vec<String>>,
/// Auth data for authentication requests
#[serde(default)]
auth: Option<Authentication>,
/// Data for secrets requests
#[serde(default)]
data: Option<HashMap<String, String>>,
// Missing and ignored fields:
// - wrap_info
}
/// Authentication data from Vault
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct Authentication {
/// The actual token
pub client_token: crate::Secret,
/// The accessor for the Token
pub accessor: String,
/// List of policies for token, including from Identity
pub policies: Vec<String>,
/// List of tokens directly assigned to token
pub token_policies: Vec<String>,
/// Arbitrary metadata
pub metadata: HashMap<String, String>,
/// Lease Duration for the token
pub lease_duration: u64,
/// Whether the token is renewable
pub renewable: bool,
/// UUID for the entity
pub entity_id: String,
/// Type of token
pub token_type: TokenType,
}
/// Type of token from Vault
/// See [Vault Documentation](https://www.vaultproject.io/docs/concepts/tokens.html#token-types-in-detail)
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum TokenType {
/// Long lived service tokens
Service,
/// Short lived batch tokens
Batch,
}
/// Payload to send to Vault for logging in via AWS IAM
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct AwsIamLoginPayload<'a, 'b> {
pub role: &'a str,
#[serde(borrow, flatten)]
pub aws_payload: Cow<'b, crate::aws::VaultAwsAuthIamPayload>,
}
impl Client {
/// Create a new API client from an existing Token
///
/// You can optionally provide a `reqwest::Client` if you have specific needs like custom root
/// CA certificate or require client authentication
#[allow(clippy::new_ret_no_self)]
pub fn new<S1, S2>(
vault_address: S1,
vault_token: S2,
revoke_self_on_drop: bool,
client: Option<HttpClient>,
) -> Result<Self, crate::Error>
where
S1: AsRef<str>,
S2: AsRef<str>,
{
let client = match client {
Some(client) => client,
None => ClientBuilder::new().build()?,
};
Ok(Self {
address: vault_address.as_ref().to_string(),
token: crate::Secret(vault_token.as_ref().to_string()),
revoke_self_on_drop,
client,
})
}
/// Returns the Vault Token
pub fn token(&self) -> &str {
&self.token
}
/// Returns the Vault address
pub fn address(&self) -> &str {
&self.address
}
/// Returns the HTTP Client
pub fn http_client(&self) -> &HttpClient {
&self.client
}
fn execute_request<T>(client: &HttpClient, request: reqwest::Request) -> Result<T, crate::Error>
where
T: serde::de::DeserializeOwned + Debug,
{
debug!("Executing request: {:#?}", request);
let mut response = client.execute(request)?;
debug!("Response received: {:#?}", response);
let body = response.text()?;
debug!("Response body: {}", body);
let result = serde_json::from_str(&body)?;
debug!("Deserialized body: {:#?}", result);
Ok(result)
}
fn execute_request_no_body(
client: &HttpClient,
request: reqwest::Request,
) -> Result<(), crate::Error> {
debug!("Executing request: {:#?}", request);
let response = client.execute(request)?;
debug!("Response received: {:#?}", response);
Ok(())
}
/// Login with AWS IAM authentication method. Returns a Vault token on success
///
/// - `address`: Address of Vault Server. Include the scheme (e.g. `https`) and the host with an
/// optional port
/// - `path`: Path to the AWS authentication engine. Usually just `aws`.
/// - `role`: Name of the AWS authentication role
/// - `payload`: Authentication payload from calling `aws::VaultAwsAuthIamPayload::new`
///
/// You can optionally provide a `reqwest::Client` if you have specific needs like custom root
/// CA certificate or require client authentication
pub fn login_aws_iam(
vault_address: &str,
aws_auth_path: &str,
aws_auth_role: &str,
aws_payload: &crate::aws::VaultAwsAuthIamPayload,
client: Option<HttpClient>,
) -> Result<Self, crate::Error> {
info!(
"Logging in to Vault with AWS Credentials at path `{}` and role `{}",
aws_auth_path, aws_auth_role
);
let client = match client {
Some(client) => client,
None => ClientBuilder::new().build()?,
};
let request = Self::build_login_aws_iam_request(
vault_address,
aws_auth_path,
aws_auth_role,
aws_payload,
&client,
)?;
let response: Response = Self::execute_request(&client, request)?;
let token = match response {
Response::Error { errors } => {
Err(crate::Error::InvalidVaultResponse(errors.join("; ")))?
}
Response::Response(ResponseData {
auth: Some(auth),..
}) => Ok(auth.client_token),
_ => Err(crate::Error::InvalidVaultResponse(
"Missing authentication data".to_string(),
)),
}?;
info!("Vault authentication successful. Received Vault Token");
Ok(Self {
address: vault_address.to_string(),
token,
revoke_self_on_drop: true,
client,
})
}
fn build_login_aws_iam_request(
vault_address: &str,
aws_auth_path: &str,
aws_auth_role: &str,
aws_payload: &crate::aws::VaultAwsAuthIamPayload,
client: &HttpClient,
) -> Result<reqwest::Request, crate::Error> {
let vault_address = url::Url::parse(vault_address)?;
let vault_address = vault_address.join(&format!("/v1/auth/{}/login", aws_auth_path))?;
let payload = AwsIamLoginPayload {
role: aws_auth_role,
aws_payload: Cow::Borrowed(aws_payload),
};
Ok(client.post(vault_address).json(&payload).build()?)
}
/// Get a token from Nomad Secrets Engine
///
/// You can optionally provide a `reqwest::Client` if you have specific needs like custom root
/// CA certificate or require client authentication
pub fn get_nomad_token(
&self,
nomad_path: &str,
nomad_role: &str,
) -> Result<crate::Secret, crate::Error> {
info!(
"Retrieving Nomad Token from Secrets engine mounted at `{}` with role `{}`",
nomad_path, nomad_role
);
let request = self.build_nomad_token_request(nomad_path, nomad_role)?;
let response: Response = Self::execute_request(&self.client, request)?;
Ok(From::from(match response {
Response::Error { errors } => {
Err(crate::Error::InvalidVaultResponse(errors.join("; ")))?
}
Response::Response(ResponseData {
data: Some(mut data),
..
}) => data.remove("secret_id").ok_or_else(|| {
crate::Error::InvalidVaultResponse("Missing Nomad token from response".to_string())
})?,
_ => Err(crate::Error::InvalidVaultResponse(
"Missing secrets data".to_string(),
))?,
}))
}
/// Revoke the Vault token itself
///
/// If successful, the Vault Token can no longer be used
pub fn revoke_self(&self) -> Result<(), crate::Error> {
info!("Revoking self Vault Token");
let request = self.build_revoke_self_request()?;
// HTTP 204 is returned
Self::execute_request_no_body(&self.client, request)?;
Ok(())
}
fn build_revoke_self_request(&self) -> Result<reqwest::Request, crate::Error> {
let vault_address = url::Url::parse(self.address())?;
let vault_address = vault_address.join("/v1/auth/token/revoke-self")?;
Ok(self
.client
.post(vault_address)
.header("X-Vault-Token", self.token.as_str())
.build()?)
}
fn build_nomad_token_request(
&self,
nomad_path: &str,
nomad_role: &str,
) -> Result<reqwest::Request, crate::Error> {
let vault_address = url::Url::parse(self.address())?;
let vault_address =
vault_address.join(&format!("/v1/{}/creds/{}", nomad_path, nomad_role))?;
Ok(self
.client
.get(vault_address)
.header("X-Vault-Token", self.token.as_str())
.build()?)
}
}
impl Drop for Client {
fn drop(&mut self) {
if self.revoke_self_on_drop {
info!("Vault Client is being dropped. Revoking its own Token");
match self.revoke_self() {
Ok(()) => {}
Err(e) => warn!("Error revoking self: {}", e),
}
}
}
}
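// Hedged usage sketch (editor addition, not part of the original module): the
// typical end-to-end flow of authenticating via AWS IAM and then pulling a
// Nomad token. The address, mount paths ("aws", "nomad") and role names are
// illustrative defaults, and `aws_payload` is assumed to come from
// `crate::aws::VaultAwsAuthIamPayload::new` as documented above.
#[allow(dead_code)]
fn example_fetch_nomad_token(
aws_payload: &crate::aws::VaultAwsAuthIamPayload,
) -> Result<crate::Secret, crate::Error> {
// The client revokes its own token when dropped (revoke_self_on_drop).
let client = Client::login_aws_iam(
"https://vault.example.com:8200",
"aws",
"default",
aws_payload,
None,
)?;
client.get_nomad_token("nomad", "default")
}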
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use std::env;
pub(crate) fn vault_address() -> String {
env::var("VAULT_ADDR").unwrap_or_else(|_| "http://127.0.0.1:8200".to_string())
}
#[test]
fn login_aws_iam_request_is_built_properly() -> Result<(), crate::Error> {
let address = vault_address();
let aws_payload = crate::aws::tests::vault_aws_iam_payload(None, None)?;
let request = Client::build_login_aws_iam_request(
&address,
"aws",
"default",
&aws_payload,
&ClientBuilder::new().build()?,
)?;
assert_eq!(
format!("{}/v1/auth/aws/login", address),
request.url().to_string()
);
assert_eq!(&reqwest::Method::POST, request.method());
// Can't test payload
Ok(())
}
/// Requires Mock AWS API and Vault server
/// This test does not verify if the signature from rusoto is correct.
#[test]
fn login_aws_with_vault_is_successful() -> Result<(), crate::Error> {
let address = vault_address();
let aws_payload =
crate::aws::tests::vault_aws_iam_payload(Some("vault.example.com"), None)?;
let client = Client::login_aws_iam(&address, "aws", "default", &aws_payload, None)?;
assert!(!client.token().is_empty());
Ok(())
}
#[test]
fn nomad_token_secrets_engine_payload_can_be_deserialized() {
// Example payload from Nomad Secrets Engine
// e.g. `vault read nomad/creds/default`
let json = r#"
{
"request_id": "xxx4",
"lease_id": "nomad/creds/default/xxx",
"lease_duration": 2764800,
"renewable": true,
"data": {
"accessor_id": "accessor",
"secret_id": "secret"
},
"warnings": null
}
"#;
let data = match serde_json::from_str::<Response>(json).unwrap() {
Response::Response(ResponseData { data,.. }) => data,
_ => panic!("Invalid deserialization"),
};
let nomad = data.unwrap();
assert_eq!(nomad["secret_id"], "secret");
}
#[test]
fn nomad_token_request_is_built_properly() -> Result<(), crate::Error> {
let client = Client::new(vault_address(), "vault_token", false, None)?;
let request = client.build_nomad_token_request("nomad", "default")?;
assert_eq!(
format!("{}/v1/nomad/creds/default", vault_address()),
request.url().to_string()
);
assert_eq!(&reqwest::Method::GET, request.method());
let actual_token = request.headers().get("X-Vault-Token");
assert!(actual_token.is_some());
assert_eq!("vault_token", actual_token.unwrap());
Ok(())
}
}
// timer.rs
use std::fmt;
use std::mem;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::{Arc, Mutex, Weak};
use std::task::{Context, Poll};
use std::time::Instant;
use std::future::Future;
use super::AtomicWaker;
use super::{global, ArcList, Heap, HeapTimer, Node, Slot};
/// A "timer heap" used to power separately owned instances of `Delay`.
///
/// This timer is implemented as a priority-queue-based heap. Each `Timer`
/// contains a few primary methods with which to drive it:
///
/// * `next_event` indicates how long the ambient system needs to sleep until it
/// invokes further processing on a `Timer`
/// * `advance_to` is what actually fires timers on the `Timer`, and should be
/// called essentially every iteration of the event loop, or when the time
/// specified by `next_event` has elapsed.
/// * The `Future` implementation for `Timer` is used to process incoming timer
/// updates and requests. This is used to schedule new timeouts, update
/// existing ones, or delete existing timeouts. The `Future` implementation
/// will never resolve, but it'll schedule notifications of when to wake up
/// and process more messages.
///
/// Note that if you're using this crate you probably don't need to use a
/// `Timer`, as there is a global one already available for you, run on a helper
/// thread. If this isn't desirable, though, then the
/// `TimerHandle::set_as_global_fallback` method can be used instead!
pub struct Timer {
inner: Arc<Inner>,
timer_heap: Heap<HeapTimer>,
}
/// A handle to a `Timer` which is used to create instances of a `Delay`.
#[derive(Clone)]
pub struct TimerHandle {
pub(crate) inner: Weak<Inner>,
}
pub(crate) struct Inner {
/// List of updates the `Timer` needs to process
pub(crate) list: ArcList<ScheduledTimer>,
/// The blocked `Timer` task to receive notifications to the `list` above.
pub(crate) waker: AtomicWaker,
}
/// Shared state between the `Timer` and a `Delay`.
pub(crate) struct ScheduledTimer {
pub(crate) waker: AtomicWaker,
// The lowest bit here is whether the timer has fired or not, the second
// lowest bit is whether the timer has been invalidated, and all the other
// bits are the "generation" of the timer which is reset during the `reset`
// function. Only timers for a matching generation are fired.
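// Illustrative packing (editor's note): state = (gen << 2) | (invalidated << 1) | fired,
// e.g. generation 5, not invalidated, already fired packs as 0b10101.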
pub(crate) state: AtomicUsize,
pub(crate) inner: Weak<Inner>,
pub(crate) at: Mutex<Option<Instant>>,
// TODO: this is only accessed by the timer thread, should have a more
// lightweight protection than a `Mutex`
pub(crate) slot: Mutex<Option<Slot>>,
}
impl Timer {
/// Creates a new timer heap ready to create new timers.
pub fn new() -> Timer {
Timer {
inner: Arc::new(Inner {
list: ArcList::new(),
waker: AtomicWaker::new(),
}),
timer_heap: Heap::new(),
}
}
/// Returns a handle to this timer heap, used to create new timeouts.
pub fn handle(&self) -> TimerHandle {
TimerHandle {
inner: Arc::downgrade(&self.inner),
}
}
/// Returns the time at which this timer next needs to be invoked with
/// `advance_to`.
///
/// Event loops or threads typically want to sleep until the specified
/// instant.
pub fn next_event(&self) -> Option<Instant> {
self.timer_heap.peek().map(|t| t.at)
}
/// Process any timers which are supposed to fire at or before the current
/// instant.
///
/// This method is equivalent to `self.advance_to(Instant::now())`.
pub fn advance(&mut self) {
self.advance_to(Instant::now())
}
/// Process any timers which are supposed to fire before the given `now`.
///
/// This method should be called on `Timer` periodically to advance the
/// internal state and process any pending timers which need to fire.
pub fn advance_to(&mut self, now: Instant) {
loop {
match self.timer_heap.peek() {
Some(head) if head.at <= now => {}
Some(_) => break,
None => break,
};
// Flag the timer as fired and then notify its task, if any, that's
// blocked.
let heap_timer = self.timer_heap.pop().unwrap();
*heap_timer.node.slot.lock().unwrap() = None;
let bits = heap_timer.gen << 2;
match heap_timer
.node
.state
.compare_exchange(bits, bits | 0b01, SeqCst, SeqCst)
{
Ok(_) => heap_timer.node.waker.wake(),
Err(_b) => {}
}
}
}
/// Either updates the timer at slot `idx` to fire at `at`, or adds a new
/// timer at `idx` and sets it to fire at `at`.
fn update_or_add(&mut self, at: Instant, node: Arc<Node<ScheduledTimer>>) {
// TODO: avoid remove + push and instead just do one sift of the heap?
// In theory we could update it in place and then do the percolation
// as necessary
let gen = node.state.load(SeqCst) >> 2;
let mut slot = node.slot.lock().unwrap();
if let Some(heap_slot) = slot.take() {
self.timer_heap.remove(heap_slot);
}
*slot = Some(self.timer_heap.push(HeapTimer {
at,
gen,
node: node.clone(),
}));
}
fn remove(&mut self, node: Arc<Node<ScheduledTimer>>) {
// If this `idx` is still around and it's still got a registered timer,
// then we jettison it from the timer heap.
let mut slot = node.slot.lock().unwrap();
let heap_slot = match slot.take() {
Some(slot) => slot,
None => return,
};
self.timer_heap.remove(heap_slot);
}
fn invalidate(&mut self, node: Arc<Node<ScheduledTimer>>) {
node.state.fetch_or(0b10, SeqCst);
node.waker.wake();
}
}
impl Future for Timer {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner).waker.register(cx.waker());
let mut list = self.inner.list.take();
while let Some(node) = list.pop() {
let at = *node.at.lock().unwrap();
match at {
Some(at) => self.update_or_add(at, node),
None => self.remove(node),
}
}
Poll::Pending
}
}
impl Drop for Timer {
fn drop(&mut self) {
// Seal off our list to prevent any more updates from getting pushed on.
// Any timer which sees an error from the push will immediately become
// inert.
let mut list = self.inner.list.take_and_seal();
// Now that we'll never receive another timer, drain the list of all
// updates and also drain our heap of all active timers, invalidating
// everything.
while let Some(t) = list.pop() {
self.invalidate(t);
}
while let Some(t) = self.timer_heap.pop() {
self.invalidate(t.node);
}
}
}
impl fmt::Debug for Timer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("Timer").field("heap", &"...").finish()
}
}
impl Default for Timer {
fn default() -> Self {
Self::new()
}
}
static HANDLE_FALLBACK: AtomicUsize = AtomicUsize::new(0);
/// Error returned from `TimerHandle::set_as_global_fallback`.
#[derive(Clone, Debug)]
struct SetDefaultError(());
impl TimerHandle {
/// Configures this timer handle to be the one returned by
/// `TimerHandle::default`.
///
/// By default a global thread is initialized on the first call to
/// `TimerHandle::default`. This first call can happen transitively through
/// `Delay::new`. If, however, that hasn't happened yet then the global
/// default timer handle can be configured through this method.
///
/// This method can be used to prevent the global helper thread from
/// spawning. If this method is successful then the global helper thread
/// will never get spun up.
///
/// On success this timer handle will have installed itself globally to be
/// used as the return value for `TimerHandle::default` unless otherwise
/// specified.
///
/// # Errors
///
/// If another thread has already called `set_as_global_fallback` or this
/// thread otherwise loses a race to call this method then it will fail
/// returning an error. Once a call to `set_as_global_fallback` is
/// successful then no future calls may succeed.
fn set_as_global_fallback(self) -> Result<(), SetDefaultError> {
unsafe {
let val = self.into_usize();
match HANDLE_FALLBACK.compare_exchange(0, val, SeqCst, SeqCst) {
Ok(_) => Ok(()),
Err(_) => {
drop(TimerHandle::from_usize(val));
Err(SetDefaultError(()))
}
}
}
}
fn into_usize(self) -> usize {
unsafe { mem::transmute::<Weak<Inner>, usize>(self.inner) }
}
unsafe fn from_usize(val: usize) -> TimerHandle {
let inner = mem::transmute::<usize, Weak<Inner>>(val);
TimerHandle { inner }
}
}
impl Default for TimerHandle {
fn default() -> TimerHandle {
let mut fallback = HANDLE_FALLBACK.load(SeqCst);
// If the fallback hasn't been previously initialized then let's spin
// up a helper thread and try to initialize with that. If we can't
// actually create a helper thread then we'll just return a "defunct"
// handle which will return errors when timer objects are attempted to
// be associated.
if fallback == 0 {
let helper = match global::HelperThread::new() {
Ok(helper) => helper,
Err(_) => return TimerHandle { inner: Weak::new() },
};
// If we successfully set ourselves as the actual fallback then we
// want to `forget` the helper thread to ensure that it persists
// globally. If we fail to set ourselves as the fallback that means
// that someone was racing with this call to
// `TimerHandle::default`. They ended up winning so we'll destroy
// our helper thread (which shuts down the thread) and reload the
// fallback.
if helper.handle().set_as_global_fallback().is_ok() {
let ret = helper.handle();
helper.forget();
return ret;
}
fallback = HANDLE_FALLBACK.load(SeqCst);
}
// At this point our fallback handle global was configured so we use
// its value to reify a handle, clone it, and then forget our reified
// handle as we don't actually have an owning reference to it.
assert!(fallback != 0);
unsafe {
let handle = TimerHandle::from_usize(fallback);
let ret = handle.clone();
let _ = handle.into_usize();
ret
}
}
}
impl fmt::Debug for TimerHandle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("TimerHandle")
.field("inner", &"...")
.finish()
}
}
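// Hedged usage sketch (editor addition, not part of the original source): one
// way a dedicated thread might drive a `Timer` by hand, alongside an executor
// polling the `Timer` future for new registrations (not shown here).
#[allow(dead_code)]
fn drive_timer_once(timer: &mut Timer) {
// Sleep until the earliest scheduled timeout, if there is one...
if let Some(at) = timer.next_event() {
let now = Instant::now();
if at > now {
std::thread::sleep(at - now);
}
}
// ...then fire everything that is now due.
timer.advance();
}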
timer.rs | use std::fmt;
use std::mem;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::{Arc, Mutex, Weak};
use std::task::{Context, Poll};
use std::time::Instant;
use std::future::Future;
use super::AtomicWaker;
use super::{global, ArcList, Heap, HeapTimer, Node, Slot};
/// A "timer heap" used to power separately owned instances of `Delay`.
///
/// This timer is implemented as a priority queued-based heap. Each `Timer`
/// contains a few primary methods which which to drive it:
///
/// * `next_wake` indicates how long the ambient system needs to sleep until it
/// invokes further processing on a `Timer`
/// * `advance_to` is what actually fires timers on the `Timer`, and should be
/// called essentially every iteration of the event loop, or when the time
/// specified by `next_wake` has elapsed.
/// * The `Future` implementation for `Timer` is used to process incoming timer
/// updates and requests. This is used to schedule new timeouts, update
/// existing ones, or delete existing timeouts. The `Future` implementation
/// will never resolve, but it'll schedule notifications of when to wake up
/// and process more messages.
///
/// Note that if you're using this crate you probably don't need to use a
/// `Timer` as there is a global one already available for you run on a helper
/// thread. If this isn't desirable, though, then the
/// `TimerHandle::set_fallback` method can be used instead!
pub struct Timer {
inner: Arc<Inner>,
timer_heap: Heap<HeapTimer>,
}
/// A handle to a `Timer` which is used to create instances of a `Delay`.
#[derive(Clone)]
pub struct TimerHandle {
pub(crate) inner: Weak<Inner>,
}
pub(crate) struct Inner {
/// List of updates the `Timer` needs to process
pub(crate) list: ArcList<ScheduledTimer>,
/// The blocked `Timer` task to receive notifications to the `list` above.
pub(crate) waker: AtomicWaker,
}
/// Shared state between the `Timer` and a `Delay`.
pub(crate) struct ScheduledTimer {
pub(crate) waker: AtomicWaker,
// The lowest bit here is whether the timer has fired or not, the second
// lowest bit is whether the timer has been invalidated, and all the other
// bits are the "generation" of the timer which is reset during the `reset`
// function. Only timers for a matching generation are fired.
pub(crate) state: AtomicUsize,
pub(crate) inner: Weak<Inner>,
pub(crate) at: Mutex<Option<Instant>>,
// TODO: this is only accessed by the timer thread, should have a more
// lightweight protection than a `Mutex`
pub(crate) slot: Mutex<Option<Slot>>,
}
impl Timer {
/// Creates a new timer heap ready to create new timers.
pub fn new() -> Timer {
Timer {
inner: Arc::new(Inner {
list: ArcList::new(),
waker: AtomicWaker::new(),
}),
timer_heap: Heap::new(),
}
}
/// Returns a handle to this timer heap, used to create new timeouts.
pub fn handle(&self) -> TimerHandle {
TimerHandle {
inner: Arc::downgrade(&self.inner),
}
}
/// Returns the time at which this timer next needs to be invoked with
/// `advance_to`.
///
/// Event loops or threads typically want to sleep until the specified
/// instant.
pub fn next_event(&self) -> Option<Instant> {
self.timer_heap.peek().map(|t| t.at)
}
/// Proces any timers which are supposed to fire at or before the current
/// instant.
///
/// This method is equivalent to `self.advance_to(Instant::now())`.
pub fn advance(&mut self) {
self.advance_to(Instant::now())
}
/// Proces any timers which are supposed to fire before `now` specified.
///
/// This method should be called on `Timer` periodically to advance the
/// internal state and process any pending timers which need to fire.
pub fn advance_to(&mut self, now: Instant) {
loop {
match self.timer_heap.peek() {
Some(head) if head.at <= now => {}
Some(_) => break,
None => break,
};
// Flag the timer as fired and then notify its task, if any, that's
// blocked.
let heap_timer = self.timer_heap.pop().unwrap();
*heap_timer.node.slot.lock().unwrap() = None;
let bits = heap_timer.gen << 2;
match heap_timer
.node
.state
.compare_exchange(bits, bits | 0b01, SeqCst, SeqCst)
{
Ok(_) => heap_timer.node.waker.wake(),
Err(_b) => {}
}
}
}
/// Either updates the timer at slot `idx` to fire at `at`, or adds a new
/// timer at `idx` and sets it to fire at `at`.
fn update_or_add(&mut self, at: Instant, node: Arc<Node<ScheduledTimer>>) {
// TODO: avoid remove + push and instead just do one sift of the heap?
// In theory we could update it in place and then do the percolation
// as necessary
let gen = node.state.load(SeqCst) >> 2;
let mut slot = node.slot.lock().unwrap();
if let Some(heap_slot) = slot.take() {
self.timer_heap.remove(heap_slot);
}
*slot = Some(self.timer_heap.push(HeapTimer {
at,
gen,
node: node.clone(),
}));
}
fn remove(&mut self, node: Arc<Node<ScheduledTimer>>) {
// If this `idx` is still around and it's still got a registered timer,
// then we jettison it form the timer heap.
let mut slot = node.slot.lock().unwrap();
let heap_slot = match slot.take() {
Some(slot) => slot,
None => return,
};
self.timer_heap.remove(heap_slot);
}
fn invalidate(&mut self, node: Arc<Node<ScheduledTimer>>) {
node.state.fetch_or(0b10, SeqCst);
node.waker.wake();
}
}
impl Future for Timer {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner).waker.register(cx.waker());
let mut list = self.inner.list.take();
while let Some(node) = list.pop() {
let at = *node.at.lock().unwrap();
match at {
Some(at) => self.update_or_add(at, node),
None => self.remove(node),
}
}
Poll::Pending
}
}
impl Drop for Timer {
fn drop(&mut self) {
// Seal off our list to prevent any more updates from getting pushed on.
// Any timer which sees an error from the push will immediately become
// inert.
let mut list = self.inner.list.take_and_seal();
// Now that we'll never receive another timer, drain the list of all
// updates and also drain our heap of all active timers, invalidating
// everything.
while let Some(t) = list.pop() {
self.invalidate(t);
}
while let Some(t) = self.timer_heap.pop() {
self.invalidate(t.node);
}
}
}
impl fmt::Debug for Timer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("Timer").field("heap", &"...").finish()
}
}
impl Default for Timer {
fn default() -> Self {
Self::new()
}
}
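// A minimal sketch of the driver loop that the `next_event`/`advance` docs
// above describe. Illustrative only (not part of the original source): a real
// driver would also poll the `Timer` future so its update list gets drained,
// use a proper wakeup mechanism instead of bare thread parking, and have a
// shutdown condition.
#[allow(dead_code)]
fn drive_timer_sketch(mut timer: Timer) {
    loop {
        // Fire everything that is due as of now.
        timer.advance();
        match timer.next_event() {
            // Sleep until the next scheduled timer needs to fire.
            Some(at) => {
                let now = Instant::now();
                if at > now {
                    std::thread::park_timeout(at - now);
                }
            }
            // Nothing scheduled: park until something notifies us.
            None => std::thread::park(),
        }
    }
}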
static HANDLE_FALLBACK: AtomicUsize = AtomicUsize::new(0);
/// Error returned from `TimerHandle::set_as_global_fallback`.
#[derive(Clone, Debug)]
struct SetDefaultError(());
impl TimerHandle {
/// Configures this timer handle to be the one returned by
/// `TimerHandle::default`.
///
/// By default a global thread is initialized on the first call to
/// `TimerHandle::default`. This first call can happen transitively through
/// `Delay::new`. If, however, that hasn't happened yet then the global
/// default timer handle can be configured through this method.
///
/// This method can be used to prevent the global helper thread from
/// spawning. If this method is successful then the global helper thread
/// will never get spun up.
///
/// On success this timer handle will have installed itself globally to be
/// used as the return value for `TimerHandle::default` unless otherwise
/// specified.
///
/// # Errors
///
/// If another thread has already called `set_as_global_fallback` or this
/// thread otherwise loses a race to call this method then it will fail
/// returning an error. Once a call to `set_as_global_fallback` is
/// successful then no future calls may succeed.
fn set_as_global_fallback(self) -> Result<(), SetDefaultError> {
unsafe {
let val = self.into_usize();
match HANDLE_FALLBACK.compare_exchange(0, val, SeqCst, SeqCst) {
Ok(_) => Ok(()),
Err(_) => {
drop(TimerHandle::from_usize(val));
Err(SetDefaultError(()))
}
}
}
}
fn into_usize(self) -> usize {
unsafe { mem::transmute::<Weak<Inner>, usize>(self.inner) }
}
unsafe fn from_usize(val: usize) -> TimerHandle {
let inner = mem::transmute::<usize, Weak<Inner>>(val);
TimerHandle { inner }
}
}
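// Sketch of the opt-out flow described in the docs above: install a custom
// timer's handle as the global fallback before anything transitively calls
// `TimerHandle::default`. Illustrative only; the caller then owns the job of
// driving the returned `Timer`.
#[allow(dead_code)]
fn install_custom_fallback_sketch() -> Timer {
    let timer = Timer::new();
    timer
        .handle()
        .set_as_global_fallback()
        .expect("another global fallback was already installed");
    timer
}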
impl Default for TimerHandle {
fn default() -> TimerHandle {
let mut fallback = HANDLE_FALLBACK.load(SeqCst);
// If the fallback hasn't been previously initialized then let's spin
// up a helper thread and try to initialize with that. If we can't
        // actually create a helper thread then we'll just return a "defunct"
        // handle which will return errors whenever timer objects are
        // associated with it.
if fallback == 0 {
let helper = match global::HelperThread::new() {
Ok(helper) => helper,
Err(_) => return TimerHandle { inner: Weak::new() },
};
// If we successfully set ourselves as the actual fallback then we
// want to `forget` the helper thread to ensure that it persists
// globally. If we fail to set ourselves as the fallback that means
// that someone was racing with this call to
// `TimerHandle::default`. They ended up winning so we'll destroy
// our helper thread (which shuts down the thread) and reload the
// fallback.
if helper.handle().set_as_global_fallback().is_ok() {
let ret = helper.handle();
helper.forget();
return ret; | }
fallback = HANDLE_FALLBACK.load(SeqCst);
}
// At this point our fallback handle global was configured so we use
// its value to reify a handle, clone it, and then forget our reified
// handle as we don't actually have an owning reference to it.
        assert!(fallback != 0);
unsafe {
let handle = TimerHandle::from_usize(fallback);
let ret = handle.clone();
let _ = handle.into_usize();
ret
}
}
}
impl fmt::Debug for TimerHandle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("TimerHandle")
.field("inner", &"...")
.finish()
}
} | random_line_split |
|
main.rs | use {
serde::Deserialize,
serde_json,
serde_repr::Deserialize_repr,
std::{collections::BTreeMap, io::Read, fs::File, process::Command},
};
fn main() -> Result<(), Failure> {
let mut file = File::open("token")?;
let mut token = String::new();
file.read_to_string(&mut token)?;
token.insert_str(0, "token=");
    let output = &Command::new("curl")
        .args(&[
            "https://api.todoist.com/sync/v8/sync",
            "-d", &token,
            "-d", "sync_token=*",
            "-d", "resource_types=[\"all\"]",
        ])
        .output()?
        .stdout;
let sync: Sync = serde_json::from_slice(&output)?;
let user = User::from(sync);
for task in user.tasks {
println!("- {:?}", task);
}
Ok(())
}
#[derive(Debug)]
enum Failure {
Io(std::io::Error),
Serde(serde_json::Error),
Utf8(std::str::Utf8Error),
}
impl From<std::io::Error> for Failure {
fn from(error: std::io::Error) -> Self {
Self::Io(error)
}
}
impl From<serde_json::Error> for Failure {
fn from(error: serde_json::Error) -> Self {
Self::Serde(error)
}
}
impl From<std::str::Utf8Error> for Failure {
fn from(error: std::str::Utf8Error) -> Self {
Self::Utf8(error)
}
}
struct User {
tasks: Vec<Task>,
}
impl From<Sync> for User {
fn from(sync: Sync) -> Self {
let mut tasks = Vec::new();
for item in sync.items {
tasks.push(item.into());
}
Self {
tasks,
}
}
}
#[derive(Debug)]
struct Task {
item: Item,
}
impl From<Item> for Task {
fn from(item: Item) -> Self {
Self { item }
}
}
#[derive(Debug, Deserialize)]
struct Sync {
/// A new synchronization token.
sync_token: String,
/// If this contains all data.
full_sync: bool,
/// A [`UserData`].
user: UserData,
/// An array of [`Project`]s.
projects: Vec<Project>,
/// An array of [`Item`]s.
items: Vec<Item>,
/// An array of [`Note`]s.
notes: Vec<Note>,
/// An array of [`ProjectNote`]s.
project_notes: Vec<ProjectNote>,
/// An array of [`Section`]s.
sections: Vec<Section>,
/// An array of [`Label`]s.
labels: Vec<Label>,
/// An array of [`Filter`]s.
filters: Vec<Filter>,
/// Maps items to their order in the daily agenda.
day_orders: BTreeMap<ItemId, Order>,
/// An array of [`Reminder`]s.
reminders: Vec<Reminder>,
/// The collaborators for all shared projects.
collaborators: Vec<Collaborator>,
/// An array of [`CollaboratorState`]s.
#[serde(default)]
collaborators_states: Vec<CollaboratorState>,
/// An array of [`LiveNotification`]s.
live_notifications: Vec<LiveNotification>,
/// The id of the last [`LiveNotification`] seen by the user.
live_notifications_last_read_id: LiveNotificationId,
/// The [`UserSettings`].
user_settings: UserSettings,
}
#[derive(Debug, Deserialize)]
struct Order(i64);
#[derive(Debug, Deserialize)]
/// A Todoist user.
struct UserData {
/// The default number of minutes for set automatic reminders.
auto_reminder: u64,
/// Link to a 195x195 image of the user's avatar.
avatar_big: String,
/// Link to a 60x60 image of the user's avatar.
avatar_medium: String,
/// Link to a 640x640 image of the user's avatar.
avatar_s640: String,
/// Link to a 35x35 image of the user's avatar.
avatar_small: String,
/// The user's [`BusinessAccountId`].
#[serde(default)]
business_account_id: Option<BusinessAccountId>,
/// The number of tasks set as the user's daily goal.
daily_goal: u64,
/// The user's desired date format.
date_format: DateFormat,
/// If smart date recognition has been disabled.
dateist_inline_disabled: bool,
/// The language expected for the date recognition.
dateist_lang: Option<Language>,
/// The days that the user is off.
days_off: Vec<Day>,
/// The default reminder for the user.
default_reminder: Reminder,
/// The user's email.
email: String,
/// Special internal features that apply to the user.
features: Features,
full_name: String,
id: UserId,
#[serde(default)]
image_id: Option<String>,
inbox_project: ProjectId,
is_biz_admin: bool,
is_premium: bool,
join_date: String,
karma: f64,
karma_trend: KarmaTrend,
lang: Language,
mobile_host: Option<String>,
mobile_number: Option<String>,
next_week: Day,
premium_until: Option<String>,
sort_order: SortOrder,
start_day: Day,
start_page: Page,
#[serde(default)]
team_inbox: Option<ProjectId>,
theme: Theme,
time_format: TimeFormat,
token: String,
tz_info: TimezoneInfo,
weekly_goal: u64,
}
#[derive(Debug, Deserialize)]
struct UserId(u64);
#[derive(Debug, Deserialize)]
struct BusinessAccountId(u64);
#[derive(Debug, Deserialize)]
struct Project {
id: ProjectId,
name: String,
color: Color,
parent_id: Option<ProjectId>,
child_order: Order,
collapsed: Flag,
shared: bool,
is_deleted: Flag,
is_archived: Flag,
is_favorite: Flag,
sync_id: Option<ProjectSyncId>,
#[serde(default)]
inbox_project: bool,
#[serde(default)]
team_inbox: bool,
}
#[derive(Debug, Deserialize)]
struct ProjectId(u64);
#[derive(Debug, Deserialize)]
struct ProjectSyncId(u64);
#[derive(Debug, Deserialize)]
struct Item {
id: ItemId,
user_id: UserId,
project_id: ProjectId,
content: String,
due: Option<Date>,
priority: Priority,
parent_id: Option<ItemId>,
child_order: Order,
section_id: Option<SectionId>,
day_order: Order,
collapsed: Flag,
labels: Vec<LabelId>,
added_by_uid: Option<UserId>,
assigned_by_uid: Option<UserId>,
responsible_uid: Option<UserId>,
checked: Flag,
in_history: Flag,
is_deleted: Flag,
sync_id: Option<ItemSyncId>,
date_completed: Option<String>,
date_added: String,
}
#[derive(Debug, Deserialize)]
struct ItemSyncId(u64);
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd)]
struct ItemId(u64);
#[derive(Debug, Deserialize)]
struct Note {
id: NoteId,
posted_uid: UserId,
item_id: ItemId,
project_id: ProjectId,
content: String,
file_attachment: FileAttachment,
uids_to_notify: Vec<UserId>,
is_deleted: Flag,
posted: String,
reactions: BTreeMap<String, Vec<UserId>>,
}
#[derive(Debug, Deserialize)]
struct NoteId(u64);
#[derive(Debug, Deserialize)]
struct ProjectNote {
id: ProjectNoteId,
posted_uid: UserId,
project_id: ProjectId,
content: String,
file_attachment: FileAttachment,
uids_to_notify: Vec<UserId>,
is_deleted: Flag,
posted: String,
reactions: BTreeMap<String, Vec<UserId>>,
}
#[derive(Debug, Deserialize)]
struct ProjectNoteId(u64);
#[derive(Debug, Deserialize)]
struct Section {
id: SectionId,
name: String,
project_id: ProjectId,
section_order: Order,
collapsed: bool,
sync_id: Option<SectionSyncId>,
is_deleted: bool,
is_archived: bool,
date_archived: Option<String>,
date_added: String,
}
#[derive(Debug, Deserialize)]
struct SectionId(u64);
#[derive(Debug, Deserialize)]
struct SectionSyncId(u64);
#[derive(Debug, Deserialize)]
struct Label {
id: LabelId,
name: String,
color: Color,
item_order: Order,
is_deleted: Flag,
is_favorite: Flag,
}
#[derive(Debug, Deserialize)]
struct | (u64);
#[derive(Debug, Deserialize)]
struct Filter {
id: FilterId,
name: String,
query: String,
color: Color,
item_order: Order,
is_deleted: Flag,
is_favorite: Flag,
}
#[derive(Debug, Deserialize)]
struct FilterId(u64);
#[derive(Debug, Deserialize)]
struct Collaborator {
id: CollaboratorId,
email: String,
full_name: String,
timezone: String,
#[serde(default)]
image_id: Option<String>,
}
#[derive(Debug, Deserialize)]
struct CollaboratorId(u64);
#[derive(Debug, Deserialize)]
struct CollaboratorState {
project_id: ProjectId,
user_id: UserId,
state: CollaboratorStatus,
is_deleted: bool,
}
#[derive(Debug, Deserialize)]
// Note: v8 api says there should be a `seq_no` field that holds an integer.
struct LiveNotification {
id: LiveNotificationId,
// Note: v8 api says that created should be an integer that is the epoch timestamp.
created: String,
// Note: v8 api does not say from_uid is optional.
#[serde(default)]
from_uid: Option<UserId>,
notification_key: String,
notification_type: String,
is_unread: Flag,
}
#[derive(Debug, Deserialize)]
struct LiveNotificationId(u64);
#[derive(Debug, Deserialize)]
struct UserSettings {
reminder_push: bool,
#[serde(default)]
reminder_sms: bool,
reminder_desktop: bool,
reminder_email: bool,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Color {
Crimson = 30,
Red = 31,
Orange = 32,
Yellow = 33,
Olive = 34,
LightGreen = 35,
DarkGreen = 36,
SeaGreen = 37,
SteelBlue = 38,
SkyBlue = 39,
BabyBlue = 40,
Blue = 41,
RoyalPurple = 42,
Violet = 43,
Pink = 44,
Mulberry = 45,
Salmon = 46,
Gray = 47,
LightGray = 48,
Tan = 49,
}
#[derive(Debug, Deserialize)]
enum CollaboratorStatus {
Active,
Invited,
}
#[derive(Debug, Deserialize)]
struct FileAttachment {
file_type: String,
file_name: String,
file_size: u64,
file_url: String,
upload_state: UploadState,
}
#[derive(Debug, Deserialize)]
enum UploadState {
Pending,
Completed,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Priority {
Natural = 1,
High = 2,
Urgent = 3,
VeryUrgent = 4,
}
#[derive(Debug, Deserialize)]
struct Date {
date: String,
timezone: Option<String>,
string: String,
lang: Language,
is_recurring: bool,
}
#[derive(Debug, Deserialize)]
struct TimezoneInfo {
gmt_string: String,
hours: i8,
is_dst: Flag,
minutes: u8,
timezone: String,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum TimeFormat {
TwentyFour = 0,
Twelve = 1,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Theme {
Theme0 = 0,
Theme1 = 1,
Theme2 = 2,
Theme3 = 3,
Theme4 = 4,
Theme5 = 5,
Theme6 = 6,
Theme7 = 7,
Theme8 = 8,
Theme9 = 9,
Theme10 = 10,
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Page {
InfoPage,
Blank,
Query(String),
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum SortOrder {
OldestDatesFirst = 0,
OldestDatesLast = 1,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum KarmaTrend {
Up,
}
#[derive(Debug, Deserialize)]
struct Features {
/// If the user has enabled beta.
beta: Flag,
/// If inline date parsing is enabled.
dateist_inline_disabled: bool,
dateist_lang: Option<Language>,
#[serde(default)]
gold_theme: bool,
has_push_reminders: bool,
karma_disabled: bool,
karma_vacation: bool,
restriction: u64,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Flag {
False = 0,
True = 1,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Day {
Monday = 1,
Tuesday = 2,
Wednesday = 3,
Thursday = 4,
Friday = 5,
Saturday = 6,
Sunday = 7,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum Reminder {
Email,
Mobile,
Push,
NoDefault,
}
/// The format of a date.
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum DateFormat {
/// dd-mm-yyyy
DayMonth = 0,
/// mm-dd-yyyy
MonthDay = 1,
}
#[derive(Debug, Deserialize)]
enum Language {
#[serde(rename = "da")]
Danish,
#[serde(rename = "ge")]
German,
#[serde(rename = "en")]
English,
#[serde(rename = "es")]
Spanish,
#[serde(rename = "fi")]
Finnish,
#[serde(rename = "fr")]
French,
#[serde(rename = "it")]
Italian,
#[serde(rename = "ja")]
Japanese,
#[serde(rename = "ko")]
Korean,
#[serde(rename = "nl")]
Dutch,
#[serde(rename = "pl")]
Polish,
#[serde(rename = "pt_Br")]
BrazilianPortuguese,
#[serde(rename = "ru")]
Russian,
#[serde(rename = "sv")]
Sweedish,
#[serde(rename = "tr")]
Turkish,
#[serde(rename = "zh_CN")]
MainlandChinese,
#[serde(rename = "zh_TW")]
TaiwanChinese,
}
| LabelId | identifier_name |
main.rs | use { | std::{collections::BTreeMap, io::Read, fs::File, process::Command},
};
fn main() -> Result<(), Failure> {
let mut file = File::open("token")?;
let mut token = String::new();
file.read_to_string(&mut token)?;
token.insert_str(0, "token=");
    let output = &Command::new("curl")
        .args(&[
            "https://api.todoist.com/sync/v8/sync",
            "-d", &token,
            "-d", "sync_token=*",
            "-d", "resource_types=[\"all\"]",
        ])
        .output()?
        .stdout;
let sync: Sync = serde_json::from_slice(&output)?;
let user = User::from(sync);
for task in user.tasks {
println!("- {:?}", task);
}
Ok(())
}
#[derive(Debug)]
enum Failure {
Io(std::io::Error),
Serde(serde_json::Error),
Utf8(std::str::Utf8Error),
}
impl From<std::io::Error> for Failure {
fn from(error: std::io::Error) -> Self {
Self::Io(error)
}
}
impl From<serde_json::Error> for Failure {
fn from(error: serde_json::Error) -> Self {
Self::Serde(error)
}
}
impl From<std::str::Utf8Error> for Failure {
fn from(error: std::str::Utf8Error) -> Self {
Self::Utf8(error)
}
}
struct User {
tasks: Vec<Task>,
}
impl From<Sync> for User {
fn from(sync: Sync) -> Self {
let mut tasks = Vec::new();
for item in sync.items {
tasks.push(item.into());
}
Self {
tasks,
}
}
}
#[derive(Debug)]
struct Task {
item: Item,
}
impl From<Item> for Task {
fn from(item: Item) -> Self {
Self { item }
}
}
#[derive(Debug, Deserialize)]
struct Sync {
/// A new synchronization token.
sync_token: String,
/// If this contains all data.
full_sync: bool,
/// A [`UserData`].
user: UserData,
/// An array of [`Project`]s.
projects: Vec<Project>,
/// An array of [`Item`]s.
items: Vec<Item>,
/// An array of [`Note`]s.
notes: Vec<Note>,
/// An array of [`ProjectNote`]s.
project_notes: Vec<ProjectNote>,
/// An array of [`Section`]s.
sections: Vec<Section>,
/// An array of [`Label`]s.
labels: Vec<Label>,
/// An array of [`Filter`]s.
filters: Vec<Filter>,
/// Maps items to their order in the daily agenda.
day_orders: BTreeMap<ItemId, Order>,
/// An array of [`Reminder`]s.
reminders: Vec<Reminder>,
/// The collaborators for all shared projects.
collaborators: Vec<Collaborator>,
/// An array of [`CollaboratorState`]s.
#[serde(default)]
collaborators_states: Vec<CollaboratorState>,
/// An array of [`LiveNotification`]s.
live_notifications: Vec<LiveNotification>,
/// The id of the last [`LiveNotification`] seen by the user.
live_notifications_last_read_id: LiveNotificationId,
/// The [`UserSettings`].
user_settings: UserSettings,
}
#[derive(Debug, Deserialize)]
struct Order(i64);
#[derive(Debug, Deserialize)]
/// A Todoist user.
struct UserData {
/// The default number of minutes for set automatic reminders.
auto_reminder: u64,
/// Link to a 195x195 image of the user's avatar.
avatar_big: String,
/// Link to a 60x60 image of the user's avatar.
avatar_medium: String,
/// Link to a 640x640 image of the user's avatar.
avatar_s640: String,
/// Link to a 35x35 image of the user's avatar.
avatar_small: String,
/// The user's [`BusinessAccountId`].
#[serde(default)]
business_account_id: Option<BusinessAccountId>,
/// The number of tasks set as the user's daily goal.
daily_goal: u64,
/// The user's desired date format.
date_format: DateFormat,
/// If smart date recognition has been disabled.
dateist_inline_disabled: bool,
/// The language expected for the date recognition.
dateist_lang: Option<Language>,
/// The days that the user is off.
days_off: Vec<Day>,
/// The default reminder for the user.
default_reminder: Reminder,
/// The user's email.
email: String,
/// Special internal features that apply to the user.
features: Features,
full_name: String,
id: UserId,
#[serde(default)]
image_id: Option<String>,
inbox_project: ProjectId,
is_biz_admin: bool,
is_premium: bool,
join_date: String,
karma: f64,
karma_trend: KarmaTrend,
lang: Language,
mobile_host: Option<String>,
mobile_number: Option<String>,
next_week: Day,
premium_until: Option<String>,
sort_order: SortOrder,
start_day: Day,
start_page: Page,
#[serde(default)]
team_inbox: Option<ProjectId>,
theme: Theme,
time_format: TimeFormat,
token: String,
tz_info: TimezoneInfo,
weekly_goal: u64,
}
#[derive(Debug, Deserialize)]
struct UserId(u64);
#[derive(Debug, Deserialize)]
struct BusinessAccountId(u64);
#[derive(Debug, Deserialize)]
struct Project {
id: ProjectId,
name: String,
color: Color,
parent_id: Option<ProjectId>,
child_order: Order,
collapsed: Flag,
shared: bool,
is_deleted: Flag,
is_archived: Flag,
is_favorite: Flag,
sync_id: Option<ProjectSyncId>,
#[serde(default)]
inbox_project: bool,
#[serde(default)]
team_inbox: bool,
}
#[derive(Debug, Deserialize)]
struct ProjectId(u64);
#[derive(Debug, Deserialize)]
struct ProjectSyncId(u64);
#[derive(Debug, Deserialize)]
struct Item {
id: ItemId,
user_id: UserId,
project_id: ProjectId,
content: String,
due: Option<Date>,
priority: Priority,
parent_id: Option<ItemId>,
child_order: Order,
section_id: Option<SectionId>,
day_order: Order,
collapsed: Flag,
labels: Vec<LabelId>,
added_by_uid: Option<UserId>,
assigned_by_uid: Option<UserId>,
responsible_uid: Option<UserId>,
checked: Flag,
in_history: Flag,
is_deleted: Flag,
sync_id: Option<ItemSyncId>,
date_completed: Option<String>,
date_added: String,
}
#[derive(Debug, Deserialize)]
struct ItemSyncId(u64);
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd)]
struct ItemId(u64);
#[derive(Debug, Deserialize)]
struct Note {
id: NoteId,
posted_uid: UserId,
item_id: ItemId,
project_id: ProjectId,
content: String,
file_attachment: FileAttachment,
uids_to_notify: Vec<UserId>,
is_deleted: Flag,
posted: String,
reactions: BTreeMap<String, Vec<UserId>>,
}
#[derive(Debug, Deserialize)]
struct NoteId(u64);
#[derive(Debug, Deserialize)]
struct ProjectNote {
id: ProjectNoteId,
posted_uid: UserId,
project_id: ProjectId,
content: String,
file_attachment: FileAttachment,
uids_to_notify: Vec<UserId>,
is_deleted: Flag,
posted: String,
reactions: BTreeMap<String, Vec<UserId>>,
}
#[derive(Debug, Deserialize)]
struct ProjectNoteId(u64);
#[derive(Debug, Deserialize)]
struct Section {
id: SectionId,
name: String,
project_id: ProjectId,
section_order: Order,
collapsed: bool,
sync_id: Option<SectionSyncId>,
is_deleted: bool,
is_archived: bool,
date_archived: Option<String>,
date_added: String,
}
#[derive(Debug, Deserialize)]
struct SectionId(u64);
#[derive(Debug, Deserialize)]
struct SectionSyncId(u64);
#[derive(Debug, Deserialize)]
struct Label {
id: LabelId,
name: String,
color: Color,
item_order: Order,
is_deleted: Flag,
is_favorite: Flag,
}
#[derive(Debug, Deserialize)]
struct LabelId(u64);
#[derive(Debug, Deserialize)]
struct Filter {
id: FilterId,
name: String,
query: String,
color: Color,
item_order: Order,
is_deleted: Flag,
is_favorite: Flag,
}
#[derive(Debug, Deserialize)]
struct FilterId(u64);
#[derive(Debug, Deserialize)]
struct Collaborator {
id: CollaboratorId,
email: String,
full_name: String,
timezone: String,
#[serde(default)]
image_id: Option<String>,
}
#[derive(Debug, Deserialize)]
struct CollaboratorId(u64);
#[derive(Debug, Deserialize)]
struct CollaboratorState {
project_id: ProjectId,
user_id: UserId,
state: CollaboratorStatus,
is_deleted: bool,
}
#[derive(Debug, Deserialize)]
// Note: v8 api says there should be a `seq_no` field that holds an integer.
struct LiveNotification {
id: LiveNotificationId,
// Note: v8 api says that created should be an integer that is the epoch timestamp.
created: String,
// Note: v8 api does not say from_uid is optional.
#[serde(default)]
from_uid: Option<UserId>,
notification_key: String,
notification_type: String,
is_unread: Flag,
}
#[derive(Debug, Deserialize)]
struct LiveNotificationId(u64);
#[derive(Debug, Deserialize)]
struct UserSettings {
reminder_push: bool,
#[serde(default)]
reminder_sms: bool,
reminder_desktop: bool,
reminder_email: bool,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Color {
Crimson = 30,
Red = 31,
Orange = 32,
Yellow = 33,
Olive = 34,
LightGreen = 35,
DarkGreen = 36,
SeaGreen = 37,
SteelBlue = 38,
SkyBlue = 39,
BabyBlue = 40,
Blue = 41,
RoyalPurple = 42,
Violet = 43,
Pink = 44,
Mulberry = 45,
Salmon = 46,
Gray = 47,
LightGray = 48,
Tan = 49,
}
#[derive(Debug, Deserialize)]
enum CollaboratorStatus {
Active,
Invited,
}
#[derive(Debug, Deserialize)]
struct FileAttachment {
file_type: String,
file_name: String,
file_size: u64,
file_url: String,
upload_state: UploadState,
}
#[derive(Debug, Deserialize)]
enum UploadState {
Pending,
Completed,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Priority {
Natural = 1,
High = 2,
Urgent = 3,
VeryUrgent = 4,
}
#[derive(Debug, Deserialize)]
struct Date {
date: String,
timezone: Option<String>,
string: String,
lang: Language,
is_recurring: bool,
}
#[derive(Debug, Deserialize)]
struct TimezoneInfo {
gmt_string: String,
hours: i8,
is_dst: Flag,
minutes: u8,
timezone: String,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum TimeFormat {
TwentyFour = 0,
Twelve = 1,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Theme {
Theme0 = 0,
Theme1 = 1,
Theme2 = 2,
Theme3 = 3,
Theme4 = 4,
Theme5 = 5,
Theme6 = 6,
Theme7 = 7,
Theme8 = 8,
Theme9 = 9,
Theme10 = 10,
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Page {
InfoPage,
Blank,
Query(String),
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum SortOrder {
OldestDatesFirst = 0,
OldestDatesLast = 1,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum KarmaTrend {
Up,
}
#[derive(Debug, Deserialize)]
struct Features {
/// If the user has enabled beta.
beta: Flag,
/// If inline date parsing is enabled.
dateist_inline_disabled: bool,
dateist_lang: Option<Language>,
#[serde(default)]
gold_theme: bool,
has_push_reminders: bool,
karma_disabled: bool,
karma_vacation: bool,
restriction: u64,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Flag {
False = 0,
True = 1,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Day {
Monday = 1,
Tuesday = 2,
Wednesday = 3,
Thursday = 4,
Friday = 5,
Saturday = 6,
Sunday = 7,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum Reminder {
Email,
Mobile,
Push,
NoDefault,
}
/// The format of a date.
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum DateFormat {
/// dd-mm-yyyy
DayMonth = 0,
/// mm-dd-yyyy
MonthDay = 1,
}
#[derive(Debug, Deserialize)]
enum Language {
#[serde(rename = "da")]
Danish,
#[serde(rename = "ge")]
German,
#[serde(rename = "en")]
English,
#[serde(rename = "es")]
Spanish,
#[serde(rename = "fi")]
Finnish,
#[serde(rename = "fr")]
French,
#[serde(rename = "it")]
Italian,
#[serde(rename = "ja")]
Japanese,
#[serde(rename = "ko")]
Korean,
#[serde(rename = "nl")]
Dutch,
#[serde(rename = "pl")]
Polish,
#[serde(rename = "pt_Br")]
BrazilianPortuguese,
#[serde(rename = "ru")]
Russian,
#[serde(rename = "sv")]
Sweedish,
#[serde(rename = "tr")]
Turkish,
#[serde(rename = "zh_CN")]
MainlandChinese,
#[serde(rename = "zh_TW")]
TaiwanChinese,
} | serde::Deserialize,
serde_json,
serde_repr::Deserialize_repr, | random_line_split |
lib.rs | #![recursion_limit = "1024"]
#[macro_use]
extern crate derive_new;
#[macro_use]
extern crate derive_setters;
#[macro_use]
extern crate log;
#[macro_use]
extern crate thiserror;
pub mod checksum;
mod range;
mod systems;
pub use self::systems::*;
use std::{
fmt::Debug,
io,
num::{NonZeroU16, NonZeroU32, NonZeroU64},
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, UNIX_EPOCH},
};
use async_std::{
fs::{self, File},
prelude::*,
};
use chrono::{DateTime, Utc};
use filetime::FileTime;
use futures::{
channel::mpsc,
stream::{self, StreamExt},
};
use http_client::native::NativeClient;
use numtoa::NumToA;
use surf::{
Client, Request, Response, StatusCode
};
pub type EventSender = mpsc::UnboundedSender<(Arc<Path>, FetchEvent)>;
pub type Output<T> = (Arc<Path>, Result<T, Error>);
/// An error from the asynchronous file fetcher.
#[derive(Debug, Error)]
pub enum Error {
#[error("task was cancelled")]
Cancelled,
#[error("http client error")]
Client(surf::Error),
#[error("unable to concatenate fetched parts")]
Concatenate(#[source] io::Error),
#[error("unable to create file")]
FileCreate(#[source] io::Error),
#[error("unable to set timestamp on {:?}", _0)]
FileTime(Arc<Path>, #[source] io::Error),
#[error("content length is an invalid range")]
InvalidRange(#[source] io::Error),
#[error("unable to remove file with bad metadata")]
MetadataRemove(#[source] io::Error),
#[error("destination has no file name")]
Nameless,
#[error("unable to open fetched part")]
OpenPart(Arc<Path>, #[source] io::Error),
#[error("destination lacks parent")]
Parentless,
#[error("connection timed out")]
TimedOut,
#[error("error writing to file")]
Write(#[source] io::Error),
#[error("failed to rename partial to destination")]
Rename(#[source] io::Error),
#[error("server responded with an error: {}", _0)]
Status(StatusCode),
}
/// Information about a source being fetched.
#[derive(Debug, Setters)]
pub struct Source {
/// URLs whereby the file can be found.
#[setters(skip)]
pub urls: Arc<[Box<str>]>,
/// Where the file shall ultimately be fetched to.
#[setters(skip)]
pub dest: Arc<Path>,
/// Optional location to store the partial file
#[setters(strip_option)]
#[setters(into)]
pub part: Option<Arc<Path>>,
}
impl Source {
pub fn new(urls: impl Into<Arc<[Box<str>]>>, dest: impl Into<Arc<Path>>) -> Self {
Self { urls: urls.into(), dest: dest.into(), part: None }
}
}
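// Sketch: describing a download with a mirror and an explicit partial-file
// location. Illustrative only -- the URLs and paths are placeholders, and the
// `part` setter shape is assumed from the `derive_setters` attributes above
// (`strip_option` + `into`).
#[allow(dead_code)]
fn source_sketch() -> Source {
    Source::new(
        vec![
            Box::from("https://example.com/a.iso"),
            Box::from("https://mirror.example.com/a.iso"),
        ],
        Arc::from(Path::new("a.iso")),
    )
    .part(Arc::from(Path::new("a.iso.partial")))
}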
impl From<surf::Error> for Error {
fn from(e: surf::Error) -> Self { Self::Client(e) }
}
/// Events which are submitted by the fetcher.
#[derive(Debug)]
pub enum FetchEvent {
/// Signals that this file was already fetched.
AlreadyFetched,
/// States that we know the length of the file being fetched.
ContentLength(u64),
/// Notifies that the file has been fetched.
Fetched,
/// Notifies that a file is being fetched.
Fetching,
    /// Reports the number of bytes that have been read for a file.
Progress(usize),
/// Reports that a part of a file is being fetched.
PartFetching(u64),
/// Reports that a part has been fetched.
PartFetched(u64),
}
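// Sketch: building the channel that carries these events out of the fetcher.
// Illustrative only; a consumer would receive `(path, event)` pairs from the
// returned receiver on whatever executor drives the fetch, and the sender
// half would be handed to the fetcher via its `events` setter.
#[allow(dead_code)]
fn event_channel_sketch() -> (
    Arc<EventSender>,
    mpsc::UnboundedReceiver<(Arc<Path>, FetchEvent)>,
) {
    let (tx, rx) = mpsc::unbounded();
    (Arc::new(tx), rx)
}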
/// An asynchronous file fetcher for clients fetching files.
///
/// The futures generated by the fetcher are compatible with single and multi-threaded
/// runtimes, allowing you to choose the runtime that works best for your
/// application. A single-threaded runtime is generally recommended for fetching files,
/// as your network connection is unlikely to be faster than a single CPU core.
#[derive(new, Setters)]
pub struct Fetcher {
#[setters(skip)]
client: Client,
/// When set, cancels any active operations.
#[new(default)]
#[setters(strip_option)]
cancel: Option<Arc<AtomicBool>>,
/// The number of concurrent connections to sustain per file being fetched.
#[new(default)]
connections_per_file: Option<NonZeroU16>,
/// The number of attempts to make when a request fails.
#[new(value = "unsafe { NonZeroU16::new_unchecked(3) } ")]
retries: NonZeroU16,
/// The maximum size of a part file when downloading in parts.
#[new(value = "unsafe { NonZeroU32::new_unchecked(2 * 1024 * 1024) }")]
max_part_size: NonZeroU32,
/// The time to wait between chunks before giving up.
#[new(default)]
#[setters(strip_option)]
timeout: Option<Duration>,
/// Holds a sender for submitting events to.
#[new(default)]
#[setters(into)]
#[setters(strip_option)]
events: Option<Arc<EventSender>>,
}
impl Default for Fetcher {
fn default() -> Self { Self::new(Client::with_http_client(NativeClient::default())) }
}
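// Sketch: a typical single-file request wired through the builder-style
// setters generated by `derive_new`/`derive_setters`. Illustrative only --
// the URL and destination are placeholders, and the setter signatures are
// assumed from the attributes above (`connections_per_file` takes the
// `Option` field type directly because it lacks `strip_option`, while
// `timeout` takes a bare `Duration`).
#[allow(dead_code)]
async fn fetch_one_sketch() -> Result<(), Error> {
    let fetcher = Fetcher::default()
        .connections_per_file(NonZeroU16::new(4))
        .timeout(Duration::from_secs(15))
        .into_arc();
    let urls: Arc<[Box<str>]> = Arc::new([Box::from("https://example.com/file.tar.gz")]);
    let dest: Arc<Path> = Arc::from(Path::new("file.tar.gz"));
    fetcher.request(urls, dest).await
}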
impl Fetcher {
/// Wraps the fetcher in an Arc.
pub fn into_arc(self) -> Arc<Self> { Arc::new(self) }
/// Request a file from one or more URIs.
///
/// At least one URI must be provided as a source for the file. Each additional URI
/// serves as a mirror for failover and load-balancing purposes.
pub async fn request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => Ok(()),
Err(mut why) => {
for _ in 1..self.retries.get() {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => return Ok(()),
Err(cause) => why = cause,
}
}
Err(why)
}
}
}
async fn inner_request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
let mut modified = None;
let mut length = None;
let mut if_modified_since = None;
// If the file already exists, validate that it is the same.
if to.exists() {
if let Some(response) = head(&self.client, &*uris[0]).await? {
let content_length = response.content_length();
modified = response.last_modified();
if let (Some(content_length), Some(last_modified)) =
(content_length, modified)
{
match fs::metadata(to.as_ref()).await {
Ok(metadata) => {
let modified = metadata.modified().map_err(Error::Write)?;
let ts = modified
.duration_since(UNIX_EPOCH)
.expect("time went backwards");
if metadata.len() == content_length
&& ts.as_secs() == last_modified.timestamp() as u64
{
self.send((to, FetchEvent::AlreadyFetched));
return Ok(());
}
if_modified_since =
Some(DateTime::<Utc>::from(modified).to_rfc2822());
length = Some(content_length);
}
Err(why) => {
error!("failed to fetch metadata of {:?}: {}", to, why);
fs::remove_file(to.as_ref())
.await
.map_err(Error::MetadataRemove)?;
}
}
}
}
}
// If set, this will use multiple connections to download a file in parts.
if let Some(connections) = self.connections_per_file {
if let Some(response) = head(&self.client, &*uris[0]).await? {
modified = response.last_modified();
let length = match length {
Some(length) => Some(length),
None => response.content_length(),
};
if let Some(length) = length {
if supports_range(&self.client, &*uris[0], length).await? {
self.send((to.clone(), FetchEvent::ContentLength(length)));
return self
.get_many(length, connections.get(), uris, to, modified)
.await;
}
}
}
}
let mut request = self.client.get(&*uris[0]).header("Expect", "").build();
if let Some(modified_since) = if_modified_since {
request.set_header("if-modified-since", modified_since);
}
let path =
match self.get(&mut modified, request, to.clone(), to.clone(), None).await {
Ok(path) => path,
// Server does not support if-modified-since
Err(Error::Status(StatusCode::NotImplemented)) => {
let request = self.client.get(&*uris[0]).header("Expect", "").build();
self.get(&mut modified, request, to.clone(), to, None).await?
}
Err(why) => return Err(why),
};
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&path, filetime, filetime)
.map_err(move |why| Error::FileTime(path, why))?;
}
Ok(())
}
async fn get(
&self,
modified: &mut Option<DateTime<Utc>>,
request: Request,
to: Arc<Path>,
dest: Arc<Path>,
length: Option<u64>,
) -> Result<Arc<Path>, Error> {
let mut file = File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
if let Some(length) = length {
file.set_len(length).await.map_err(Error::Write)?;
}
let response = &mut validate(if let Some(duration) = self.timeout {
timed(duration, async { self.client.send(request).await.map_err(Error::from) }).await??
} else {
self.client.send(request).await?
})?;
if modified.is_none() {
*modified = response.last_modified();
}
if response.status() == StatusCode::NotModified {
return Ok(to);
}
let buffer = &mut [0u8; 8 * 1024];
let mut read;
loop {
if self.cancelled() {
return Err(Error::Cancelled);
}
let reader = async { response.read(buffer).await.map_err(Error::Write) };
read = match self.timeout {
Some(duration) => timed(duration, reader).await??,
None => reader.await?,
};
            if read != 0 {
self.send((dest.clone(), FetchEvent::Progress(read)));
file.write_all(&buffer[..read]).await.map_err(Error::Write)?;
} else {
break;
}
}
Ok(to)
}
async fn get_many(
self: Arc<Self>,
length: u64,
concurrent: u16,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
mut modified: Option<DateTime<Utc>>,
) -> Result<(), Error> {
let parent = to.parent().ok_or(Error::Parentless)?;
let filename = to.file_name().ok_or(Error::Nameless)?;
let mut buf = [0u8; 20];
// The destination which parts will be concatenated to.
let concatenated_file =
&mut File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
let max_part_size =
unsafe { NonZeroU64::new_unchecked(u64::from(self.max_part_size.get())) };
let to_ = to.clone();
let parts = stream::iter(range::generate(length, max_part_size).enumerate())
// Generate a future for fetching each part that a range describes.
.map(move |(partn, (range_start, range_end))| {
let uri = uris[partn % uris.len()].clone();
let part_path = {
let mut new_filename = filename.to_os_string();
new_filename
.push(&[".part", partn.numtoa_str(10, &mut buf)].concat());
parent.join(new_filename)
};
let fetcher = self.clone();
let to = to_.clone();
async move {
let range = range::to_string(range_start, range_end);
fetcher.send((to.clone(), FetchEvent::PartFetching(partn as u64)));
let request = fetcher
.client
.get(&*uri)
.header("range", range.as_str())
.header("Expect", "")
.build();
let result = fetcher
.get(
&mut modified,
request,
part_path.into(),
to.clone(),
Some(range_end - range_start),
)
.await;
fetcher.send((to, FetchEvent::PartFetched(partn as u64)));
result
}
})
            // Ensure that no more than this many connections are happening
            // concurrently at a time
.buffered(concurrent as usize)
// This type exploded the stack, and therefore needs to be boxed
.boxed_local();
systems::concatenator(concatenated_file, parts).await?;
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&to, filetime, filetime)
.map_err(|why| Error::FileTime(to, why))?;
}
Ok(())
}
fn cancelled(&self) -> bool {
self.cancel.as_ref().map_or(false, |cancel| cancel.load(Ordering::SeqCst))
}
fn send(&self, event: (Arc<Path>, FetchEvent)) {
if let Some(sender) = self.events.as_ref() {
let _ = sender.unbounded_send(event);
}
}
}
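// Sketch of what the private `range` module presumably produces for
// `get_many` above: contiguous half-open `(start, end)` byte ranges of at
// most `max_part_size` bytes covering `length`. An illustrative
// reimplementation under that assumption, not the crate's actual code.
#[allow(dead_code)]
fn generate_ranges_sketch(length: u64, max_part_size: u64) -> Vec<(u64, u64)> {
    let mut ranges = Vec::new();
    let mut start = 0;
    while start < length {
        let end = std::cmp::min(start + max_part_size, length);
        ranges.push((start, end));
        start = end;
    }
    ranges
}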
async fn head(
client: &Client,
uri: &str,
) -> Result<Option<Response>, Error> {
match validate(client.head(uri).header("Expect", "").await?).map(Some) {
result @ Ok(_) => result,
Err(Error::Status(StatusCode::NotImplemented)) => Ok(None),
Err(other) => Err(other),
} |
async fn supports_range(
client: &Client,
uri: &str,
length: u64,
) -> Result<bool, Error> {
let response = client
.head(uri)
.header("Expect", "")
.header("range", range::to_string(0, length).as_str())
.await?;
if response.status() == StatusCode::PartialContent {
Ok(true)
} else {
validate(response).map(|_| false)
}
}
async fn timed<F, T>(duration: Duration, future: F) -> Result<T, Error>
where
F: Future<Output = T>,
{
async_std::future::timeout(duration, future).await.map_err(|_| Error::TimedOut)
}
fn validate(response: Response) -> Result<Response, Error> {
let status = response.status();
if status.is_informational() || status.is_success() {
Ok(response)
} else {
Err(Error::Status(status))
}
}
trait ResponseExt {
fn content_length(&self) -> Option<u64>;
fn last_modified(&self) -> Option<DateTime<Utc>>;
}
impl ResponseExt for Response {
fn content_length(&self) -> Option<u64> {
let header = self.header("content-lenght")?.get(0)?;
header.as_str().parse::<u64>().ok()
}
fn last_modified(&self) -> Option<DateTime<Utc>> {
let header = self.header("last-modified")?.get(0)?;
DateTime::parse_from_rfc2822(header.as_str())
.ok()
.map(|tz| tz.with_timezone(&Utc))
}
} | } | random_line_split |
lib.rs | #![recursion_limit = "1024"]
#[macro_use]
extern crate derive_new;
#[macro_use]
extern crate derive_setters;
#[macro_use]
extern crate log;
#[macro_use]
extern crate thiserror;
pub mod checksum;
mod range;
mod systems;
pub use self::systems::*;
use std::{
fmt::Debug,
io,
num::{NonZeroU16, NonZeroU32, NonZeroU64},
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, UNIX_EPOCH},
};
use async_std::{
fs::{self, File},
prelude::*,
};
use chrono::{DateTime, Utc};
use filetime::FileTime;
use futures::{
channel::mpsc,
stream::{self, StreamExt},
};
use http_client::native::NativeClient;
use numtoa::NumToA;
use surf::{
Client, Request, Response, StatusCode
};
pub type EventSender = mpsc::UnboundedSender<(Arc<Path>, FetchEvent)>;
pub type Output<T> = (Arc<Path>, Result<T, Error>);
/// An error from the asynchronous file fetcher.
#[derive(Debug, Error)]
pub enum Error {
#[error("task was cancelled")]
Cancelled,
#[error("http client error")]
Client(surf::Error),
#[error("unable to concatenate fetched parts")]
Concatenate(#[source] io::Error),
#[error("unable to create file")]
FileCreate(#[source] io::Error),
#[error("unable to set timestamp on {:?}", _0)]
FileTime(Arc<Path>, #[source] io::Error),
#[error("content length is an invalid range")]
InvalidRange(#[source] io::Error),
#[error("unable to remove file with bad metadata")]
MetadataRemove(#[source] io::Error),
#[error("destination has no file name")]
Nameless,
#[error("unable to open fetched part")]
OpenPart(Arc<Path>, #[source] io::Error),
#[error("destination lacks parent")]
Parentless,
#[error("connection timed out")]
TimedOut,
#[error("error writing to file")]
Write(#[source] io::Error),
#[error("failed to rename partial to destination")]
Rename(#[source] io::Error),
#[error("server responded with an error: {}", _0)]
Status(StatusCode),
}
/// Information about a source being fetched.
#[derive(Debug, Setters)]
pub struct Source {
/// URLs whereby the file can be found.
#[setters(skip)]
pub urls: Arc<[Box<str>]>,
/// Where the file shall ultimately be fetched to.
#[setters(skip)]
pub dest: Arc<Path>,
/// Optional location to store the partial file
#[setters(strip_option)]
#[setters(into)]
pub part: Option<Arc<Path>>,
}
impl Source {
pub fn new(urls: impl Into<Arc<[Box<str>]>>, dest: impl Into<Arc<Path>>) -> Self {
Self { urls: urls.into(), dest: dest.into(), part: None }
}
}
impl From<surf::Error> for Error {
fn from(e: surf::Error) -> Self { Self::Client(e) }
}
/// Events which are submitted by the fetcher.
#[derive(Debug)]
pub enum | {
/// Signals that this file was already fetched.
AlreadyFetched,
/// States that we know the length of the file being fetched.
ContentLength(u64),
/// Notifies that the file has been fetched.
Fetched,
/// Notifies that a file is being fetched.
Fetching,
    /// Reports the number of bytes that have been read for a file.
Progress(usize),
/// Reports that a part of a file is being fetched.
PartFetching(u64),
/// Reports that a part has been fetched.
PartFetched(u64),
}
/// An asynchronous file fetcher for clients fetching files.
///
/// The futures generated by the fetcher are compatible with single and multi-threaded
/// runtimes, allowing you to choose the runtime that works best for your
/// application. A single-threaded runtime is generally recommended for fetching files,
/// as your network connection is unlikely to be faster than a single CPU core.
#[derive(new, Setters)]
pub struct Fetcher {
#[setters(skip)]
client: Client,
/// When set, cancels any active operations.
#[new(default)]
#[setters(strip_option)]
cancel: Option<Arc<AtomicBool>>,
/// The number of concurrent connections to sustain per file being fetched.
#[new(default)]
connections_per_file: Option<NonZeroU16>,
/// The number of attempts to make when a request fails.
#[new(value = "unsafe { NonZeroU16::new_unchecked(3) } ")]
retries: NonZeroU16,
/// The maximum size of a part file when downloading in parts.
#[new(value = "unsafe { NonZeroU32::new_unchecked(2 * 1024 * 1024) }")]
max_part_size: NonZeroU32,
/// The time to wait between chunks before giving up.
#[new(default)]
#[setters(strip_option)]
timeout: Option<Duration>,
/// Holds a sender for submitting events to.
#[new(default)]
#[setters(into)]
#[setters(strip_option)]
events: Option<Arc<EventSender>>,
}
impl Default for Fetcher {
fn default() -> Self { Self::new(Client::with_http_client(NativeClient::default())) }
}
impl Fetcher {
/// Wraps the fetcher in an Arc.
pub fn into_arc(self) -> Arc<Self> { Arc::new(self) }
/// Request a file from one or more URIs.
///
/// At least one URI must be provided as a source for the file. Each additional URI
/// serves as a mirror for failover and load-balancing purposes.
pub async fn request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => Ok(()),
Err(mut why) => {
for _ in 1..self.retries.get() {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => return Ok(()),
Err(cause) => why = cause,
}
}
Err(why)
}
}
}
async fn inner_request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
let mut modified = None;
let mut length = None;
let mut if_modified_since = None;
// If the file already exists, validate that it is the same.
if to.exists() {
if let Some(response) = head(&self.client, &*uris[0]).await? {
let content_length = response.content_length();
modified = response.last_modified();
if let (Some(content_length), Some(last_modified)) =
(content_length, modified)
{
match fs::metadata(to.as_ref()).await {
Ok(metadata) => {
let modified = metadata.modified().map_err(Error::Write)?;
let ts = modified
.duration_since(UNIX_EPOCH)
.expect("time went backwards");
if metadata.len() == content_length
&& ts.as_secs() == last_modified.timestamp() as u64
{
self.send((to, FetchEvent::AlreadyFetched));
return Ok(());
}
if_modified_since =
Some(DateTime::<Utc>::from(modified).to_rfc2822());
length = Some(content_length);
}
Err(why) => {
error!("failed to fetch metadata of {:?}: {}", to, why);
fs::remove_file(to.as_ref())
.await
.map_err(Error::MetadataRemove)?;
}
}
}
}
}
// If set, this will use multiple connections to download a file in parts.
if let Some(connections) = self.connections_per_file {
if let Some(response) = head(&self.client, &*uris[0]).await? {
modified = response.last_modified();
let length = match length {
Some(length) => Some(length),
None => response.content_length(),
};
if let Some(length) = length {
if supports_range(&self.client, &*uris[0], length).await? {
self.send((to.clone(), FetchEvent::ContentLength(length)));
return self
.get_many(length, connections.get(), uris, to, modified)
.await;
}
}
}
}
let mut request = self.client.get(&*uris[0]).header("Expect", "").build();
if let Some(modified_since) = if_modified_since {
request.set_header("if-modified-since", modified_since);
}
let path =
match self.get(&mut modified, request, to.clone(), to.clone(), None).await {
Ok(path) => path,
// Server does not support if-modified-since
Err(Error::Status(StatusCode::NotImplemented)) => {
let request = self.client.get(&*uris[0]).header("Expect", "").build();
self.get(&mut modified, request, to.clone(), to, None).await?
}
Err(why) => return Err(why),
};
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&path, filetime, filetime)
.map_err(move |why| Error::FileTime(path, why))?;
}
Ok(())
}
async fn get(
&self,
modified: &mut Option<DateTime<Utc>>,
request: Request,
to: Arc<Path>,
dest: Arc<Path>,
length: Option<u64>,
) -> Result<Arc<Path>, Error> {
let mut file = File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
if let Some(length) = length {
file.set_len(length).await.map_err(Error::Write)?;
}
let response = &mut validate(if let Some(duration) = self.timeout {
timed(duration, async { self.client.send(request).await.map_err(Error::from) }).await??
} else {
self.client.send(request).await?
})?;
if modified.is_none() {
*modified = response.last_modified();
}
if response.status() == StatusCode::NotModified {
return Ok(to);
}
let buffer = &mut [0u8; 8 * 1024];
let mut read;
loop {
if self.cancelled() {
return Err(Error::Cancelled);
}
let reader = async { response.read(buffer).await.map_err(Error::Write) };
read = match self.timeout {
Some(duration) => timed(duration, reader).await??,
None => reader.await?,
};
            if read != 0 {
self.send((dest.clone(), FetchEvent::Progress(read)));
file.write_all(&buffer[..read]).await.map_err(Error::Write)?;
} else {
break;
}
}
Ok(to)
}
async fn get_many(
self: Arc<Self>,
length: u64,
concurrent: u16,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
mut modified: Option<DateTime<Utc>>,
) -> Result<(), Error> {
let parent = to.parent().ok_or(Error::Parentless)?;
let filename = to.file_name().ok_or(Error::Nameless)?;
let mut buf = [0u8; 20];
// The destination which parts will be concatenated to.
let concatenated_file =
&mut File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
let max_part_size =
unsafe { NonZeroU64::new_unchecked(u64::from(self.max_part_size.get())) };
let to_ = to.clone();
let parts = stream::iter(range::generate(length, max_part_size).enumerate())
// Generate a future for fetching each part that a range describes.
.map(move |(partn, (range_start, range_end))| {
let uri = uris[partn % uris.len()].clone();
let part_path = {
let mut new_filename = filename.to_os_string();
new_filename
.push(&[".part", partn.numtoa_str(10, &mut buf)].concat());
parent.join(new_filename)
};
let fetcher = self.clone();
let to = to_.clone();
async move {
let range = range::to_string(range_start, range_end);
fetcher.send((to.clone(), FetchEvent::PartFetching(partn as u64)));
let request = fetcher
.client
.get(&*uri)
.header("range", range.as_str())
.header("Expect", "")
.build();
let result = fetcher
.get(
&mut modified,
request,
part_path.into(),
to.clone(),
Some(range_end - range_start),
)
.await;
fetcher.send((to, FetchEvent::PartFetched(partn as u64)));
result
}
})
            // Ensure that no more than this many connections are happening
            // concurrently at a time
.buffered(concurrent as usize)
// This type exploded the stack, and therefore needs to be boxed
.boxed_local();
systems::concatenator(concatenated_file, parts).await?;
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&to, filetime, filetime)
.map_err(|why| Error::FileTime(to, why))?;
}
Ok(())
}
fn cancelled(&self) -> bool {
self.cancel.as_ref().map_or(false, |cancel| cancel.load(Ordering::SeqCst))
}
fn send(&self, event: (Arc<Path>, FetchEvent)) {
if let Some(sender) = self.events.as_ref() {
let _ = sender.unbounded_send(event);
}
}
}
async fn head(
client: &Client,
uri: &str,
) -> Result<Option<Response>, Error> {
match validate(client.head(uri).header("Expect", "").await?).map(Some) {
result @ Ok(_) => result,
Err(Error::Status(StatusCode::NotImplemented)) => Ok(None),
Err(other) => Err(other),
}
}
async fn supports_range(
client: &Client,
uri: &str,
length: u64,
) -> Result<bool, Error> {
let response = client
.head(uri)
.header("Expect", "")
.header("range", range::to_string(0, length).as_str())
.await?;
if response.status() == StatusCode::PartialContent {
Ok(true)
} else {
validate(response).map(|_| false)
}
}
async fn timed<F, T>(duration: Duration, future: F) -> Result<T, Error>
where
F: Future<Output = T>,
{
async_std::future::timeout(duration, future).await.map_err(|_| Error::TimedOut)
}
fn validate(response: Response) -> Result<Response, Error> {
let status = response.status();
if status.is_informational() || status.is_success() {
Ok(response)
} else {
Err(Error::Status(status))
}
}
trait ResponseExt {
fn content_length(&self) -> Option<u64>;
fn last_modified(&self) -> Option<DateTime<Utc>>;
}
impl ResponseExt for Response {
fn content_length(&self) -> Option<u64> {
let header = self.header("content-lenght")?.get(0)?;
header.as_str().parse::<u64>().ok()
}
fn last_modified(&self) -> Option<DateTime<Utc>> {
let header = self.header("last-modified")?.get(0)?;
DateTime::parse_from_rfc2822(header.as_str())
.ok()
.map(|tz| tz.with_timezone(&Utc))
}
}
| FetchEvent | identifier_name |
lib.rs | #![recursion_limit = "1024"]
#[macro_use]
extern crate derive_new;
#[macro_use]
extern crate derive_setters;
#[macro_use]
extern crate log;
#[macro_use]
extern crate thiserror;
pub mod checksum;
mod range;
mod systems;
pub use self::systems::*;
use std::{
fmt::Debug,
io,
num::{NonZeroU16, NonZeroU32, NonZeroU64},
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, UNIX_EPOCH},
};
use async_std::{
fs::{self, File},
prelude::*,
};
use chrono::{DateTime, Utc};
use filetime::FileTime;
use futures::{
channel::mpsc,
stream::{self, StreamExt},
};
use http_client::native::NativeClient;
use numtoa::NumToA;
use surf::{
Client, Request, Response, StatusCode
};
pub type EventSender = mpsc::UnboundedSender<(Arc<Path>, FetchEvent)>;
pub type Output<T> = (Arc<Path>, Result<T, Error>);
/// An error from the asynchronous file fetcher.
#[derive(Debug, Error)]
pub enum Error {
#[error("task was cancelled")]
Cancelled,
#[error("http client error")]
Client(surf::Error),
#[error("unable to concatenate fetched parts")]
Concatenate(#[source] io::Error),
#[error("unable to create file")]
FileCreate(#[source] io::Error),
#[error("unable to set timestamp on {:?}", _0)]
FileTime(Arc<Path>, #[source] io::Error),
#[error("content length is an invalid range")]
InvalidRange(#[source] io::Error),
#[error("unable to remove file with bad metadata")]
MetadataRemove(#[source] io::Error),
#[error("destination has no file name")]
Nameless,
#[error("unable to open fetched part")]
OpenPart(Arc<Path>, #[source] io::Error),
#[error("destination lacks parent")]
Parentless,
#[error("connection timed out")]
TimedOut,
#[error("error writing to file")]
Write(#[source] io::Error),
#[error("failed to rename partial to destination")]
Rename(#[source] io::Error),
#[error("server responded with an error: {}", _0)]
Status(StatusCode),
}
/// Information about a source being fetched.
#[derive(Debug, Setters)]
pub struct Source {
/// URLs whereby the file can be found.
#[setters(skip)]
pub urls: Arc<[Box<str>]>,
/// Where the file shall ultimately be fetched to.
#[setters(skip)]
pub dest: Arc<Path>,
/// Optional location to store the partial file
#[setters(strip_option)]
#[setters(into)]
pub part: Option<Arc<Path>>,
}
impl Source {
pub fn new(urls: impl Into<Arc<[Box<str>]>>, dest: impl Into<Arc<Path>>) -> Self {
Self { urls: urls.into(), dest: dest.into(), part: None }
}
}
impl From<surf::Error> for Error {
fn from(e: surf::Error) -> Self { Self::Client(e) }
}
/// Events which are submitted by the fetcher.
#[derive(Debug)]
pub enum FetchEvent {
/// Signals that this file was already fetched.
AlreadyFetched,
/// States that we know the length of the file being fetched.
ContentLength(u64),
/// Notifies that the file has been fetched.
Fetched,
/// Notifies that a file is being fetched.
Fetching,
    /// Reports the number of bytes that have been read for a file.
Progress(usize),
/// Reports that a part of a file is being fetched.
PartFetching(u64),
/// Reports that a part has been fetched.
PartFetched(u64),
}
/// An asynchronous file fetcher for clients fetching files.
///
/// The futures generated by the fetcher are compatible with single and multi-threaded
/// runtimes, allowing you to choose the runtime that works best for your
/// application. A single-threaded runtime is generally recommended for fetching files,
/// as your network connection is unlikely to be faster than a single CPU core.
#[derive(new, Setters)]
pub struct Fetcher {
#[setters(skip)]
client: Client,
/// When set, cancels any active operations.
#[new(default)]
#[setters(strip_option)]
cancel: Option<Arc<AtomicBool>>,
/// The number of concurrent connections to sustain per file being fetched.
#[new(default)]
connections_per_file: Option<NonZeroU16>,
/// The number of attempts to make when a request fails.
#[new(value = "unsafe { NonZeroU16::new_unchecked(3) } ")]
retries: NonZeroU16,
/// The maximum size of a part file when downloading in parts.
#[new(value = "unsafe { NonZeroU32::new_unchecked(2 * 1024 * 1024) }")]
max_part_size: NonZeroU32,
/// The time to wait between chunks before giving up.
#[new(default)]
#[setters(strip_option)]
timeout: Option<Duration>,
/// Holds a sender for submitting events to.
#[new(default)]
#[setters(into)]
#[setters(strip_option)]
events: Option<Arc<EventSender>>,
}
impl Default for Fetcher {
fn default() -> Self { Self::new(Client::with_http_client(NativeClient::default())) }
}
impl Fetcher {
/// Wraps the fetcher in an Arc.
pub fn into_arc(self) -> Arc<Self> { Arc::new(self) }
/// Request a file from one or more URIs.
///
/// At least one URI must be provided as a source for the file. Each additional URI
/// serves as a mirror for failover and load-balancing purposes.
pub async fn request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => Ok(()),
Err(mut why) => {
for _ in 1..self.retries.get() {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => return Ok(()),
Err(cause) => why = cause,
}
}
Err(why)
}
}
}
async fn inner_request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
let mut modified = None;
let mut length = None;
let mut if_modified_since = None;
// If the file already exists, validate that it is the same.
if to.exists() {
if let Some(response) = head(&self.client, &*uris[0]).await? {
let content_length = response.content_length();
modified = response.last_modified();
if let (Some(content_length), Some(last_modified)) =
(content_length, modified)
{
match fs::metadata(to.as_ref()).await {
Ok(metadata) => {
let modified = metadata.modified().map_err(Error::Write)?;
let ts = modified
.duration_since(UNIX_EPOCH)
.expect("time went backwards");
if metadata.len() == content_length
&& ts.as_secs() == last_modified.timestamp() as u64
{
self.send((to, FetchEvent::AlreadyFetched));
return Ok(());
}
if_modified_since =
Some(DateTime::<Utc>::from(modified).to_rfc2822());
length = Some(content_length);
}
Err(why) => {
error!("failed to fetch metadata of {:?}: {}", to, why);
fs::remove_file(to.as_ref())
.await
.map_err(Error::MetadataRemove)?;
}
}
}
}
}
// If set, this will use multiple connections to download a file in parts.
if let Some(connections) = self.connections_per_file {
if let Some(response) = head(&self.client, &*uris[0]).await? {
modified = response.last_modified();
let length = match length {
Some(length) => Some(length),
None => response.content_length(),
};
if let Some(length) = length {
if supports_range(&self.client, &*uris[0], length).await? {
self.send((to.clone(), FetchEvent::ContentLength(length)));
return self
.get_many(length, connections.get(), uris, to, modified)
.await;
}
}
}
}
let mut request = self.client.get(&*uris[0]).header("Expect", "").build();
if let Some(modified_since) = if_modified_since {
request.set_header("if-modified-since", modified_since);
}
let path =
match self.get(&mut modified, request, to.clone(), to.clone(), None).await {
Ok(path) => path,
// Server does not support if-modified-since
Err(Error::Status(StatusCode::NotImplemented)) => {
let request = self.client.get(&*uris[0]).header("Expect", "").build();
self.get(&mut modified, request, to.clone(), to, None).await?
}
Err(why) => return Err(why),
};
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&path, filetime, filetime)
.map_err(move |why| Error::FileTime(path, why))?;
}
Ok(())
}
async fn get(
&self,
modified: &mut Option<DateTime<Utc>>,
request: Request,
to: Arc<Path>,
dest: Arc<Path>,
length: Option<u64>,
) -> Result<Arc<Path>, Error> {
let mut file = File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
if let Some(length) = length {
file.set_len(length).await.map_err(Error::Write)?;
}
let response = &mut validate(if let Some(duration) = self.timeout {
timed(duration, async { self.client.send(request).await.map_err(Error::from) }).await??
} else {
self.client.send(request).await?
})?;
if modified.is_none() {
*modified = response.last_modified();
}
if response.status() == StatusCode::NotModified {
return Ok(to);
}
let buffer = &mut [0u8; 8 * 1024];
let mut read;
loop {
if self.cancelled() {
return Err(Error::Cancelled);
}
let reader = async { response.read(buffer).await.map_err(Error::Write) };
read = match self.timeout {
Some(duration) => timed(duration, reader).await??,
None => reader.await?,
};
            if read != 0 {
self.send((dest.clone(), FetchEvent::Progress(read)));
file.write_all(&buffer[..read]).await.map_err(Error::Write)?;
} else {
break;
}
}
Ok(to)
}
async fn get_many(
self: Arc<Self>,
length: u64,
concurrent: u16,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
mut modified: Option<DateTime<Utc>>,
) -> Result<(), Error> {
let parent = to.parent().ok_or(Error::Parentless)?;
let filename = to.file_name().ok_or(Error::Nameless)?;
let mut buf = [0u8; 20];
        // The destination file that the fetched parts will be concatenated into.
let concatenated_file =
&mut File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
let max_part_size =
unsafe { NonZeroU64::new_unchecked(u64::from(self.max_part_size.get())) };
let to_ = to.clone();
let parts = stream::iter(range::generate(length, max_part_size).enumerate())
// Generate a future for fetching each part that a range describes.
.map(move |(partn, (range_start, range_end))| {
let uri = uris[partn % uris.len()].clone();
let part_path = {
let mut new_filename = filename.to_os_string();
new_filename
.push(&[".part", partn.numtoa_str(10, &mut buf)].concat());
parent.join(new_filename)
};
let fetcher = self.clone();
let to = to_.clone();
async move {
let range = range::to_string(range_start, range_end);
fetcher.send((to.clone(), FetchEvent::PartFetching(partn as u64)));
let request = fetcher
.client
.get(&*uri)
.header("range", range.as_str())
.header("Expect", "")
.build();
let result = fetcher
.get(
&mut modified,
request,
part_path.into(),
to.clone(),
Some(range_end - range_start),
)
.await;
fetcher.send((to, FetchEvent::PartFetched(partn as u64)));
result
}
})
            // Ensure that only this many connections are happening concurrently at a time.
.buffered(concurrent as usize)
// This type exploded the stack, and therefore needs to be boxed
.boxed_local();
systems::concatenator(concatenated_file, parts).await?;
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&to, filetime, filetime)
.map_err(|why| Error::FileTime(to, why))?;
}
Ok(())
}
fn cancelled(&self) -> bool {
self.cancel.as_ref().map_or(false, |cancel| cancel.load(Ordering::SeqCst))
}
fn send(&self, event: (Arc<Path>, FetchEvent)) {
if let Some(sender) = self.events.as_ref() {
let _ = sender.unbounded_send(event);
}
}
}
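// A usage sketch (not from the original source): fetch a single file from a
// primary URL plus one mirror on the async-std runtime. The URLs and the
// destination path are hypothetical.
#[allow(dead_code)]
fn fetch_example() -> Result<(), Error> {
    let uris: Arc<[Box<str>]> = Arc::from(vec![
        Box::<str>::from("https://example.com/pkg.tar.gz"),
        Box::<str>::from("https://mirror.example.org/pkg.tar.gz"),
    ].into_boxed_slice());
    let dest: Arc<Path> = Arc::from(Path::new("/tmp/pkg.tar.gz"));
    async_std::task::block_on(Fetcher::default().into_arc().request(uris, dest))
}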
async fn head(
client: &Client,
uri: &str,
) -> Result<Option<Response>, Error> {
match validate(client.head(uri).header("Expect", "").await?).map(Some) {
result @ Ok(_) => result,
Err(Error::Status(StatusCode::NotImplemented)) => Ok(None),
Err(other) => Err(other),
}
}
async fn supports_range(
client: &Client,
uri: &str,
length: u64,
) -> Result<bool, Error> {
let response = client
.head(uri)
.header("Expect", "")
.header("range", range::to_string(0, length).as_str())
.await?;
if response.status() == StatusCode::PartialContent {
Ok(true)
} else {
validate(response).map(|_| false)
}
}
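// The `range` module is external to this file; the sketch below illustrates
// its assumed contract: `range::generate(length, max_part_size)` yields
// half-open (start, end) byte offsets that tile the full length, consistent
// with `Some(range_end - range_start)` being passed as a part's size above.
// This is a hypothetical helper, not the crate's actual implementation.
#[allow(dead_code)]
fn generate_part_ranges(length: u64, max_part_size: u64) -> impl Iterator<Item = (u64, u64)> {
    (0..length)
        .step_by(max_part_size as usize)
        .map(move |start| (start, (start + max_part_size).min(length)))
}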
async fn timed<F, T>(duration: Duration, future: F) -> Result<T, Error>
where
F: Future<Output = T>,
{
async_std::future::timeout(duration, future).await.map_err(|_| Error::TimedOut)
}
fn validate(response: Response) -> Result<Response, Error> |
trait ResponseExt {
fn content_length(&self) -> Option<u64>;
fn last_modified(&self) -> Option<DateTime<Utc>>;
}
impl ResponseExt for Response {
fn content_length(&self) -> Option<u64> {
        let header = self.header("content-length")?.get(0)?;
header.as_str().parse::<u64>().ok()
}
fn last_modified(&self) -> Option<DateTime<Utc>> {
let header = self.header("last-modified")?.get(0)?;
DateTime::parse_from_rfc2822(header.as_str())
.ok()
.map(|tz| tz.with_timezone(&Utc))
}
}
| {
let status = response.status();
if status.is_informational() || status.is_success() {
Ok(response)
} else {
Err(Error::Status(status))
}
} | identifier_body |
credential.rs | //! Internal `Credential` and external `CredentialId` ("keyhandle").
use core::cmp::Ordering;
use trussed::{client, syscall, try_syscall, types::KeyId};
pub(crate) use ctap_types::{
// authenticator::{ctap1, ctap2, Error, Request, Response},
ctap2::credential_management::CredentialProtectionPolicy,
sizes::*,
webauthn::PublicKeyCredentialDescriptor,
Bytes,
String,
};
use crate::{Authenticator, Error, Result, UserPresence};
/// As signaled in `get_info`.
///
/// Eventual goal is full support for the CTAP2.1 specification.
#[derive(Copy, Clone, Debug, serde::Deserialize, serde::Serialize)]
pub enum CtapVersion {
U2fV2,
Fido20,
Fido21Pre,
}
/// External ID of a credential, commonly known as "keyhandle".
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct CredentialId(pub Bytes<MAX_CREDENTIAL_ID_LENGTH>);
// TODO: how to determine necessary size?
// pub type SerializedCredential = Bytes<512>;
// pub type SerializedCredential = Bytes<256>;
pub(crate) type SerializedCredential = trussed::types::Message;
#[derive(Clone, Debug)]
struct EncryptedSerializedCredential(pub trussed::api::reply::Encrypt);
impl TryFrom<EncryptedSerializedCredential> for CredentialId {
type Error = Error;
fn try_from(esc: EncryptedSerializedCredential) -> Result<CredentialId> {
Ok(CredentialId(
trussed::cbor_serialize_bytes(&esc.0).map_err(|_| Error::Other)?,
))
}
}
impl TryFrom<CredentialId> for EncryptedSerializedCredential {
// tag = 16B
// nonce = 12B
type Error = Error;
fn try_from(cid: CredentialId) -> Result<EncryptedSerializedCredential> {
let encrypted_serialized_credential = EncryptedSerializedCredential(
ctap_types::serde::cbor_deserialize(&cid.0).map_err(|_| Error::InvalidCredential)?,
);
Ok(encrypted_serialized_credential)
}
}
/// Credential keys can either be "discoverable" or not.
///
/// The FIDO Alliance likes to refer to "resident keys" as "(client-side) discoverable public key
/// credential sources" now ;)
#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum Key {
ResidentKey(KeyId),
// THIS USED TO BE 92 NOW IT'S 96 or 97 or so... waddup?
WrappedKey(Bytes<128>),
}
/// The main content of a `Credential`.
#[derive(
Clone, Debug, PartialEq, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed,
)]
pub struct CredentialData {
// id, name, url
pub rp: ctap_types::webauthn::PublicKeyCredentialRpEntity,
// id, icon, name, display_name
pub user: ctap_types::webauthn::PublicKeyCredentialUserEntity,
// can be just a counter, need to be able to determine "latest"
pub creation_time: u32,
// for stateless deterministic keys, it seems CTAP2 (but not CTAP1) makes signature counters optional
use_counter: bool,
// P256 or Ed25519
pub algorithm: i32,
// for RK in non-deterministic mode: refers to actual key
// TODO(implement enums in cbor-deser): for all others, is a wrapped key
// --> use above Key enum
// #[serde(skip_serializing_if = "Option::is_none")]
// key_id: Option<KeyId>,
pub key: Key,
// extensions
#[serde(skip_serializing_if = "Option::is_none")]
pub hmac_secret: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub cred_protect: Option<CredentialProtectionPolicy>,
// TODO: add `sig_counter: Option<CounterId>`,
// and grant RKs a per-credential sig-counter.
}
// TODO: figure out sizes
// We may or may not follow https://github.com/satoshilabs/slips/blob/master/slip-0022.md
/// The core structure this authenticator creates and uses.
#[derive(Clone, Debug, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed)]
pub struct Credential {
ctap: CtapVersion,
pub data: CredentialData,
nonce: Bytes<12>,
}
// Alas... it would be more symmetrical to have Credential { meta, data },
// but let's not break binary compatibility for this.
//
// struct Metadata {
// ctap: CtapVersion,
// nonce: Bytes<12>,
// }
impl core::ops::Deref for Credential {
type Target = CredentialData;
fn deref(&self) -> &Self::Target {
&self.data
}
}
/// Compare credentials based on key + timestamp.
///
/// Comparison based on the creation timestamp alone would likely be good enough.
impl PartialEq for Credential {
fn eq(&self, other: &Self) -> bool {
(self.creation_time == other.creation_time) && (self.key == other.key)
}
}
impl PartialEq<&Credential> for Credential {
fn eq(&self, other: &&Self) -> bool {
self == *other
}
}
impl Eq for Credential {}
impl Ord for Credential {
fn cmp(&self, other: &Self) -> Ordering {
self.data.creation_time.cmp(&other.data.creation_time)
}
}
/// Order by timestamp of creation.
impl PartialOrd for Credential {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<&Credential> for Credential {
fn partial_cmp(&self, other: &&Self) -> Option<Ordering> {
Some(self.cmp(*other))
}
}
// Bad idea - huge stack
// pub(crate) type CredentialList = Vec<Credential, {ctap_types::sizes::MAX_CREDENTIAL_COUNT_IN_LIST}>;
impl From<CredentialId> for PublicKeyCredentialDescriptor {
fn from(id: CredentialId) -> PublicKeyCredentialDescriptor {
PublicKeyCredentialDescriptor {
id: id.0,
key_type: {
let mut key_type = String::new();
key_type.push_str("public-key").unwrap();
key_type
},
}
}
}
impl Credential {
#[allow(clippy::too_many_arguments)]
pub fn new(
ctap: CtapVersion,
// parameters: &ctap2::make_credential::Parameters,
rp: &ctap_types::webauthn::PublicKeyCredentialRpEntity,
user: &ctap_types::webauthn::PublicKeyCredentialUserEntity,
algorithm: i32,
key: Key,
timestamp: u32,
hmac_secret: Option<bool>,
cred_protect: Option<CredentialProtectionPolicy>,
nonce: [u8; 12],
) -> Self {
info!("credential for algorithm {}", algorithm);
let data = CredentialData {
rp: rp.clone(),
user: user.clone(),
creation_time: timestamp,
use_counter: true,
algorithm,
key,
hmac_secret,
cred_protect,
};
Credential {
ctap,
data,
nonce: Bytes::from_slice(&nonce).unwrap(),
}
}
// ID (or "keyhandle") for the credential.
//
// Originally, the entire data was serialized, and its encryption
// (binding RP as associated data) used as a keyhandle.
//
// However, this leads to problems with relying parties. According to the old U2F
// spec, the length of a keyhandle is encoded as one byte, whereas this procedure would
// generate keyhandles of length ~320 bytes.
//
// Therefore, inessential metadata is stripped before serialization, ensuring
// the ID will stay below 255 bytes.
//
    // Existing keyhandles can still be decoded.
pub fn id<T: client::Chacha8Poly1305 + client::Sha256>(
&self,
trussed: &mut T,
key_encryption_key: KeyId,
rp_id_hash: Option<&Bytes<32>>,
) -> Result<CredentialId> {
let serialized_credential = self.strip().serialize()?;
let message = &serialized_credential;
// info!("serialized cred = {:?}", message).ok();
let rp_id_hash: Bytes<32> = if let Some(hash) = rp_id_hash {
hash.clone()
} else {
syscall!(trussed.hash_sha256(self.rp.id.as_ref()))
.hash
.to_bytes()
.map_err(|_| Error::Other)?
};
let associated_data = &rp_id_hash[..];
let nonce: [u8; 12] = self.nonce.as_slice().try_into().unwrap();
let encrypted_serialized_credential = EncryptedSerializedCredential(syscall!(trussed
.encrypt_chacha8poly1305(key_encryption_key, message, associated_data, Some(&nonce))));
let credential_id: CredentialId = encrypted_serialized_credential
.try_into()
.map_err(|_| Error::RequestTooLarge)?;
Ok(credential_id)
}
pub fn serialize(&self) -> Result<SerializedCredential> {
trussed::cbor_serialize_bytes(self).map_err(|_| Error::Other)
}
pub fn deserialize(bytes: &SerializedCredential) -> Result<Self> {
match ctap_types::serde::cbor_deserialize(bytes) {
Ok(s) => Ok(s),
Err(_) => {
info_now!("could not deserialize {:?}", bytes);
Err(Error::Other)
}
}
}
pub fn try_from<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
descriptor: &PublicKeyCredentialDescriptor,
) -> Result<Self> {
Self::try_from_bytes(authnr, rp_id_hash, &descriptor.id)
}
pub fn | <UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
id: &[u8],
) -> Result<Self> {
let mut cred: Bytes<MAX_CREDENTIAL_ID_LENGTH> = Bytes::new();
cred.extend_from_slice(id)
.map_err(|_| Error::InvalidCredential)?;
let encrypted_serialized = EncryptedSerializedCredential::try_from(CredentialId(cred))?;
let kek = authnr
.state
.persistent
.key_encryption_key(&mut authnr.trussed)?;
let serialized = try_syscall!(authnr.trussed.decrypt_chacha8poly1305(
// TODO: use RpId as associated data here?
kek,
&encrypted_serialized.0.ciphertext,
&rp_id_hash[..],
&encrypted_serialized.0.nonce,
&encrypted_serialized.0.tag,
))
.map_err(|_| Error::InvalidCredential)?
.plaintext
.ok_or(Error::InvalidCredential)?;
let credential =
Credential::deserialize(&serialized).map_err(|_| Error::InvalidCredential)?;
Ok(credential)
}
// Remove inessential metadata from credential.
//
// Called by the `id` method, see its documentation.
pub fn strip(&self) -> Self {
info_now!(":: stripping ID");
let mut stripped = self.clone();
let data = &mut stripped.data;
data.rp.name = None;
data.rp.icon = None;
data.user.icon = None;
data.user.name = None;
data.user.display_name = None;
// data.hmac_secret = None;
// data.cred_protect = None;
stripped
}
}
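// A minimal illustration (assuming the field layout defined above) of how
// `strip()` discards RP and user metadata before a credential is sealed into
// a keyhandle; all concrete values here are hypothetical.
#[cfg(test)]
mod strip_example {
    use super::*;
    use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};

    #[test]
    fn strip_discards_metadata() {
        let rp = PublicKeyCredentialRpEntity {
            id: String::from("example.com"),
            name: Some(String::from("Example RP")),
            icon: None,
        };
        let user = PublicKeyCredentialUserEntity {
            id: Bytes::from_slice(&[1, 2, 3]).unwrap(),
            icon: None,
            name: Some(String::from("jane")),
            display_name: Some(String::from("Jane Doe")),
        };
        let credential = Credential::new(
            CtapVersion::Fido21Pre,
            &rp,
            &user,
            -7, // ES256
            Key::WrappedKey(Bytes::from_slice(&[0; 4]).unwrap()),
            1,
            None,
            None,
            [0u8; 12],
        );
        let stripped = credential.strip();
        assert!(stripped.data.rp.name.is_none());
        assert!(stripped.data.user.name.is_none());
        assert!(stripped.data.user.display_name.is_none());
    }
}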
#[cfg(test)]
mod test {
use super::*;
fn credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: String::from("John Doe"),
name: None,
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: Bytes::from_slice(&[1, 2, 3]).unwrap(),
icon: None,
name: None,
display_name: None,
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(Bytes::from_slice(&[1, 2, 3]).unwrap()),
hmac_secret: Some(false),
cred_protect: None,
}
}
fn random_bytes<const N: usize>() -> Bytes<N> {
use rand::{
distributions::{Distribution, Uniform},
rngs::OsRng,
RngCore,
};
let mut bytes = Bytes::default();
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
bytes.resize_default(n).unwrap();
OsRng.fill_bytes(&mut bytes);
bytes
}
#[allow(dead_code)]
fn maybe_random_bytes<const N: usize>() -> Option<Bytes<N>> {
use rand::{rngs::OsRng, RngCore};
        if OsRng.next_u32() & 1 != 0 {
Some(random_bytes())
} else {
None
}
}
fn random_string<const N: usize>() -> String<N> {
use rand::{
distributions::{Alphanumeric, Distribution, Uniform},
rngs::OsRng,
Rng,
};
use std::str::FromStr;
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
let std_string: std::string::String = OsRng
.sample_iter(&Alphanumeric)
.take(n)
.map(char::from)
.collect();
String::from_str(&std_string).unwrap()
}
fn maybe_random_string<const N: usize>() -> Option<String<N>> {
use rand::{rngs::OsRng, RngCore};
        if OsRng.next_u32() & 1 != 0 {
Some(random_string())
} else {
None
}
}
fn random_credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: random_string(),
name: maybe_random_string(),
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: random_bytes(), //Bytes::from_slice(&[1,2,3]).unwrap(),
icon: maybe_random_string(),
name: maybe_random_string(),
display_name: maybe_random_string(),
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(random_bytes()),
hmac_secret: Some(false),
cred_protect: None,
}
}
#[test]
fn skip_credential_data_options() {
use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
let credential_data = credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
let credential_data = random_credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
}
// use quickcheck::TestResult;
// quickcheck::quickcheck! {
// fn prop(
// rp_id: std::string::String,
// rp_name: Option<std::string::String>,
// rp_url: Option<std::string::String>,
// user_id: std::vec::Vec<u8>,
// user_name: Option<std::string::String>,
// creation_time: u32,
// use_counter: bool,
// algorithm: i32
// ) -> TestResult {
// use std::str::FromStr;
// use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
// use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
// let rp_name = &rp_name.as_ref().map(|string| string.as_str());
// let rp_url = &rp_url.as_ref().map(|string| string.as_str());
// let user_name = &user_name.as_ref().map(|string| string.as_str());
// let discard = [
// rp_id.len() > 256,
// rp_name.unwrap_or(&"").len() > 64,
// rp_url.unwrap_or(&"").len() > 64,
// user_id.len() > 64,
// user_name.unwrap_or(&"").len() > 64,
// ];
// if discard.iter().any(|&x| x) {
// return TestResult::discard();
// }
// let credential_data = CredentialData {
// rp: PublicKeyCredentialRpEntity {
// id: String::from_str(&rp_id).unwrap(),
// name: rp_name.map(|rp_name| String::from_str(rp_name).unwrap()),
// url: rp_url.map(|rp_url| String::from_str(rp_url).unwrap()),
// },
// user: PublicKeyCredentialUserEntity {
// id: Bytes::from_slice(&user_id).unwrap(),
// icon: maybe_random_string(),
// name: user_name.map(|user_name| String::from_str(user_name).unwrap()),
// display_name: maybe_random_string(),
// },
// creation_time,
// use_counter,
// algorithm,
// key: Key::WrappedKey(random_bytes()),
// hmac_secret: Some(false),
// cred_protect: None,
// };
// let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
// let deserialized: CredentialData = deserialize(&serialization).unwrap();
// TestResult::from_bool(credential_data == deserialized)
// }
// }
}
| try_from_bytes | identifier_name |
credential.rs | //! Internal `Credential` and external `CredentialId` ("keyhandle").
use core::cmp::Ordering;
use trussed::{client, syscall, try_syscall, types::KeyId};
pub(crate) use ctap_types::{
// authenticator::{ctap1, ctap2, Error, Request, Response},
ctap2::credential_management::CredentialProtectionPolicy,
sizes::*,
webauthn::PublicKeyCredentialDescriptor,
Bytes,
String,
};
use crate::{Authenticator, Error, Result, UserPresence};
| #[derive(Copy, Clone, Debug, serde::Deserialize, serde::Serialize)]
pub enum CtapVersion {
U2fV2,
Fido20,
Fido21Pre,
}
/// External ID of a credential, commonly known as "keyhandle".
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct CredentialId(pub Bytes<MAX_CREDENTIAL_ID_LENGTH>);
// TODO: how to determine necessary size?
// pub type SerializedCredential = Bytes<512>;
// pub type SerializedCredential = Bytes<256>;
pub(crate) type SerializedCredential = trussed::types::Message;
#[derive(Clone, Debug)]
struct EncryptedSerializedCredential(pub trussed::api::reply::Encrypt);
impl TryFrom<EncryptedSerializedCredential> for CredentialId {
type Error = Error;
fn try_from(esc: EncryptedSerializedCredential) -> Result<CredentialId> {
Ok(CredentialId(
trussed::cbor_serialize_bytes(&esc.0).map_err(|_| Error::Other)?,
))
}
}
impl TryFrom<CredentialId> for EncryptedSerializedCredential {
// tag = 16B
// nonce = 12B
type Error = Error;
fn try_from(cid: CredentialId) -> Result<EncryptedSerializedCredential> {
let encrypted_serialized_credential = EncryptedSerializedCredential(
ctap_types::serde::cbor_deserialize(&cid.0).map_err(|_| Error::InvalidCredential)?,
);
Ok(encrypted_serialized_credential)
}
}
/// Credential keys can either be "discoverable" or not.
///
/// The FIDO Alliance likes to refer to "resident keys" as "(client-side) discoverable public key
/// credential sources" now ;)
#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum Key {
ResidentKey(KeyId),
// THIS USED TO BE 92 NOW IT'S 96 or 97 or so... waddup?
WrappedKey(Bytes<128>),
}
/// The main content of a `Credential`.
#[derive(
Clone, Debug, PartialEq, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed,
)]
pub struct CredentialData {
// id, name, url
pub rp: ctap_types::webauthn::PublicKeyCredentialRpEntity,
// id, icon, name, display_name
pub user: ctap_types::webauthn::PublicKeyCredentialUserEntity,
// can be just a counter, need to be able to determine "latest"
pub creation_time: u32,
// for stateless deterministic keys, it seems CTAP2 (but not CTAP1) makes signature counters optional
use_counter: bool,
// P256 or Ed25519
pub algorithm: i32,
// for RK in non-deterministic mode: refers to actual key
// TODO(implement enums in cbor-deser): for all others, is a wrapped key
// --> use above Key enum
// #[serde(skip_serializing_if = "Option::is_none")]
// key_id: Option<KeyId>,
pub key: Key,
// extensions
#[serde(skip_serializing_if = "Option::is_none")]
pub hmac_secret: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub cred_protect: Option<CredentialProtectionPolicy>,
// TODO: add `sig_counter: Option<CounterId>`,
// and grant RKs a per-credential sig-counter.
}
// TODO: figure out sizes
// We may or may not follow https://github.com/satoshilabs/slips/blob/master/slip-0022.md
/// The core structure this authenticator creates and uses.
#[derive(Clone, Debug, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed)]
pub struct Credential {
ctap: CtapVersion,
pub data: CredentialData,
nonce: Bytes<12>,
}
// Alas... it would be more symmetrical to have Credential { meta, data },
// but let's not break binary compatibility for this.
//
// struct Metadata {
// ctap: CtapVersion,
// nonce: Bytes<12>,
// }
impl core::ops::Deref for Credential {
type Target = CredentialData;
fn deref(&self) -> &Self::Target {
&self.data
}
}
/// Compare credentials based on key + timestamp.
///
/// Comparison based on the creation timestamp alone would likely be good enough.
impl PartialEq for Credential {
fn eq(&self, other: &Self) -> bool {
(self.creation_time == other.creation_time) && (self.key == other.key)
}
}
impl PartialEq<&Credential> for Credential {
fn eq(&self, other: &&Self) -> bool {
self == *other
}
}
impl Eq for Credential {}
impl Ord for Credential {
fn cmp(&self, other: &Self) -> Ordering {
self.data.creation_time.cmp(&other.data.creation_time)
}
}
/// Order by timestamp of creation.
impl PartialOrd for Credential {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<&Credential> for Credential {
fn partial_cmp(&self, other: &&Self) -> Option<Ordering> {
Some(self.cmp(*other))
}
}
// Bad idea - huge stack
// pub(crate) type CredentialList = Vec<Credential, {ctap_types::sizes::MAX_CREDENTIAL_COUNT_IN_LIST}>;
impl From<CredentialId> for PublicKeyCredentialDescriptor {
fn from(id: CredentialId) -> PublicKeyCredentialDescriptor {
PublicKeyCredentialDescriptor {
id: id.0,
key_type: {
let mut key_type = String::new();
key_type.push_str("public-key").unwrap();
key_type
},
}
}
}
impl Credential {
#[allow(clippy::too_many_arguments)]
pub fn new(
ctap: CtapVersion,
// parameters: &ctap2::make_credential::Parameters,
rp: &ctap_types::webauthn::PublicKeyCredentialRpEntity,
user: &ctap_types::webauthn::PublicKeyCredentialUserEntity,
algorithm: i32,
key: Key,
timestamp: u32,
hmac_secret: Option<bool>,
cred_protect: Option<CredentialProtectionPolicy>,
nonce: [u8; 12],
) -> Self {
info!("credential for algorithm {}", algorithm);
let data = CredentialData {
rp: rp.clone(),
user: user.clone(),
creation_time: timestamp,
use_counter: true,
algorithm,
key,
hmac_secret,
cred_protect,
};
Credential {
ctap,
data,
nonce: Bytes::from_slice(&nonce).unwrap(),
}
}
// ID (or "keyhandle") for the credential.
//
// Originally, the entire data was serialized, and its encryption
// (binding RP as associated data) used as a keyhandle.
//
// However, this leads to problems with relying parties. According to the old U2F
// spec, the length of a keyhandle is encoded as one byte, whereas this procedure would
// generate keyhandles of length ~320 bytes.
//
// Therefore, inessential metadata is stripped before serialization, ensuring
// the ID will stay below 255 bytes.
//
    // Existing keyhandles can still be decoded.
pub fn id<T: client::Chacha8Poly1305 + client::Sha256>(
&self,
trussed: &mut T,
key_encryption_key: KeyId,
rp_id_hash: Option<&Bytes<32>>,
) -> Result<CredentialId> {
let serialized_credential = self.strip().serialize()?;
let message = &serialized_credential;
// info!("serialized cred = {:?}", message).ok();
let rp_id_hash: Bytes<32> = if let Some(hash) = rp_id_hash {
hash.clone()
} else {
syscall!(trussed.hash_sha256(self.rp.id.as_ref()))
.hash
.to_bytes()
.map_err(|_| Error::Other)?
};
let associated_data = &rp_id_hash[..];
let nonce: [u8; 12] = self.nonce.as_slice().try_into().unwrap();
let encrypted_serialized_credential = EncryptedSerializedCredential(syscall!(trussed
.encrypt_chacha8poly1305(key_encryption_key, message, associated_data, Some(&nonce))));
let credential_id: CredentialId = encrypted_serialized_credential
.try_into()
.map_err(|_| Error::RequestTooLarge)?;
Ok(credential_id)
}
pub fn serialize(&self) -> Result<SerializedCredential> {
trussed::cbor_serialize_bytes(self).map_err(|_| Error::Other)
}
pub fn deserialize(bytes: &SerializedCredential) -> Result<Self> {
match ctap_types::serde::cbor_deserialize(bytes) {
Ok(s) => Ok(s),
Err(_) => {
info_now!("could not deserialize {:?}", bytes);
Err(Error::Other)
}
}
}
pub fn try_from<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
descriptor: &PublicKeyCredentialDescriptor,
) -> Result<Self> {
Self::try_from_bytes(authnr, rp_id_hash, &descriptor.id)
}
pub fn try_from_bytes<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
id: &[u8],
) -> Result<Self> {
let mut cred: Bytes<MAX_CREDENTIAL_ID_LENGTH> = Bytes::new();
cred.extend_from_slice(id)
.map_err(|_| Error::InvalidCredential)?;
let encrypted_serialized = EncryptedSerializedCredential::try_from(CredentialId(cred))?;
let kek = authnr
.state
.persistent
.key_encryption_key(&mut authnr.trussed)?;
let serialized = try_syscall!(authnr.trussed.decrypt_chacha8poly1305(
// TODO: use RpId as associated data here?
kek,
&encrypted_serialized.0.ciphertext,
&rp_id_hash[..],
&encrypted_serialized.0.nonce,
&encrypted_serialized.0.tag,
))
.map_err(|_| Error::InvalidCredential)?
.plaintext
.ok_or(Error::InvalidCredential)?;
let credential =
Credential::deserialize(&serialized).map_err(|_| Error::InvalidCredential)?;
Ok(credential)
}
// Remove inessential metadata from credential.
//
// Called by the `id` method, see its documentation.
pub fn strip(&self) -> Self {
info_now!(":: stripping ID");
let mut stripped = self.clone();
let data = &mut stripped.data;
data.rp.name = None;
data.rp.icon = None;
data.user.icon = None;
data.user.name = None;
data.user.display_name = None;
// data.hmac_secret = None;
// data.cred_protect = None;
stripped
}
}
#[cfg(test)]
mod test {
use super::*;
fn credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: String::from("John Doe"),
name: None,
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: Bytes::from_slice(&[1, 2, 3]).unwrap(),
icon: None,
name: None,
display_name: None,
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(Bytes::from_slice(&[1, 2, 3]).unwrap()),
hmac_secret: Some(false),
cred_protect: None,
}
}
fn random_bytes<const N: usize>() -> Bytes<N> {
use rand::{
distributions::{Distribution, Uniform},
rngs::OsRng,
RngCore,
};
let mut bytes = Bytes::default();
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
bytes.resize_default(n).unwrap();
OsRng.fill_bytes(&mut bytes);
bytes
}
#[allow(dead_code)]
fn maybe_random_bytes<const N: usize>() -> Option<Bytes<N>> {
use rand::{rngs::OsRng, RngCore};
        if OsRng.next_u32() & 1 != 0 {
Some(random_bytes())
} else {
None
}
}
fn random_string<const N: usize>() -> String<N> {
use rand::{
distributions::{Alphanumeric, Distribution, Uniform},
rngs::OsRng,
Rng,
};
use std::str::FromStr;
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
let std_string: std::string::String = OsRng
.sample_iter(&Alphanumeric)
.take(n)
.map(char::from)
.collect();
String::from_str(&std_string).unwrap()
}
fn maybe_random_string<const N: usize>() -> Option<String<N>> {
use rand::{rngs::OsRng, RngCore};
        if OsRng.next_u32() & 1 != 0 {
Some(random_string())
} else {
None
}
}
fn random_credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: random_string(),
name: maybe_random_string(),
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: random_bytes(), //Bytes::from_slice(&[1,2,3]).unwrap(),
icon: maybe_random_string(),
name: maybe_random_string(),
display_name: maybe_random_string(),
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(random_bytes()),
hmac_secret: Some(false),
cred_protect: None,
}
}
#[test]
fn skip_credential_data_options() {
use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
let credential_data = credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
let credential_data = random_credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
}
// use quickcheck::TestResult;
// quickcheck::quickcheck! {
// fn prop(
// rp_id: std::string::String,
// rp_name: Option<std::string::String>,
// rp_url: Option<std::string::String>,
// user_id: std::vec::Vec<u8>,
// user_name: Option<std::string::String>,
// creation_time: u32,
// use_counter: bool,
// algorithm: i32
// ) -> TestResult {
// use std::str::FromStr;
// use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
// use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
// let rp_name = &rp_name.as_ref().map(|string| string.as_str());
// let rp_url = &rp_url.as_ref().map(|string| string.as_str());
// let user_name = &user_name.as_ref().map(|string| string.as_str());
// let discard = [
// rp_id.len() > 256,
// rp_name.unwrap_or(&"").len() > 64,
// rp_url.unwrap_or(&"").len() > 64,
// user_id.len() > 64,
// user_name.unwrap_or(&"").len() > 64,
// ];
// if discard.iter().any(|&x| x) {
// return TestResult::discard();
// }
// let credential_data = CredentialData {
// rp: PublicKeyCredentialRpEntity {
// id: String::from_str(&rp_id).unwrap(),
// name: rp_name.map(|rp_name| String::from_str(rp_name).unwrap()),
// url: rp_url.map(|rp_url| String::from_str(rp_url).unwrap()),
// },
// user: PublicKeyCredentialUserEntity {
// id: Bytes::from_slice(&user_id).unwrap(),
// icon: maybe_random_string(),
// name: user_name.map(|user_name| String::from_str(user_name).unwrap()),
// display_name: maybe_random_string(),
// },
// creation_time,
// use_counter,
// algorithm,
// key: Key::WrappedKey(random_bytes()),
// hmac_secret: Some(false),
// cred_protect: None,
// };
// let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
// let deserialized: CredentialData = deserialize(&serialization).unwrap();
// TestResult::from_bool(credential_data == deserialized)
// }
// }
} | /// As signaled in `get_info`.
///
/// Eventual goal is full support for the CTAP2.1 specification. | random_line_split |
credential.rs | //! Internal `Credential` and external `CredentialId` ("keyhandle").
use core::cmp::Ordering;
use trussed::{client, syscall, try_syscall, types::KeyId};
pub(crate) use ctap_types::{
// authenticator::{ctap1, ctap2, Error, Request, Response},
ctap2::credential_management::CredentialProtectionPolicy,
sizes::*,
webauthn::PublicKeyCredentialDescriptor,
Bytes,
String,
};
use crate::{Authenticator, Error, Result, UserPresence};
/// As signaled in `get_info`.
///
/// Eventual goal is full support for the CTAP2.1 specification.
#[derive(Copy, Clone, Debug, serde::Deserialize, serde::Serialize)]
pub enum CtapVersion {
U2fV2,
Fido20,
Fido21Pre,
}
/// External ID of a credential, commonly known as "keyhandle".
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct CredentialId(pub Bytes<MAX_CREDENTIAL_ID_LENGTH>);
// TODO: how to determine necessary size?
// pub type SerializedCredential = Bytes<512>;
// pub type SerializedCredential = Bytes<256>;
pub(crate) type SerializedCredential = trussed::types::Message;
#[derive(Clone, Debug)]
struct EncryptedSerializedCredential(pub trussed::api::reply::Encrypt);
impl TryFrom<EncryptedSerializedCredential> for CredentialId {
type Error = Error;
fn try_from(esc: EncryptedSerializedCredential) -> Result<CredentialId> {
Ok(CredentialId(
trussed::cbor_serialize_bytes(&esc.0).map_err(|_| Error::Other)?,
))
}
}
impl TryFrom<CredentialId> for EncryptedSerializedCredential {
// tag = 16B
// nonce = 12B
type Error = Error;
fn try_from(cid: CredentialId) -> Result<EncryptedSerializedCredential> {
let encrypted_serialized_credential = EncryptedSerializedCredential(
ctap_types::serde::cbor_deserialize(&cid.0).map_err(|_| Error::InvalidCredential)?,
);
Ok(encrypted_serialized_credential)
}
}
/// Credential keys can either be "discoverable" or not.
///
/// The FIDO Alliance likes to refer to "resident keys" as "(client-side) discoverable public key
/// credential sources" now ;)
#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum Key {
ResidentKey(KeyId),
// THIS USED TO BE 92 NOW IT'S 96 or 97 or so... waddup?
WrappedKey(Bytes<128>),
}
/// The main content of a `Credential`.
#[derive(
Clone, Debug, PartialEq, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed,
)]
pub struct CredentialData {
// id, name, url
pub rp: ctap_types::webauthn::PublicKeyCredentialRpEntity,
// id, icon, name, display_name
pub user: ctap_types::webauthn::PublicKeyCredentialUserEntity,
// can be just a counter, need to be able to determine "latest"
pub creation_time: u32,
// for stateless deterministic keys, it seems CTAP2 (but not CTAP1) makes signature counters optional
use_counter: bool,
// P256 or Ed25519
pub algorithm: i32,
// for RK in non-deterministic mode: refers to actual key
// TODO(implement enums in cbor-deser): for all others, is a wrapped key
// --> use above Key enum
// #[serde(skip_serializing_if = "Option::is_none")]
// key_id: Option<KeyId>,
pub key: Key,
// extensions
#[serde(skip_serializing_if = "Option::is_none")]
pub hmac_secret: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub cred_protect: Option<CredentialProtectionPolicy>,
// TODO: add `sig_counter: Option<CounterId>`,
// and grant RKs a per-credential sig-counter.
}
// TODO: figure out sizes
// We may or may not follow https://github.com/satoshilabs/slips/blob/master/slip-0022.md
/// The core structure this authenticator creates and uses.
#[derive(Clone, Debug, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed)]
pub struct Credential {
ctap: CtapVersion,
pub data: CredentialData,
nonce: Bytes<12>,
}
// Alas... it would be more symmetrical to have Credential { meta, data },
// but let's not break binary compatibility for this.
//
// struct Metadata {
// ctap: CtapVersion,
// nonce: Bytes<12>,
// }
impl core::ops::Deref for Credential {
type Target = CredentialData;
fn deref(&self) -> &Self::Target {
&self.data
}
}
/// Compare credentials based on key + timestamp.
///
/// Comparison based on the creation timestamp alone would likely be good enough.
impl PartialEq for Credential {
fn eq(&self, other: &Self) -> bool {
(self.creation_time == other.creation_time) && (self.key == other.key)
}
}
impl PartialEq<&Credential> for Credential {
fn eq(&self, other: &&Self) -> bool {
self == *other
}
}
impl Eq for Credential {}
impl Ord for Credential {
fn cmp(&self, other: &Self) -> Ordering {
self.data.creation_time.cmp(&other.data.creation_time)
}
}
/// Order by timestamp of creation.
impl PartialOrd for Credential {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<&Credential> for Credential {
fn partial_cmp(&self, other: &&Self) -> Option<Ordering> {
Some(self.cmp(*other))
}
}
// Bad idea - huge stack
// pub(crate) type CredentialList = Vec<Credential, {ctap_types::sizes::MAX_CREDENTIAL_COUNT_IN_LIST}>;
impl From<CredentialId> for PublicKeyCredentialDescriptor {
fn from(id: CredentialId) -> PublicKeyCredentialDescriptor {
PublicKeyCredentialDescriptor {
id: id.0,
key_type: {
let mut key_type = String::new();
key_type.push_str("public-key").unwrap();
key_type
},
}
}
}
impl Credential {
#[allow(clippy::too_many_arguments)]
pub fn new(
ctap: CtapVersion,
// parameters: &ctap2::make_credential::Parameters,
rp: &ctap_types::webauthn::PublicKeyCredentialRpEntity,
user: &ctap_types::webauthn::PublicKeyCredentialUserEntity,
algorithm: i32,
key: Key,
timestamp: u32,
hmac_secret: Option<bool>,
cred_protect: Option<CredentialProtectionPolicy>,
nonce: [u8; 12],
) -> Self {
info!("credential for algorithm {}", algorithm);
let data = CredentialData {
rp: rp.clone(),
user: user.clone(),
creation_time: timestamp,
use_counter: true,
algorithm,
key,
hmac_secret,
cred_protect,
};
Credential {
ctap,
data,
nonce: Bytes::from_slice(&nonce).unwrap(),
}
}
// ID (or "keyhandle") for the credential.
//
// Originally, the entire data was serialized, and its encryption
// (binding RP as associated data) used as a keyhandle.
//
// However, this leads to problems with relying parties. According to the old U2F
// spec, the length of a keyhandle is encoded as one byte, whereas this procedure would
// generate keyhandles of length ~320 bytes.
//
// Therefore, inessential metadata is stripped before serialization, ensuring
// the ID will stay below 255 bytes.
//
    // Existing keyhandles can still be decoded.
pub fn id<T: client::Chacha8Poly1305 + client::Sha256>(
&self,
trussed: &mut T,
key_encryption_key: KeyId,
rp_id_hash: Option<&Bytes<32>>,
) -> Result<CredentialId> {
let serialized_credential = self.strip().serialize()?;
let message = &serialized_credential;
// info!("serialized cred = {:?}", message).ok();
let rp_id_hash: Bytes<32> = if let Some(hash) = rp_id_hash {
hash.clone()
} else {
syscall!(trussed.hash_sha256(self.rp.id.as_ref()))
.hash
.to_bytes()
.map_err(|_| Error::Other)?
};
let associated_data = &rp_id_hash[..];
let nonce: [u8; 12] = self.nonce.as_slice().try_into().unwrap();
let encrypted_serialized_credential = EncryptedSerializedCredential(syscall!(trussed
.encrypt_chacha8poly1305(key_encryption_key, message, associated_data, Some(&nonce))));
let credential_id: CredentialId = encrypted_serialized_credential
.try_into()
.map_err(|_| Error::RequestTooLarge)?;
Ok(credential_id)
}
pub fn serialize(&self) -> Result<SerializedCredential> {
trussed::cbor_serialize_bytes(self).map_err(|_| Error::Other)
}
pub fn deserialize(bytes: &SerializedCredential) -> Result<Self> {
match ctap_types::serde::cbor_deserialize(bytes) {
Ok(s) => Ok(s),
Err(_) => |
}
}
pub fn try_from<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
descriptor: &PublicKeyCredentialDescriptor,
) -> Result<Self> {
Self::try_from_bytes(authnr, rp_id_hash, &descriptor.id)
}
pub fn try_from_bytes<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
id: &[u8],
) -> Result<Self> {
let mut cred: Bytes<MAX_CREDENTIAL_ID_LENGTH> = Bytes::new();
cred.extend_from_slice(id)
.map_err(|_| Error::InvalidCredential)?;
let encrypted_serialized = EncryptedSerializedCredential::try_from(CredentialId(cred))?;
let kek = authnr
.state
.persistent
.key_encryption_key(&mut authnr.trussed)?;
let serialized = try_syscall!(authnr.trussed.decrypt_chacha8poly1305(
// TODO: use RpId as associated data here?
kek,
&encrypted_serialized.0.ciphertext,
&rp_id_hash[..],
&encrypted_serialized.0.nonce,
&encrypted_serialized.0.tag,
))
.map_err(|_| Error::InvalidCredential)?
.plaintext
.ok_or(Error::InvalidCredential)?;
let credential =
Credential::deserialize(&serialized).map_err(|_| Error::InvalidCredential)?;
Ok(credential)
}
// Remove inessential metadata from credential.
//
// Called by the `id` method, see its documentation.
pub fn strip(&self) -> Self {
info_now!(":: stripping ID");
let mut stripped = self.clone();
let data = &mut stripped.data;
data.rp.name = None;
data.rp.icon = None;
data.user.icon = None;
data.user.name = None;
data.user.display_name = None;
// data.hmac_secret = None;
// data.cred_protect = None;
stripped
}
}
#[cfg(test)]
mod test {
use super::*;
fn credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: String::from("John Doe"),
name: None,
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: Bytes::from_slice(&[1, 2, 3]).unwrap(),
icon: None,
name: None,
display_name: None,
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(Bytes::from_slice(&[1, 2, 3]).unwrap()),
hmac_secret: Some(false),
cred_protect: None,
}
}
fn random_bytes<const N: usize>() -> Bytes<N> {
use rand::{
distributions::{Distribution, Uniform},
rngs::OsRng,
RngCore,
};
let mut bytes = Bytes::default();
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
bytes.resize_default(n).unwrap();
OsRng.fill_bytes(&mut bytes);
bytes
}
#[allow(dead_code)]
fn maybe_random_bytes<const N: usize>() -> Option<Bytes<N>> {
use rand::{rngs::OsRng, RngCore};
        if OsRng.next_u32() & 1 != 0 {
Some(random_bytes())
} else {
None
}
}
fn random_string<const N: usize>() -> String<N> {
use rand::{
distributions::{Alphanumeric, Distribution, Uniform},
rngs::OsRng,
Rng,
};
use std::str::FromStr;
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
let std_string: std::string::String = OsRng
.sample_iter(&Alphanumeric)
.take(n)
.map(char::from)
.collect();
String::from_str(&std_string).unwrap()
}
fn maybe_random_string<const N: usize>() -> Option<String<N>> {
use rand::{rngs::OsRng, RngCore};
        if OsRng.next_u32() & 1 != 0 {
Some(random_string())
} else {
None
}
}
fn random_credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: random_string(),
name: maybe_random_string(),
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: random_bytes(), //Bytes::from_slice(&[1,2,3]).unwrap(),
icon: maybe_random_string(),
name: maybe_random_string(),
display_name: maybe_random_string(),
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(random_bytes()),
hmac_secret: Some(false),
cred_protect: None,
}
}
#[test]
fn skip_credential_data_options() {
use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
let credential_data = credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
let credential_data = random_credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
}
// use quickcheck::TestResult;
// quickcheck::quickcheck! {
// fn prop(
// rp_id: std::string::String,
// rp_name: Option<std::string::String>,
// rp_url: Option<std::string::String>,
// user_id: std::vec::Vec<u8>,
// user_name: Option<std::string::String>,
// creation_time: u32,
// use_counter: bool,
// algorithm: i32
// ) -> TestResult {
// use std::str::FromStr;
// use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
// use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
// let rp_name = &rp_name.as_ref().map(|string| string.as_str());
// let rp_url = &rp_url.as_ref().map(|string| string.as_str());
// let user_name = &user_name.as_ref().map(|string| string.as_str());
// let discard = [
// rp_id.len() > 256,
// rp_name.unwrap_or(&"").len() > 64,
// rp_url.unwrap_or(&"").len() > 64,
// user_id.len() > 64,
// user_name.unwrap_or(&"").len() > 64,
// ];
// if discard.iter().any(|&x| x) {
// return TestResult::discard();
// }
// let credential_data = CredentialData {
// rp: PublicKeyCredentialRpEntity {
// id: String::from_str(&rp_id).unwrap(),
// name: rp_name.map(|rp_name| String::from_str(rp_name).unwrap()),
// url: rp_url.map(|rp_url| String::from_str(rp_url).unwrap()),
// },
// user: PublicKeyCredentialUserEntity {
// id: Bytes::from_slice(&user_id).unwrap(),
// icon: maybe_random_string(),
// name: user_name.map(|user_name| String::from_str(user_name).unwrap()),
// display_name: maybe_random_string(),
// },
// creation_time,
// use_counter,
// algorithm,
// key: Key::WrappedKey(random_bytes()),
// hmac_secret: Some(false),
// cred_protect: None,
// };
// let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
// let deserialized: CredentialData = deserialize(&serialization).unwrap();
// TestResult::from_bool(credential_data == deserialized)
// }
// }
}
| {
info_now!("could not deserialize {:?}", bytes);
Err(Error::Other)
} | conditional_block |
lib.rs | /*
* Copyright 2015-2017 Two Pore Guys, Inc.
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted providing that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
extern crate libc;
extern crate block;
use std::fmt;
use std::ffi::{CString, CStr};
use std::collections::hash_map::HashMap;
use std::os::raw::{c_char, c_void};
use std::ptr::{null, null_mut};
use std::mem::transmute;
use std::cell::RefCell;
use std::rc::Weak;
use libc::free;
use block::{Block, ConcreteBlock};
macro_rules! to_cstr {
($e:expr) => (CString::new($e).unwrap())
}
macro_rules! null_block {
() => (transmute::<*mut c_void, _>(null_mut()))
}
#[repr(C)]
#[derive(Debug)]
pub enum RawType
{
Null,
Bool,
Uint64,
Int64,
Double,
Date,
String,
Binary,
Fd,
Dictionary,
Array,
Error,
}
#[repr(C)]
#[derive(Debug)]
pub enum CallStatus
{
InProgress,
MoreAvailable,
Done,
Error,
Aborted,
Ended
}
pub enum RawObject {}
pub enum RawConnection {}
pub enum RawClient {}
pub enum RawCall {}
pub struct Object
{
value: *mut RawObject,
}
pub struct Connection
{
value: *mut RawConnection
}
pub struct Client
{
value: *mut RawClient,
connection: Connection
}
pub struct Call<'a>
{
connection: &'a Connection,
value: *mut RawCall
}
pub struct Instance<'a>
{
connection: &'a Connection,
path: String
}
pub struct Interface<'a>
{
instance: &'a Instance<'a>,
name: String
}
#[derive(Clone, Debug)]
pub enum Value
{
Null,
Bool(bool),
Uint64(u64),
Int64(i64),
Double(f64),
Date(u64),
String(String),
Binary(Vec<u8>),
Array(Vec<Value>),
Dictionary(HashMap<String, Value>),
Object(Object),
Fd(i32),
Error(Error)
}
#[derive(Clone, Debug)]
pub struct |
{
code: u32,
message: String,
stack_trace: Box<Value>,
extra: Box<Value>
}
#[link(name = "rpc")]
extern {
/* rpc/object.h */
pub fn rpc_get_type(value: *mut RawObject) -> RawType;
pub fn rpc_hash(value: *mut RawObject) -> u32;
pub fn rpc_null_create() -> *mut RawObject;
pub fn rpc_bool_create(value: bool) -> *mut RawObject;
pub fn rpc_bool_get_value(value: *mut RawObject) -> bool;
pub fn rpc_uint64_create(value: u64) -> *mut RawObject;
pub fn rpc_uint64_get_value(value: *mut RawObject) -> u64;
pub fn rpc_int64_create(value: i64) -> *mut RawObject;
pub fn rpc_int64_get_value(value: *mut RawObject) -> i64;
pub fn rpc_double_create(value: f64) -> *mut RawObject;
pub fn rpc_double_get_value(value: *mut RawObject) -> f64;
pub fn rpc_date_create(value: u64) -> *mut RawObject;
pub fn rpc_date_get_value(obj: *mut RawObject) -> u64;
pub fn rpc_string_create(value: *const c_char) -> *mut RawObject;
pub fn rpc_string_get_string_ptr(value: *mut RawObject) -> *const c_char;
pub fn rpc_data_create(ptr: *const u8, len: usize, dtor: *const c_void) -> *mut RawObject;
pub fn rpc_array_create() -> *mut RawObject;
pub fn rpc_dictionary_create() -> *mut RawObject;
pub fn rpc_array_append_value(obj: *mut RawObject, value: *mut RawObject);
pub fn rpc_dictionary_set_value(obj: *mut RawObject, key: *const c_char, value: *mut RawObject);
pub fn rpc_fd_create(value: i32) -> *mut RawObject;
pub fn rpc_fd_get_value(obj: *mut RawObject) -> i32;
pub fn rpc_copy_description(value: *mut RawObject) -> *mut c_char;
pub fn rpc_retain(value: *mut RawObject) -> *mut RawObject;
pub fn rpc_release_impl(value: *mut RawObject);
/* rpc/connection.h */
pub fn rpc_connection_call(conn: *mut RawConnection, path: *const c_char,
interface: *const c_char, name: *const c_char,
args: *const RawObject,
callback: &Block<(*mut RawCall,), bool>) -> *mut RawCall;
pub fn rpc_call_status(call: *mut RawCall) -> CallStatus;
pub fn rpc_call_result(call: *mut RawCall) -> *mut RawObject;
pub fn rpc_call_continue(call: *mut RawCall);
pub fn rpc_call_abort(call: *mut RawCall);
pub fn rpc_call_wait(call: *mut RawCall);
/* rpc/client.h */
pub fn rpc_client_create(uri: *const c_char, params: *const RawObject) -> *mut RawClient;
pub fn rpc_client_get_connection(client: *mut RawClient) -> *mut RawConnection;
}
pub trait Create<T> {
fn create(value: T) -> Object;
}
impl Clone for Object {
fn clone(&self) -> Object {
unsafe {
return Object { value: rpc_retain(self.value) }
}
}
}
impl Drop for Object {
fn drop(&mut self) {
unsafe {
rpc_release_impl(self.value)
}
}
}
impl<T> Create<T> for Object where Value: std::convert::From<T> {
fn create(value: T) -> Object {
Object::new(Value::from(value))
}
}
impl From<bool> for Value {
fn from(value: bool) -> Value {
Value::Bool(value)
}
}
impl From<u64> for Value {
fn from(value: u64) -> Value {
Value::Uint64(value)
}
}
impl From<i64> for Value {
fn from(value: i64) -> Value {
Value::Int64(value)
}
}
impl From<f64> for Value {
fn from(value: f64) -> Value {
Value::Double(value)
}
}
impl<'a> From<&'a str> for Value {
fn from(value: &str) -> Value {
Value::String(String::from(value))
}
}
impl From<String> for Value {
fn from(value: String) -> Value {
Value::String(value)
}
}
impl From<Vec<u8>> for Value {
fn from(value: Vec<u8>) -> Value {
Value::Binary(value)
}
}
impl<'a> From<&'a [Value]> for Value {
fn from(value: &[Value]) -> Value {
Value::Array(value.to_vec())
}
}
impl From<Vec<Value>> for Value {
fn from(value: Vec<Value>) -> Value {
Value::Array(value)
}
}
impl<'a> From<HashMap<&'a str, Value>> for Value {
fn from(value: HashMap<&str, Value>) -> Value {
        Value::Dictionary(value.iter().map(|(&k, v)|
(String::from(k), v.clone())
).collect())
}
}
impl From<HashMap<String, Value>> for Value {
fn from(value: HashMap<String, Value>) -> Value {
Value::Dictionary(value)
}
}
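// A minimal sketch (hypothetical values) of the blanket `Create` impl: any
// `T` with `Value: From<T>` converts straight into an `Object`.
#[allow(dead_code)]
fn create_examples() {
    let _flag = Object::create(true);
    let _count = Object::create(42u64);
    let _name = Object::create("hello");
    let _list = Object::create(vec![Value::Int64(-1), Value::Bool(false)]);
}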
impl Object {
pub fn new(value: Value) -> Object {
unsafe {
let obj = match value {
Value::Null => rpc_null_create(),
Value::Bool(val) => rpc_bool_create(val),
Value::Uint64(val) => rpc_uint64_create(val),
Value::Int64(val) => rpc_int64_create(val),
Value::Double(val) => rpc_double_create(val),
Value::Date(val) => rpc_date_create(val),
Value::Fd(val) => rpc_fd_create(val),
Value::Binary(ref val) => rpc_data_create(val.as_ptr(), val.len(), null()),
Value::Object(ref val) => rpc_retain(val.value),
Value::String(ref val) => {
let c_val = to_cstr!(val.as_str());
rpc_string_create(c_val.as_ptr())
},
Value::Array(val) => {
let arr = rpc_array_create();
for i in val {
rpc_array_append_value(arr, Object::new(i).value);
}
arr
},
Value::Dictionary(val) => {
let dict = rpc_dictionary_create();
for (k, v) in val {
let c_key = to_cstr!(k.as_str());
rpc_dictionary_set_value(dict, c_key.as_ptr(), Object::new(v).value);
}
dict
},
                Value::Error(_) => {
                    // Error serialization is not implemented yet; encode as null.
                    rpc_null_create()
                }
};
return Object { value: obj };
}
}
pub fn get_raw_type(&self) -> RawType {
unsafe {
rpc_get_type(self.value)
}
}
pub fn unpack(&self) -> Value {
unsafe {
match self.get_raw_type() {
RawType::Null => Value::Null,
RawType::Bool => Value::Bool(rpc_bool_get_value(self.value)),
RawType::Uint64 => Value::Uint64(rpc_uint64_get_value(self.value)),
RawType::Int64 => Value::Int64(rpc_int64_get_value(self.value)),
RawType::Double => Value::Double(rpc_double_get_value(self.value)),
RawType::String => Value::String(String::from(CStr::from_ptr(
rpc_string_get_string_ptr(self.value)).to_str().unwrap())),
RawType::Date => Value::Date(rpc_date_get_value(self.value)),
                // NOTE: binary, array, dictionary, and error unpacking is not
                // implemented yet; these variants currently decay to Null.
                RawType::Binary => Value::Null,
                RawType::Fd => Value::Fd(rpc_fd_get_value(self.value)),
                RawType::Array => Value::Null,
                RawType::Dictionary => Value::Null,
                RawType::Error => Value::Null,
}
}
}
}
impl std::hash::Hash for Object {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // Delegate to librpc's native hash for the underlying object.
        unsafe {
            state.write_u32(rpc_hash(self.value));
        }
    }
}
impl fmt::Debug for Object {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        unsafe {
            let descr = rpc_copy_description(self.value);
            // Borrow the C string; ownership stays with librpc's allocation,
            // which is released via the explicit `free` below.
            let result = f.write_str(CStr::from_ptr(descr).to_str().unwrap());
            free(descr as *mut libc::c_void);
            result
        }
    }
}
impl<'a> Call<'a> {
pub fn result(&self) -> Option<Value> {
unsafe {
let result = rpc_call_result(self.value);
match result.is_null() {
true => Option::None,
false => Option::Some(Object { value: result }.unpack())
}
}
}
pub fn status(&self) -> CallStatus {
unsafe {
rpc_call_status(self.value)
}
}
pub fn abort(&mut self) {
unsafe {
rpc_call_abort(self.value);
}
}
pub fn resume(&mut self) {
// The original body was empty; presumably this maps to rpc_call_continue().
unsafe {
rpc_call_continue(self.value);
}
}
pub fn wait(&mut self) {
unsafe {
rpc_call_wait(self.value);
}
}
}
impl Connection {
pub fn call(&self, name: &str, path: &str, interface: &str, args: &[Value]) -> Call {
unsafe {
let c_path = to_cstr!(path);
let c_interface = to_cstr!(interface);
let c_name = to_cstr!(name);
let call = rpc_connection_call(
self.value, c_path.as_ptr(), c_interface.as_ptr(), c_name.as_ptr(),
Object::create(args).value, null_block!()
);
Call { value: call, connection: self }
}
}
pub fn call_sync(&self, name: &str, path: &str, interface: &str,
args: &[Value]) -> Option<Value> {
let mut c = self.call(name, path, interface, args);
c.wait();
c.result()
}
pub fn call_async(&self, name: &str, path: &str, interface: &str, args: &[Value],
callback: Box<Fn(&Call) -> bool>) {
unsafe {
let c_path = to_cstr!(path);
let c_interface = to_cstr!(interface);
let c_name = to_cstr!(name);
let block = ConcreteBlock::new(move |raw_call| {
let call = Call { connection: self, value: raw_call };
callback(&call)
});
rpc_connection_call(
self.value, c_path.as_ptr(), c_interface.as_ptr(), c_name.as_ptr(),
Object::create(args).value, &block
);
}
}
}
impl Client {
pub fn connect(uri: &str) -> Client {
unsafe {
let c_uri = to_cstr!(uri);
let client = rpc_client_create(c_uri.as_ptr(), null());
Client {
value: client,
connection: Connection { value: rpc_client_get_connection(client)}
}
}
}
pub fn connection(&self) -> &Connection {
&self.connection
}
pub fn instance(&self, path: &str) -> Instance {
Instance { connection: self.connection(), path: String::from(path) }
}
}
impl<'a> Instance<'a> {
pub fn interfaces(&self) -> HashMap<String, Interface> {
self.connection.call_sync(
"get_interfaces",
self.path.as_str(),
"com.twoporeguys.librpc.Introspectable",
&[][..]
).unwrap() // TODO: convert the resulting Value into HashMap<String, Interface>
}
pub fn interface(&self, name: &str) -> Interface {
Interface { instance: self, name: String::from(name) }
}
}
// These methods were empty stubs. They are stubbed explicitly here, and the
// single-parameter Result alias used originally (not defined in this file) is
// replaced with the local Error type as an assumption.
impl<'a> Interface<'a> {
pub fn call(method: &str, args: &[&Value]) -> Call<'a> {
unimplemented!()
}
pub fn call_sync(method: &str, args: &[&Value]) -> Result<Value, Error> {
unimplemented!()
}
pub fn get(property: &str) -> Result<Value, Error> {
unimplemented!()
}
pub fn set(property: &str, value: &Value) -> Result<(), Error> {
unimplemented!()
}
}
lib.rs
/*
* Copyright 2015-2017 Two Pore Guys, Inc.
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted providing that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
extern crate libc;
extern crate block;
use std::fmt;
use std::ffi::{CString, CStr};
use std::collections::hash_map::HashMap;
use std::os::raw::{c_char, c_void};
use std::ptr::{null, null_mut};
use std::mem::transmute;
use std::cell::RefCell;
use std::rc::Weak;
use libc::free;
use block::{Block, ConcreteBlock};
macro_rules! to_cstr {
($e:expr) => (CString::new($e).unwrap())
}
macro_rules! null_block {
() => (transmute::<*mut c_void, _>(null_mut()))
}
#[repr(C)]
#[derive(Debug)]
pub enum RawType
{
Null,
Bool,
Uint64,
Int64,
Double,
Date,
String,
Binary,
Fd,
Dictionary,
Array,
Error,
}
#[repr(C)]
#[derive(Debug)]
pub enum CallStatus
{
InProgress,
MoreAvailable,
Done,
Error,
Aborted,
Ended
}
pub enum RawObject {}
pub enum RawConnection {}
pub enum RawClient {}
pub enum RawCall {}
pub struct Object
{
value: *mut RawObject,
}
pub struct Connection
{
value: *mut RawConnection
}
pub struct Client
{
value: *mut RawClient,
connection: Connection
}
pub struct Call<'a>
{
connection: &'a Connection,
value: *mut RawCall
}
pub struct Instance<'a>
{
connection: &'a Connection,
path: String
}
pub struct Interface<'a>
{
instance: &'a Instance<'a>,
name: String
}
#[derive(Clone, Debug)]
pub enum Value
{
Null,
Bool(bool),
Uint64(u64),
Int64(i64),
Double(f64),
Date(u64),
String(String),
Binary(Vec<u8>),
Array(Vec<Value>),
Dictionary(HashMap<String, Value>),
Object(Object),
Fd(i32),
Error(Error)
}
#[derive(Clone, Debug)]
pub struct Error
{
code: u32,
message: String,
stack_trace: Box<Value>,
extra: Box<Value>
}
#[link(name = "rpc")]
extern {
/* rpc/object.h */
pub fn rpc_get_type(value: *mut RawObject) -> RawType;
pub fn rpc_hash(value: *mut RawObject) -> u32;
pub fn rpc_null_create() -> *mut RawObject;
pub fn rpc_bool_create(value: bool) -> *mut RawObject;
pub fn rpc_bool_get_value(value: *mut RawObject) -> bool;
pub fn rpc_uint64_create(value: u64) -> *mut RawObject;
pub fn rpc_uint64_get_value(value: *mut RawObject) -> u64;
pub fn rpc_int64_create(value: i64) -> *mut RawObject;
pub fn rpc_int64_get_value(value: *mut RawObject) -> i64;
pub fn rpc_double_create(value: f64) -> *mut RawObject;
pub fn rpc_double_get_value(value: *mut RawObject) -> f64;
pub fn rpc_date_create(value: u64) -> *mut RawObject;
pub fn rpc_date_get_value(obj: *mut RawObject) -> u64;
pub fn rpc_string_create(value: *const c_char) -> *mut RawObject;
pub fn rpc_string_get_string_ptr(value: *mut RawObject) -> *const c_char;
pub fn rpc_data_create(ptr: *const u8, len: usize, dtor: *const c_void) -> *mut RawObject;
pub fn rpc_array_create() -> *mut RawObject;
pub fn rpc_dictionary_create() -> *mut RawObject;
pub fn rpc_array_append_value(obj: *mut RawObject, value: *mut RawObject);
pub fn rpc_dictionary_set_value(obj: *mut RawObject, key: *const c_char, value: *mut RawObject);
pub fn rpc_fd_create(value: i32) -> *mut RawObject;
pub fn rpc_fd_get_value(obj: *mut RawObject) -> i32;
pub fn rpc_copy_description(value: *mut RawObject) -> *mut c_char;
pub fn rpc_retain(value: *mut RawObject) -> *mut RawObject;
pub fn rpc_release_impl(value: *mut RawObject);
/* rpc/connection.h */
pub fn rpc_connection_call(conn: *mut RawConnection, path: *const c_char,
interface: *const c_char, name: *const c_char,
args: *const RawObject,
callback: &Block<(*mut RawCall,), bool>) -> *mut RawCall;
pub fn rpc_call_status(call: *mut RawCall) -> CallStatus;
pub fn rpc_call_result(call: *mut RawCall) -> *mut RawObject;
pub fn rpc_call_continue(call: *mut RawCall);
pub fn rpc_call_abort(call: *mut RawCall);
pub fn rpc_call_wait(call: *mut RawCall);
/* rpc/client.h */
pub fn rpc_client_create(uri: *const c_char, params: *const RawObject) -> *mut RawClient;
pub fn rpc_client_get_connection(client: *mut RawClient) -> *mut RawConnection;
}
pub trait Create<T> {
fn create(value: T) -> Object;
}
impl Clone for Object {
fn clone(&self) -> Object {
unsafe {
return Object { value: rpc_retain(self.value) }
}
}
}
impl Drop for Object {
fn drop(&mut self) {
unsafe {
rpc_release_impl(self.value)
}
}
}
impl<T> Create<T> for Object where Value: std::convert::From<T> {
fn create(value: T) -> Object {
Object::new(Value::from(value))
}
}
impl From<bool> for Value {
fn from(value: bool) -> Value {
Value::Bool(value)
}
}
impl From<u64> for Value {
fn from(value: u64) -> Value {
Value::Uint64(value)
}
}
impl From<i64> for Value {
fn from(value: i64) -> Value {
Value::Int64(value)
}
}
impl From<f64> for Value {
fn from(value: f64) -> Value {
Value::Double(value)
}
}
impl<'a> From<&'a str> for Value {
fn from(value: &str) -> Value {
Value::String(String::from(value))
}
}
impl From<String> for Value {
fn from(value: String) -> Value {
Value::String(value)
}
}
impl From<Vec<u8>> for Value {
fn from(value: Vec<u8>) -> Value {
Value::Binary(value)
}
}
impl<'a> From<&'a [Value]> for Value {
fn from(value: &[Value]) -> Value {
Value::Array(value.to_vec())
}
}
impl From<Vec<Value>> for Value {
fn from(value: Vec<Value>) -> Value {
Value::Array(value)
}
}
impl<'a> From<HashMap<&'a str, Value>> for Value {
fn from(value: HashMap<&str, Value>) -> Value {
Value::Dictionary(value.iter().map(|(&k, v)|
(String::from(k), v.clone())
).collect())
}
}
impl From<HashMap<String, Value>> for Value {
fn from(value: HashMap<String, Value>) -> Value {
Value::Dictionary(value)
}
}
impl Object {
pub fn new(value: Value) -> Object {
unsafe {
let obj = match value {
Value::Null => rpc_null_create(),
Value::Bool(val) => rpc_bool_create(val),
Value::Uint64(val) => rpc_uint64_create(val),
Value::Int64(val) => rpc_int64_create(val),
Value::Double(val) => rpc_double_create(val),
Value::Date(val) => rpc_date_create(val),
Value::Fd(val) => rpc_fd_create(val),
Value::Binary(ref val) => rpc_data_create(val.as_ptr(), val.len(), null()),
Value::Object(ref val) => rpc_retain(val.value),
Value::String(ref val) => {
let c_val = to_cstr!(val.as_str());
rpc_string_create(c_val.as_ptr())
},
Value::Array(val) => {
let arr = rpc_array_create();
for i in val {
rpc_array_append_value(arr, Object::new(i).value);
}
arr
},
Value::Dictionary(val) => {
let dict = rpc_dictionary_create();
for (k, v) in val {
let c_key = to_cstr!(k.as_str());
rpc_dictionary_set_value(dict, c_key.as_ptr(), Object::new(v).value);
}
dict
},
// Error objects cannot be constructed through the FFI here yet; fall back to null.
Value::Error(_) => {
rpc_null_create()
}
};
return Object { value: obj };
}
}
pub fn get_raw_type(&self) -> RawType {
unsafe {
rpc_get_type(self.value)
}
}
pub fn unpack(&self) -> Value {
unsafe {
match self.get_raw_type() {
RawType::Null => Value::Null,
RawType::Bool => Value::Bool(rpc_bool_get_value(self.value)),
RawType::Uint64 => Value::Uint64(rpc_uint64_get_value(self.value)),
RawType::Int64 => Value::Int64(rpc_int64_get_value(self.value)),
RawType::Double => Value::Double(rpc_double_get_value(self.value)),
RawType::String => Value::String(String::from(CStr::from_ptr(
rpc_string_get_string_ptr(self.value)).to_str().unwrap())),
RawType::Date => Value::Date(rpc_date_get_value(self.value)),
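// TODO: unpacking of binary, array, dictionary and error values is not
// implemented yet; they currently decay to Value::Null.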
RawType::Binary => Value::Null,
RawType::Fd => Value::Fd(rpc_fd_get_value(self.value)),
RawType::Array => Value::Null,
RawType::Dictionary => Value::Null,
RawType::Error => Value::Null,
}
}
}
}
impl std::hash::Hash for Object {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
// The original body was empty; presumably this should feed the hasher from
// the rpc_hash() FFI call declared above.
unsafe { state.write_u32(rpc_hash(self.value)); }
}
}
impl fmt::Debug for Object {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
let descr = rpc_copy_description(self.value);
// CString::from_raw would take ownership and free `descr` on drop, which
// together with the explicit free() below is a double free; borrow via CStr.
let result = f.write_str(CStr::from_ptr(descr).to_str().unwrap());
free(descr as *mut libc::c_void);
result
}
}
}
impl<'a> Call<'a> {
pub fn result(&self) -> Option<Value> {
unsafe {
let result = rpc_call_result(self.value);
match result.is_null() {
true => Option::None,
false => Option::Some(Object { value: result }.unpack())
}
}
}
pub fn status(&self) -> CallStatus {
unsafe {
rpc_call_status(self.value)
}
}
pub fn abort(&mut self) {
unsafe {
rpc_call_abort(self.value);
}
}
pub fn resume(&mut self) {
// The original body was empty; presumably this maps to rpc_call_continue().
unsafe {
rpc_call_continue(self.value);
}
}
pub fn wait(&mut self) {
unsafe {
rpc_call_wait(self.value);
}
}
}
impl Connection {
pub fn call(&self, name: &str, path: &str, interface: &str, args: &[Value]) -> Call {
unsafe {
let c_path = to_cstr!(path);
let c_interface = to_cstr!(interface);
let c_name = to_cstr!(name);
let call = rpc_connection_call(
self.value, c_path.as_ptr(), c_interface.as_ptr(), c_name.as_ptr(),
Object::create(args).value, null_block!()
);
Call { value: call, connection: self }
}
}
pub fn call_sync(&self, name: &str, path: &str, interface: &str,
args: &[Value]) -> Option<Value> {
let mut c = self.call(name, path, interface, args);
c.wait();
c.result()
}
pub fn call_async(&self, name: &str, path: &str, interface: &str, args: &[Value],
callback: Box<Fn(&Call) -> bool>) {
unsafe {
let c_path = to_cstr!(path);
let c_interface = to_cstr!(interface);
let c_name = to_cstr!(name);
let block = ConcreteBlock::new(move |raw_call| {
let call = Call { connection: self, value: raw_call };
callback(&call)
});
rpc_connection_call(
self.value, c_path.as_ptr(), c_interface.as_ptr(), c_name.as_ptr(),
Object::create(args).value, &block
);
}
}
}
impl Client {
pub fn connect(uri: &str) -> Client {
unsafe {
let c_uri = to_cstr!(uri);
let client = rpc_client_create(c_uri.as_ptr(), null());
Client {
value: client,
connection: Connection { value: rpc_client_get_connection(client)}
}
}
}
pub fn connection(&self) -> &Connection {
&self.connection
}
pub fn instance(&self, path: &str) -> Instance {
Instance { connection: self.connection(), path: String::from(path) }
}
}
impl<'a> Instance<'a> {
pub fn interfaces(&self) -> HashMap<String, Interface> {
self.connection.call_sync(
"get_interfaces",
self.path.as_str(),
"com.twoporeguys.librpc.Introspectable",
&[][..]
).unwrap() // TODO: convert the resulting Value into HashMap<String, Interface>
}
pub fn interface(&self, name: &str) -> Interface {
Interface { instance: self, name: String::from(name) }
}
}
// These methods were empty stubs. They are stubbed explicitly here, and the
// single-parameter Result alias used originally (not defined in this file) is
// replaced with the local Error type as an assumption.
impl<'a> Interface<'a> {
pub fn call(method: &str, args: &[&Value]) -> Call<'a> {
unimplemented!()
}
pub fn call_sync(method: &str, args: &[&Value]) -> Result<Value, Error> {
unimplemented!()
}
pub fn get(property: &str) -> Result<Value, Error> {
unimplemented!()
}
pub fn set(property: &str, value: &Value) -> Result<(), Error> {
unimplemented!()
}
}
type_check.rs
use std::collections::HashMap;
use std::rc::Rc;
use parsing::{AST, Statement, Declaration, Signature, Expression, ExpressionType, Operation, Variant, TypeName, TypeSingletonName};
// from Niko's talk
/* fn type_check(expression, expected_ty) -> Ty {
let ty = bare_type_check(expression, expected_type);
if ty is incompatible with expected_ty {
try_coerce(expression, ty, expected_ty)
} else {
ty
}
}
fn bare_type_check(expression, expected_type) -> Ty { ... }
*/
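// A self-contained Rust sketch of the check-then-coerce shape described in the
// notes above. `Ty` and the coercion rule here are illustrative stand-ins, not
// part of this module's real `Type` machinery.
#[allow(dead_code)]
mod coercion_sketch {
    #[derive(Clone, Debug, PartialEq)]
    pub enum Ty { Int, Float }

    // Exact matches pass through; an Int may be widened to Float; anything
    // else is a type error.
    pub fn try_coerce(found: Ty, expected: Ty) -> Result<Ty, String> {
        match (found, expected) {
            (t1, t2) if t1 == t2 => Ok(t2),
            (Ty::Int, Ty::Float) => Ok(Ty::Float),
            (t1, t2) => Err(format!("{:?} does not coerce to {:?}", t1, t2)),
        }
    }
}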
/* H-M ALGO NOTES
from https://www.youtube.com/watch?v=il3gD7XMdmA
(also check out http://dev.stephendiehl.com/fun/006_hindley_milner.html)
typeInfer :: Expr a -> Matching (Type a)
unify :: Type a -> Type b -> Matching (Type c)
(Matching a) is a monad in which unification is done
ex:
typeInfer (If e1 e2 e3) = do
t1 <- typeInfer e1
t2 <- typeInfer e2
t3 <- typeInfer e3
_ <- unify t1 BoolType
unify t2 t3 -- b/c t2 and t3 have to be the same type
typeInfer (Const (ConstInt _)) = IntType -- same for other literals
--function application
typeInfer (Apply f x) = do
tf <- typeInfer f
tx <- typeInfer x
case tf of
FunctionType t1 t2 -> do
_ <- unify t1 tx
return t2
_ -> fail "Not a function"
--type annotation
typeInfer (Typed x t) = do
tx <- typeInfer x
unify tx t
--variable and let expressions - need to pass around a map of variable names to types here
typeInfer :: [ (Var, Type Var) ] -> Expr Var -> Matching (Type Var)
typeInfer ctx (Var x) = case (lookup x ctx) of
Just t -> return t
Nothing -> fail "Unknown variable"
--let x = e1 in e2
typeInfer ctx (Let x e1 e2) = do
t1 <- typeInfer ctx e1
typeInfer ((x, t1) :: ctx) e2
--lambdas are complicated (this represents ʎx.e)
typeInfer ctx (Lambda x e) = do
t1 <- allocExistentialVariable
t2 <- typeInfer ((x, t1) :: ctx) e
return $ FunctionType t1 t2 -- ie. t1 -> t2
--to solve the problem of map :: (a -> b) -> [a] -> [b]
when we use a variable whose type has universal tvars, convert those universal
tvars to existential ones
-and each distinct universal tvar needs to map to the same existential type
-so we change typeinfer:
typeInfer ctx (Var x) = do
case (lookup x ctx) of
Nothing -> ...
Just t -> do
let uvars = nub (toList t) -- nub removes duplicates, so this gets unique universally quantified variables
evars <- mapM (const allocExistentialVariable) uvars
let varMap = zip uvars evars
let fixVar varMap v = fromJust $ lookup v varMap
return (fmap (fixVar varMap) t)
--how do we define unify??
-recall, type signature is:
unify :: Type a -> Type b -> Matching (Type c)
unify BoolType BoolType = BoolType --easy, same for all constants
unify (FunctionType t1 t2) (FunctionType t3 t4) = do
t5 <- unify t1 t3
t6 <- unify t2 t4
return $ FunctionType t5 t6
unify (TVar a) (TVar b) = if a == b then TVar a else fail
--existential types can be assigned another type at most once
--some complicated stuff about handling existential types
--everything else is a type error
unify a b = fail
SKOLEMIZATION - how you prevent an unassigned existential type variable from leaking!
-before a type gets to global scope, replace all unassigned existential vars w/ new unique universal
type variables
*/
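// A minimal sketch of the skolemization step described above, written against
// this module's `Type`/`TypeVar` definitions below. It is not wired into
// `TypeContext`: a full version would first resolve existential variables that
// already have an entry in the evar table, and the "sk{n}" naming scheme is an
// assumption made for illustration.
#[allow(dead_code)]
fn skolemize_sketch(ty: &Type, fresh: &mut HashMap<u64, Rc<String>>) -> Type {
    match ty {
        // Each distinct unassigned existential maps to the same fresh universal.
        Type::TVar(TypeVar::Exist(n)) => {
            let label = fresh.entry(*n).or_insert_with(|| Rc::new(format!("sk{}", n))).clone();
            Type::TVar(TypeVar::Univ(label))
        }
        Type::TFunc(t1, t2) => Type::TFunc(
            Box::new(skolemize_sketch(t1, fresh)),
            Box::new(skolemize_sketch(t2, fresh)),
        ),
        other => other.clone(),
    }
}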
#[derive(Debug, PartialEq, Clone)]
pub enum Type {
TVar(TypeVar),
TConst(TypeConst),
TFunc(Box<Type>, Box<Type>),
}
#[derive(Debug, PartialEq, Clone)]
pub enum TypeVar {
Univ(Rc<String>),
Exist(u64),
}
impl TypeVar {
fn univ(label: &str) -> TypeVar {
TypeVar::Univ(Rc::new(label.to_string()))
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum TypeConst {
UserT(Rc<String>),
Integer,
Float,
StringT,
Boolean,
Unit,
Bottom,
}
type TypeCheckResult = Result<Type, String>;
#[derive(Debug, PartialEq, Eq, Hash)]
struct PathSpecifier(Rc<String>);
#[derive(Debug, PartialEq, Clone)]
struct TypeContextEntry {
ty: Type,
constant: bool
}
pub struct TypeContext {
symbol_table: HashMap<PathSpecifier, TypeContextEntry>,
evar_table: HashMap<u64, Type>,
existential_type_label_count: u64
}
impl TypeContext {
pub fn new() -> TypeContext {
TypeContext {
symbol_table: HashMap::new(),
evar_table: HashMap::new(),
existential_type_label_count: 0,
}
}
pub fn add_symbols(&mut self, ast: &AST) {
use self::Declaration::*;
use self::Type::*;
use self::TypeConst::*;
for statement in ast.0.iter() {
match *statement {
Statement::ExpressionStatement(_) => (),
Statement::Declaration(ref decl) => match *decl {
FuncSig(_) => (),
Impl { .. } => (),
TypeDecl(ref type_constructor, ref body) => {
for variant in body.0.iter() {
let (spec, ty) = match variant {
&Variant::UnitStruct(ref data_constructor) => {
let spec = PathSpecifier(data_constructor.clone());
let ty = TConst(UserT(type_constructor.name.clone()));
(spec, ty)
},
&Variant::TupleStruct(ref data_constructor, ref args) => {
//TODO fix
let arg = args.get(0).unwrap();
let type_arg = self.from_anno(arg);
let spec = PathSpecifier(data_constructor.clone());
let ty = TFunc(Box::new(type_arg), Box::new(TConst(UserT(type_constructor.name.clone()))));
(spec, ty)
},
&Variant::Record(_, _) => unimplemented!(),
};
let entry = TypeContextEntry { ty, constant: true };
self.symbol_table.insert(spec, entry);
}
},
TypeAlias { .. } => (),
Binding { ref name, ref constant, ref expr } => {
let spec = PathSpecifier(name.clone());
let ty = expr.1.as_ref()
.map(|ty| self.from_anno(ty))
.unwrap_or_else(|| { self.alloc_existential_type() }); // this call to alloc_existential is OK b/c a binding only ever has one type, so if the annotation is absent, it's fine to just make one de novo
let entry = TypeContextEntry { ty, constant: *constant };
self.symbol_table.insert(spec, entry);
},
FuncDecl(ref signature, _) => {
let spec = PathSpecifier(signature.name.clone());
let ty = self.from_signature(signature);
let entry = TypeContextEntry { ty, constant: true };
self.symbol_table.insert(spec, entry);
},
}
}
}
}
fn lookup(&mut self, binding: &Rc<String>) -> Option<TypeContextEntry> {
let key = PathSpecifier(binding.clone());
self.symbol_table.get(&key).map(|entry| entry.clone())
}
pub fn debug_symbol_table(&self) -> String {
format!("Symbol table:\n {:?}\nEvar table:\n{:?}", self.symbol_table, self.evar_table)
}
fn alloc_existential_type(&mut self) -> Type {
let ret = Type::TVar(TypeVar::Exist(self.existential_type_label_count));
self.existential_type_label_count += 1;
ret
}
fn from_anno(&mut self, anno: &TypeName) -> Type {
use self::Type::*;
use self::TypeConst::*;
match anno {
&TypeName::Singleton(TypeSingletonName { ref name, .. }) => {
match name.as_ref().as_ref() {
"Int" => TConst(Integer),
"Float" => TConst(Float),
"Bool" => TConst(Boolean),
"String" => TConst(StringT),
s => TVar(TypeVar::Univ(Rc::new(format!("{}",s)))),
}
},
&TypeName::Tuple(ref items) => {
if items.len() == 1 {
TConst(Unit)
} else {
TConst(Bottom)
}
}
}
}
fn from_signature(&mut self, sig: &Signature) -> Type {
use self::Type::*;
use self::TypeConst::*;
//TODO this won't work properly until you make sure that all (universal) type vars in the function have the same existential type var
// actually this should never even put existential types into the symbol table at all
//this will crash if more than 5 arg function is used
let names = vec!["a", "b", "c", "d", "e", "f"];
let mut idx = 0;
let mut get_type = || { let q = TVar(TypeVar::Univ(Rc::new(format!("{}", names.get(idx).unwrap())))); idx += 1; q };
let return_type = sig.type_anno.as_ref().map(|anno| self.from_anno(&anno)).unwrap_or_else(|| { get_type() });
if sig.params.len() == 0 {
TFunc(Box::new(TConst(Unit)), Box::new(return_type))
} else {
let mut output_type = return_type;
for p in sig.params.iter() {
let p_type = p.1.as_ref().map(|anno| self.from_anno(anno)).unwrap_or_else(|| { get_type() });
output_type = TFunc(Box::new(p_type), Box::new(output_type));
}
output_type
}
}
pub fn type_check(&mut self, ast: &AST) -> TypeCheckResult {
use self::Type::*;
use self::TypeConst::*;
let mut last = TConst(Unit);
for statement in ast.0.iter() {
match statement {
&Statement::Declaration(ref _decl) => {
//return Err(format!("Declarations not supported"));
},
&Statement::ExpressionStatement(ref expr) => {
last = self.infer(expr)?;
}
}
}
Ok(last)
}
fn infer(&mut self, expr: &Expression) -> TypeCheckResult {
match (&expr.0, &expr.1) {
(exprtype, &Some(ref anno)) => {
let tx = self.infer_no_anno(exprtype)?;
let ty = self.from_anno(anno);
self.unify(tx, ty)
},
(exprtype, &None) => self.infer_no_anno(exprtype),
}
}
fn infer_no_anno(&mut self, ex: &ExpressionType) -> TypeCheckResult {
use self::ExpressionType::*;
use self::Type::*;
use self::TypeConst::*;
Ok(match ex {
&IntLiteral(_) => TConst(Integer),
&FloatLiteral(_) => TConst(Float),
&StringLiteral(_) => TConst(StringT),
&BoolLiteral(_) => TConst(Boolean),
&Value(ref name, _) => {
self.lookup(name)
.map(|entry| entry.ty)
.ok_or(format!("Couldn't find {}", name))?
},
&BinExp(ref op, ref lhs, ref rhs) => {
let t_lhs = self.infer(lhs)?;
match self.infer_op(op)? {
TFunc(t1, t2) => {
let _ = self.unify(t_lhs, *t1)?;
let t_rhs = self.infer(rhs)?;
let x = *t2;
match x {
TFunc(t3, t4) => {
let _ = self.unify(t_rhs, *t3)?;
*t4
},
_ => return Err(format!("Not a function type either")),
}
},
_ => return Err(format!("Op {:?} is not a function type", op)),
}
},
&Call { ref f, ref arguments } => {
let tf = self.infer(f)?;
let targ = self.infer(arguments.get(0).unwrap())?;
match tf {
TFunc(box t1, box t2) => {
let _ = self.unify(t1, targ)?;
t2
},
_ => return Err(format!("Not a function!")),
}
},
_ => TConst(Bottom),
})
}
fn infer_op(&mut self, op: &Operation) -> TypeCheckResult {
use self::Type::*;
use self::TypeConst::*;
macro_rules! binoptype {
($lhs:expr, $rhs:expr, $out:expr) => { TFunc(Box::new($lhs), Box::new(TFunc(Box::new($rhs), Box::new($out)))) };
}
Ok(match (*op.0).as_ref() {
"+" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"++" => binoptype!(TConst(StringT), TConst(StringT), TConst(StringT)),
"-" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"*" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"/" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"%" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
_ => TConst(Bottom)
})
}
fn unify(&mut self, t1: Type, t2: Type) -> TypeCheckResult {
use self::Type::*;
use self::TypeVar::*;
println!("Calling unify with `{:?}` and `{:?}`", t1, t2);
match (&t1, &t2) {
(&TConst(ref c1), &TConst(ref c2)) if c1 == c2 => Ok(TConst(c1.clone())),
(&TFunc(ref t1, ref t2), &TFunc(ref t3, ref t4)) => {
let t5 = self.unify(*t1.clone(), *t3.clone())?;
let t6 = self.unify(*t2.clone(), *t4.clone())?;
Ok(TFunc(Box::new(t5), Box::new(t6)))
},
(&TVar(Univ(ref a)), &TVar(Univ(ref b))) => {
if a == b {
Ok(TVar(Univ(a.clone())))
} else {
Err(format!("Couldn't unify universal types {} and {}", a, b))
}
},
//the interesting case!!
(&TVar(Exist(ref a)), ref t2) => {
let x = self.evar_table.get(a).map(|x| x.clone());
match x {
Some(ref t1) => self.unify(t1.clone().clone(), t2.clone().clone()),
None => {
self.evar_table.insert(*a, t2.clone().clone());
Ok(t2.clone().clone())
}
}
},
(ref t1, &TVar(Exist(ref a))) => {
let x = self.evar_table.get(a).map(|x| x.clone());
match x {
Some(ref t2) => self.unify(t2.clone().clone(), t1.clone().clone()),
None => {
self.evar_table.insert(*a, t1.clone().clone());
Ok(t1.clone().clone())
}
}
},
_ => Err(format!("Types {:?} and {:?} don't unify", t1, t2))
}
}
}
#[cfg(test)]
mod tests {
use super::{Type, TypeVar, TypeConst, TypeContext};
use super::Type::*;
use super::TypeConst::*;
use schala_lang::parsing::{parse, tokenize};
macro_rules! type_test {
($input:expr, $correct:expr) => {
{
let mut tc = TypeContext::new();
let ast = parse(tokenize($input)).0.unwrap();
tc.add_symbols(&ast);
assert_eq!($correct, tc.type_check(&ast).unwrap())
}
}
}
#[test]
fn basic_inference() {
type_test!("30", TConst(Integer));
type_test!("fn x(a: Int): Bool {}; x(1)", TConst(Boolean));
}
}
type_check.rs
use std::collections::HashMap;
use std::rc::Rc;
use parsing::{AST, Statement, Declaration, Signature, Expression, ExpressionType, Operation, Variant, TypeName, TypeSingletonName};
// from Niko's talk
/* fn type_check(expression, expected_ty) -> Ty {
let ty = bare_type_check(expression, expected_type);
if ty is incompatible with expected_ty {
try_coerce(expression, ty, expected_ty)
} else {
ty
}
}
fn bare_type_check(expression, expected_type) -> Ty { ... }
*/
/* H-M ALGO NOTES
from https://www.youtube.com/watch?v=il3gD7XMdmA
(also check out http://dev.stephendiehl.com/fun/006_hindley_milner.html)
typeInfer :: Expr a -> Matching (Type a)
unify :: Type a -> Type b -> Matching (Type c)
(Matching a) is a monad in which unification is done
ex:
typeInfer (If e1 e2 e3) = do
t1 <- typeInfer e1
t2 <- typeInfer e2
t3 <- typeInfer e3
_ <- unify t1 BoolType
unify t2 t3 -- b/c t2 and t3 have to be the same type
typeInfer (Const (ConstInt _)) = IntType -- same for other literals
--function application
typeInfer (Apply f x) = do
tf <- typeInfer f
tx <- typeInfer x
case tf of
FunctionType t1 t2 -> do
_ <- unify t1 tx
return t2
_ -> fail "Not a function"
--type annotation
typeInfer (Typed x t) = do
tx <- typeInfer x
unify tx t
--variable and let expressions - need to pass around a map of variable names to types here
typeInfer :: [ (Var, Type Var) ] -> Expr Var -> Matching (Type Var)
typeInfer ctx (Var x) = case (lookup x ctx) of
Just t -> return t
Nothing -> fail "Unknown variable"
--let x = e1 in e2
typeInfer ctx (Let x e1 e2) = do
t1 <- typeInfer ctx e1
typeInfer ((x, t1) :: ctx) e2
--lambdas are complicated (this represents ʎx.e)
typeInfer ctx (Lambda x e) = do
t1 <- allocExistentialVariable
t2 <- typeInfer ((x, t1) :: ctx) e
return $ FunctionType t1 t2 -- ie. t1 -> t2
--to solve the problem of map :: (a -> b) -> [a] -> [b]
when we use a variable whose type has universal tvars, convert those universal
tvars to existential ones
-and each distinct universal tvar needs to map to the same existential type
-so we change typeinfer:
typeInfer ctx (Var x) = do
case (lookup x ctx) of
Nothing -> ...
Just t -> do
let uvars = nub (toList t) -- nub removes duplicates, so this gets unique universally quantified variables
evars <- mapM (const allocExistentialVariable) uvars
let varMap = zip uvars evars
let fixVar varMap v = fromJust $ lookup v varMap
return (fmap (fixVar varMap) t)
--how do we define unify??
-recall, type signature is:
unify :: Type a -> Type b -> Matching (Type c)
unify BoolType BoolType = BoolType --easy, same for all constants
unify (FunctionType t1 t2) (FunctionType t3 t4) = do
t5 <- unify t1 t3
t6 <- unify t2 t4
return $ FunctionType t5 t6
unify (TVar a) (TVar b) = if a == b then TVar a else fail
--existential types can be assigned another type at most once
--some complicated stuff about handling existential types
--everything else is a type error
unify a b = fail
SKOLEMIZATION - how you prevent an unassigned existential type variable from leaking!
-before a type gets to global scope, replace all unassigned existential vars w/ new unique universal
type variables
*/
#[derive(Debug, PartialEq, Clone)]
pub enum Type {
TVar(TypeVar),
TConst(TypeConst),
TFunc(Box<Type>, Box<Type>),
}
#[derive(Debug, PartialEq, Clone)]
pub enum TypeVar {
Univ(Rc<String>),
Exist(u64),
}
impl TypeVar {
fn univ(label: &str) -> TypeVar {
TypeVar::Univ(Rc::new(label.to_string()))
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum TypeConst {
UserT(Rc<String>),
Integer,
Float,
StringT,
Boolean,
Unit,
Bottom,
}
type TypeCheckResult = Result<Type, String>;
#[derive(Debug, PartialEq, Eq, Hash)]
struct PathSpecifier(Rc<String>);
#[derive(Debug, PartialEq, Clone)]
struct TypeContextEntry {
ty: Type,
constant: bool
}
pub struct TypeContext {
symbol_table: HashMap<PathSpecifier, TypeContextEntry>,
evar_table: HashMap<u64, Type>,
existential_type_label_count: u64
}
impl TypeContext {
pub fn new() -> TypeContext {
TypeContext {
symbol_table: HashMap::new(),
evar_table: HashMap::new(),
existential_type_label_count: 0,
}
}
pub fn add_symbols(&mut self, ast: &AST) {
use self::Declaration::*;
use self::Type::*;
use self::TypeConst::*;
for statement in ast.0.iter() {
match *statement {
Statement::ExpressionStatement(_) => (),
Statement::Declaration(ref decl) => match *decl {
FuncSig(_) => (),
Impl { .. } => (),
TypeDecl(ref type_constructor, ref body) => {
for variant in body.0.iter() {
let (spec, ty) = match variant {
&Variant::UnitStruct(ref data_constructor) => {
let spec = PathSpecifier(data_constructor.clone());
let ty = TConst(UserT(type_constructor.name.clone()));
(spec, ty)
},
&Variant::TupleStruct(ref data_constructor, ref args) => {
//TODO fix
let arg = args.get(0).unwrap();
let type_arg = self.from_anno(arg);
let spec = PathSpecifier(data_constructor.clone());
let ty = TFunc(Box::new(type_arg), Box::new(TConst(UserT(type_constructor.name.clone()))));
(spec, ty)
},
&Variant::Record(_, _) => unimplemented!(),
};
let entry = TypeContextEntry { ty, constant: true };
self.symbol_table.insert(spec, entry);
}
},
TypeAlias { .. } => (),
Binding { ref name, ref constant, ref expr } => {
let spec = PathSpecifier(name.clone());
let ty = expr.1.as_ref()
.map(|ty| self.from_anno(ty))
.unwrap_or_else(|| { self.alloc_existential_type() }); // this call to alloc_existential is OK b/c a binding only ever has one type, so if the annotation is absent, it's fine to just make one de novo
let entry = TypeContextEntry { ty, constant: *constant };
self.symbol_table.insert(spec, entry);
},
FuncDecl(ref signature, _) => {
let spec = PathSpecifier(signature.name.clone());
let ty = self.from_signature(signature);
let entry = TypeContextEntry { ty, constant: true };
self.symbol_table.insert(spec, entry);
},
}
}
}
}
fn lookup(&mut self, binding: &Rc<String>) -> Option<TypeContextEntry> {
let key = PathSpecifier(binding.clone());
self.symbol_table.get(&key).map(|entry| entry.clone())
}
pub fn debug_symbol_table(&self) -> String {
format!("Symbol table:\n {:?}\nEvar table:\n{:?}", self.symbol_table, self.evar_table)
}
fn alloc_existential_type(&mut self) -> Type {
let ret = Type::TVar(TypeVar::Exist(self.existential_type_label_count));
self.existential_type_label_count += 1;
ret
}
fn from_anno(&mut self, anno: &TypeName) -> Type {
use self::Type::*;
use self::TypeConst::*;
match anno {
&TypeName::Singleton(TypeSingletonName { ref name, .. }) => {
match name.as_ref().as_ref() {
"Int" => TConst(Integer),
"Float" => TConst(Float),
"Bool" => TConst(Boolean),
"String" => TConst(StringT),
s => TVar(TypeVar::Univ(Rc::new(format!("{}",s)))),
}
},
&TypeName::Tuple(ref items) => {
if items.len() == 1 {
TConst(Unit)
} else {
TConst(Bottom)
}
}
}
}
fn from_signature(&mut self, sig: &Signature) -> Type {
use self::Type::*;
use self::TypeConst::*;
//TODO this won't work properly until you make sure that all (universal) type vars in the function have the same existential type var
// actually this should never even put existential types into the symbol table at all
//this will crash if more than 5 arg function is used
let names = vec!["a", "b", "c", "d", "e", "f"];
let mut idx = 0;
let mut get_type = || { let q = TVar(TypeVar::Univ(Rc::new(format!("{}", names.get(idx).unwrap())))); idx += 1; q };
let return_type = sig.type_anno.as_ref().map(|anno| self.from_anno(&anno)).unwrap_or_else(|| { get_type() });
if sig.params.len() == 0 {
TFunc(Box::new(TConst(Unit)), Box::new(return_type))
} else {
let mut output_type = return_type;
for p in sig.params.iter() {
let p_type = p.1.as_ref().map(|anno| self.from_anno(anno)).unwrap_or_else(|| { get_type() });
output_type = TFunc(Box::new(p_type), Box::new(output_type));
}
output_type
}
}
pub fn type_check(&mut self, ast: &AST) -> TypeCheckResult {
use self::Type::*;
use self::TypeConst::*;
let mut last = TConst(Unit);
for statement in ast.0.iter() {
match statement {
&Statement::Declaration(ref _decl) => {
//return Err(format!("Declarations not supported"));
},
&Statement::ExpressionStatement(ref expr) => {
last = self.infer(expr)?;
}
}
}
Ok(last)
}
fn infer(&mut self, expr: &Expression) -> TypeCheckResult {
match (&expr.0, &expr.1) {
(exprtype, &Some(ref anno)) => {
let tx = self.infer_no_anno(exprtype)?;
let ty = self.from_anno(anno);
self.unify(tx, ty)
},
(exprtype, &None) => self.infer_no_anno(exprtype),
}
}
fn infer_no_anno(&mut self, ex: &ExpressionType) -> TypeCheckResult {
use self::ExpressionType::*;
use self::Type::*;
use self::TypeConst::*;
Ok(match ex {
&IntLiteral(_) => TConst(Integer),
&FloatLiteral(_) => TConst(Float),
&StringLiteral(_) => TConst(StringT),
&BoolLiteral(_) => TConst(Boolean),
&Value(ref name, _) => {
self.lookup(name)
.map(|entry| entry.ty)
.ok_or(format!("Couldn't find {}", name))?
},
&BinExp(ref op, ref lhs, ref rhs) => {
let t_lhs = self.infer(lhs)?;
match self.infer_op(op)? {
TFunc(t1, t2) => {
let _ = self.unify(t_lhs, *t1)?;
let t_rhs = self.infer(rhs)?;
let x = *t2;
match x {
TFunc(t3, t4) => {
let _ = self.unify(t_rhs, *t3)?;
*t4
},
_ => return Err(format!("Not a function type either")),
}
},
_ => return Err(format!("Op {:?} is not a function type", op)),
}
},
&Call { ref f, ref arguments } => {
let tf = self.infer(f)?;
let targ = self.infer(arguments.get(0).unwrap())?;
match tf {
TFunc(box t1, box t2) => {
let _ = self.unify(t1, targ)?;
t2
},
_ => return Err(format!("Not a function!")),
}
},
_ => TConst(Bottom),
})
}
fn infer_op(&mut self, op: &Operation) -> TypeCheckResult {
use self::Type::*;
use self::TypeConst::*;
macro_rules! binoptype {
($lhs:expr, $rhs:expr, $out:expr) => { TFunc(Box::new($lhs), Box::new(TFunc(Box::new($rhs), Box::new($out)))) };
}
Ok(match (*op.0).as_ref() {
"+" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"++" => binoptype!(TConst(StringT), TConst(StringT), TConst(StringT)),
"-" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"*" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"/" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"%" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
_ => TConst(Bottom)
})
}
fn unify(&mut self, t1: Type, t2: Type) -> TypeCheckResult {
use self::Type::*;
use self::TypeVar::*;
println!("Calling unify with `{:?}` and `{:?}`", t1, t2);
match (&t1, &t2) {
(&TConst(ref c1), &TConst(ref c2)) if c1 == c2 => Ok(TConst(c1.clone())),
(&TFunc(ref t1, ref t2), &TFunc(ref t3, ref t4)) => {
let t5 = self.unify(*t1.clone(), *t3.clone())?;
let t6 = self.unify(*t2.clone(), *t4.clone())?;
Ok(TFunc(Box::new(t5), Box::new(t6)))
},
(&TVar(Univ(ref a)), &TVar(Univ(ref b))) => {
if a == b {
Ok(TVar(Univ(a.clone())))
} else {
Err(format!("Couldn't unify universal types {} and {}", a, b))
}
},
//the interesting case!!
(&TVar(Exist(ref a)), ref t2) => {
let x = self.evar_table.get(a).map(|x| x.clone());
match x {
Some(ref t1) => self.unify(t1.clone().clone(), t2.clone().clone()),
None => {
self.evar_table.insert(*a, t2.clone().clone());
Ok(t2.clone().clone())
}
}
},
(ref t1, &TVar(Exist(ref a))) => {
let x = self.evar_table.get(a).map(|x| x.clone());
match x {
Some(ref t2) => self.unify(t2.clone().clone(), t1.clone().clone()),
None => {
self.evar_table.insert(*a, t1.clone().clone());
Ok(t1.clone().clone())
}
}
},
_ => Err(format!("Types {:?} and {:?} don't unify", t1, t2))
}
}
}
#[cfg(test)]
mod tests {
use super::{Type, TypeVar, TypeConst, TypeContext};
use super::Type::*;
use super::TypeConst::*;
use schala_lang::parsing::{parse, tokenize};
macro_rules! type_test {
($input:expr, $correct:expr) => {
{
let mut tc = TypeContext::new();
let ast = parse(tokenize($input)).0.unwrap();
tc.add_symbols(&ast);
assert_eq!($correct, tc.type_check(&ast).unwrap())
}
}
}
#[test]
fn basic_inference() {
type_test!("30", TConst(Integer));
type_test!("fn x(a: Int): Bool {}; x(1)", TConst(Boolean));
}
}
keyboard.rs
// Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Baseview modifications to druid code:
// - collect functions from various files
// - update imports, paths etc
//! X11 keyboard handling
use xcb::xproto;
use keyboard_types::*;
use crate::keyboard::code_to_location;
/// Convert a hardware scan code to a key.
///
/// Note: this is a hardcoded layout. We need to detect the user's
/// layout from the system and apply it.
fn code_to_key(code: Code, m: Modifiers) -> Key {
fn a(s: &str) -> Key {
Key::Character(s.into())
}
fn s(mods: Modifiers, base: &str, shifted: &str) -> Key {
if mods.contains(Modifiers::SHIFT) {
Key::Character(shifted.into())
} else {
Key::Character(base.into())
}
}
fn n(mods: Modifiers, base: Key, num: &str) -> Key {
if mods.contains(Modifiers::NUM_LOCK) != mods.contains(Modifiers::SHIFT) {
Key::Character(num.into())
} else {
base
}
}
match code {
Code::KeyA => s(m, "a", "A"),
Code::KeyB => s(m, "b", "B"),
Code::KeyC => s(m, "c", "C"),
Code::KeyD => s(m, "d", "D"),
Code::KeyE => s(m, "e", "E"),
Code::KeyF => s(m, "f", "F"),
Code::KeyG => s(m, "g", "G"),
Code::KeyH => s(m, "h", "H"),
Code::KeyI => s(m, "i", "I"),
Code::KeyJ => s(m, "j", "J"),
Code::KeyK => s(m, "k", "K"),
Code::KeyL => s(m, "l", "L"),
Code::KeyM => s(m, "m", "M"),
Code::KeyN => s(m, "n", "N"),
Code::KeyO => s(m, "o", "O"),
Code::KeyP => s(m, "p", "P"),
Code::KeyQ => s(m, "q", "Q"),
Code::KeyR => s(m, "r", "R"),
Code::KeyS => s(m, "s", "S"),
Code::KeyT => s(m, "t", "T"),
Code::KeyU => s(m, "u", "U"),
Code::KeyV => s(m, "v", "V"),
Code::KeyW => s(m, "w", "W"),
Code::KeyX => s(m, "x", "X"),
Code::KeyY => s(m, "y", "Y"),
Code::KeyZ => s(m, "z", "Z"),
Code::Digit0 => s(m, "0", ")"),
Code::Digit1 => s(m, "1", "!"),
Code::Digit2 => s(m, "2", "@"),
Code::Digit3 => s(m, "3", "#"),
Code::Digit4 => s(m, "4", "$"),
Code::Digit5 => s(m, "5", "%"),
Code::Digit6 => s(m, "6", "^"),
Code::Digit7 => s(m, "7", "&"),
Code::Digit8 => s(m, "8", "*"),
Code::Digit9 => s(m, "9", "("),
Code::Backquote => s(m, "`", "~"),
Code::Minus => s(m, "-", "_"),
Code::Equal => s(m, "=", "+"),
Code::BracketLeft => s(m, "[", "{"),
Code::BracketRight => s(m, "]", "}"),
Code::Backslash => s(m, "\\", "|"),
Code::Semicolon => s(m, ";", ":"),
Code::Quote => s(m, "'", "\""),
Code::Comma => s(m, ",", "<"),
Code::Period => s(m, ".", ">"),
Code::Slash => s(m, "/", "?"),
Code::Space => a(" "),
Code::Escape => Key::Escape,
Code::Backspace => Key::Backspace,
Code::Tab => Key::Tab,
Code::Enter => Key::Enter,
Code::ControlLeft => Key::Control,
Code::ShiftLeft => Key::Shift,
Code::ShiftRight => Key::Shift,
Code::NumpadMultiply => a("*"),
Code::AltLeft => Key::Alt,
Code::CapsLock => Key::CapsLock,
Code::F1 => Key::F1,
Code::F2 => Key::F2,
Code::F3 => Key::F3,
Code::F4 => Key::F4,
Code::F5 => Key::F5,
Code::F6 => Key::F6,
Code::F7 => Key::F7,
Code::F8 => Key::F8,
Code::F9 => Key::F9,
Code::F10 => Key::F10,
Code::NumLock => Key::NumLock,
Code::ScrollLock => Key::ScrollLock,
Code::Numpad0 => n(m, Key::Insert, "0"),
Code::Numpad1 => n(m, Key::End, "1"),
Code::Numpad2 => n(m, Key::ArrowDown, "2"),
Code::Numpad3 => n(m, Key::PageDown, "3"),
Code::Numpad4 => n(m, Key::ArrowLeft, "4"),
Code::Numpad5 => n(m, Key::Clear, "5"),
Code::Numpad6 => n(m, Key::ArrowRight, "6"),
Code::Numpad7 => n(m, Key::Home, "7"),
Code::Numpad8 => n(m, Key::ArrowUp, "8"),
Code::Numpad9 => n(m, Key::PageUp, "9"),
Code::NumpadSubtract => a("-"),
Code::NumpadAdd => a("+"),
Code::NumpadDecimal => n(m, Key::Delete, "."),
Code::IntlBackslash => s(m, "\\", "|"),
Code::F11 => Key::F11,
Code::F12 => Key::F12,
// This mapping is based on the picture in the w3c spec.
Code::IntlRo => a("\\"),
Code::Convert => Key::Convert,
Code::KanaMode => Key::KanaMode,
Code::NonConvert => Key::NonConvert,
Code::NumpadEnter => Key::Enter,
Code::ControlRight => Key::Control,
Code::NumpadDivide => a("/"),
Code::PrintScreen => Key::PrintScreen,
Code::AltRight => Key::Alt,
Code::Home => Key::Home,
Code::ArrowUp => Key::ArrowUp,
Code::PageUp => Key::PageUp,
Code::ArrowLeft => Key::ArrowLeft,
Code::ArrowRight => Key::ArrowRight,
Code::End => Key::End,
Code::ArrowDown => Key::ArrowDown,
Code::PageDown => Key::PageDown,
Code::Insert => Key::Insert,
Code::Delete => Key::Delete,
Code::AudioVolumeMute => Key::AudioVolumeMute,
Code::AudioVolumeDown => Key::AudioVolumeDown,
Code::AudioVolumeUp => Key::AudioVolumeUp,
Code::NumpadEqual => a("="),
Code::Pause => Key::Pause,
Code::NumpadComma => a(","),
Code::Lang1 => Key::HangulMode,
Code::Lang2 => Key::HanjaMode,
Code::IntlYen => a("¥"),
Code::MetaLeft => Key::Meta,
Code::MetaRight => Key::Meta,
Code::ContextMenu => Key::ContextMenu,
Code::BrowserStop => Key::BrowserStop,
Code::Again => Key::Again,
Code::Props => Key::Props,
Code::Undo => Key::Undo,
Code::Select => Key::Select,
Code::Copy => Key::Copy,
Code::Open => Key::Open,
Code::Paste => Key::Paste,
Code::Find => Key::Find,
Code::Cut => Key::Cut,
Code::Help => Key::Help,
Code::LaunchApp2 => Key::LaunchApplication2,
Code::WakeUp => Key::WakeUp,
Code::LaunchApp1 => Key::LaunchApplication1,
Code::LaunchMail => Key::LaunchMail,
Code::BrowserFavorites => Key::BrowserFavorites,
Code::BrowserBack => Key::BrowserBack,
Code::BrowserForward => Key::BrowserForward,
Code::Eject => Key::Eject,
Code::MediaTrackNext => Key::MediaTrackNext,
Code::MediaPlayPause => Key::MediaPlayPause,
Code::MediaTrackPrevious => Key::MediaTrackPrevious,
Code::MediaStop => Key::MediaStop,
Code::MediaSelect => Key::LaunchMediaPlayer,
Code::BrowserHome => Key::BrowserHome,
Code::BrowserRefresh => Key::BrowserRefresh,
Code::BrowserSearch => Key::BrowserSearch,
_ => Key::Unidentified,
}
}
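// Sketch tests for the hardcoded layout above (an addition, not druid code):
// letters follow SHIFT, and numpad digits follow the NumLock/Shift rule in `n`.
#[cfg(test)]
mod code_to_key_tests {
    use super::*;

    #[test]
    fn layout_basics() {
        assert_eq!(code_to_key(Code::KeyA, Modifiers::empty()), Key::Character("a".into()));
        assert_eq!(code_to_key(Code::KeyA, Modifiers::SHIFT), Key::Character("A".into()));
        // NumLock without Shift selects the digit...
        assert_eq!(code_to_key(Code::Numpad0, Modifiers::NUM_LOCK), Key::Character("0".into()));
        // ...while no modifiers select the navigation key.
        assert_eq!(code_to_key(Code::Numpad0, Modifiers::empty()), Key::Insert);
    }
}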
#[cfg(target_os = "linux")]
/// Map hardware keycode to code.
///
/// In theory, the hardware keycode is device dependent, but in
/// practice it's probably pretty reliable.
///
/// The logic is based on NativeKeyToDOMCodeName.h in Mozilla.
fn hardware_keycode_to_code(hw_keycode: u16) -> Code {
match hw_keycode {
0x0009 => Code::Escape,
0x000A => Code::Digit1,
0x000B => Code::Digit2,
0x000C => Code::Digit3,
0x000D => Code::Digit4,
0x000E => Code::Digit5,
0x000F => Code::Digit6,
0x0010 => Code::Digit7,
0x0011 => Code::Digit8,
0x0012 => Code::Digit9,
0x0013 => Code::Digit0,
0x0014 => Code::Minus,
0x0015 => Code::Equal,
0x0016 => Code::Backspace,
0x0017 => Code::Tab,
0x0018 => Code::KeyQ,
0x0019 => Code::KeyW,
0x001A => Code::KeyE,
0x001B => Code::KeyR,
0x001C => Code::KeyT,
0x001D => Code::KeyY,
0x001E => Code::KeyU,
0x001F => Code::KeyI,
0x0020 => Code::KeyO,
0x0021 => Code::KeyP,
0x0022 => Code::BracketLeft,
0x0023 => Code::BracketRight,
0x0024 => Code::Enter,
0x0025 => Code::ControlLeft,
0x0026 => Code::KeyA,
0x0027 => Code::KeyS,
0x0028 => Code::KeyD,
0x0029 => Code::KeyF,
0x002A => Code::KeyG,
0x002B => Code::KeyH,
0x002C => Code::KeyJ,
0x002D => Code::KeyK,
0x002E => Code::KeyL,
0x002F => Code::Semicolon,
0x0030 => Code::Quote,
0x0031 => Code::Backquote,
0x0032 => Code::ShiftLeft,
0x0033 => Code::Backslash,
0x0034 => Code::KeyZ,
0x0035 => Code::KeyX,
0x0036 => Code::KeyC,
0x0037 => Code::KeyV,
0x0038 => Code::KeyB,
0x0039 => Code::KeyN,
0x003A => Code::KeyM,
0x003B => Code::Comma,
0x003C => Code::Period,
0x003D => Code::Slash,
0x003E => Code::ShiftRight,
0x003F => Code::NumpadMultiply,
0x0040 => Code::AltLeft,
0x0041 => Code::Space,
0x0042 => Code::CapsLock,
0x0043 => Code::F1,
0x0044 => Code::F2,
0x0045 => Code::F3,
0x0046 => Code::F4,
0x0047 => Code::F5,
0x0048 => Code::F6,
0x0049 => Code::F7,
0x004A => Code::F8,
0x004B => Code::F9,
0x004C => Code::F10,
0x004D => Code::NumLock,
0x004F => Code::Numpad7,
0x0050 => Code::Numpad8,
0x0051 => Code::Numpad9,
0x0052 => Code::NumpadSubtract,
0x0053 => Code::Numpad4,
0x0054 => Code::Numpad5,
0x0055 => Code::Numpad6,
0x0056 => Code::NumpadAdd,
0x0057 => Code::Numpad1,
0x0058 => Code::Numpad2,
0x0059 => Code::Numpad3,
0x005A => Code::Numpad0,
0x005B => Code::NumpadDecimal,
0x005E => Code::IntlBackslash,
0x005F => Code::F11,
0x0060 => Code::F12,
0x0061 => Code::IntlRo,
0x0064 => Code::Convert,
0x0065 => Code::KanaMode,
0x0066 => Code::NonConvert,
0x0068 => Code::NumpadEnter,
0x0069 => Code::ControlRight,
0x006A => Code::NumpadDivide,
0x006B => Code::PrintScreen,
0x006C => Code::AltRight,
0x006E => Code::Home,
0x006F => Code::ArrowUp,
0x0070 => Code::PageUp,
0x0071 => Code::ArrowLeft,
0x0072 => Code::ArrowRight,
0x0073 => Code::End,
0x0074 => Code::ArrowDown,
0x0075 => Code::PageDown,
0x0076 => Code::Insert,
0x0077 => Code::Delete,
0x0079 => Code::AudioVolumeMute,
0x007A => Code::AudioVolumeDown,
0x007B => Code::AudioVolumeUp,
0x007D => Code::NumpadEqual,
0x007F => Code::Pause,
0x0081 => Code::NumpadComma,
0x0082 => Code::Lang1,
0x0083 => Code::Lang2,
0x0084 => Code::IntlYen,
0x0085 => Code::MetaLeft,
0x0086 => Code::MetaRight,
0x0087 => Code::ContextMenu,
0x0088 => Code::BrowserStop,
0x0089 => Code::Again,
0x008A => Code::Props,
0x008B => Code::Undo,
0x008C => Code::Select,
0x008D => Code::Copy,
0x008E => Code::Open,
0x008F => Code::Paste,
0x0090 => Code::Find,
0x0091 => Code::Cut,
0x0092 => Code::Help,
0x0094 => Code::LaunchApp2,
0x0097 => Code::WakeUp,
0x0098 => Code::LaunchApp1,
// key to right of volume controls on T430s produces 0x9C
// but no documentation of what it should map to :/
0x00A3 => Code::LaunchMail,
0x00A4 => Code::BrowserFavorites,
0x00A6 => Code::BrowserBack,
0x00A7 => Code::BrowserForward,
0x00A9 => Code::Eject,
0x00AB => Code::MediaTrackNext,
0x00AC => Code::MediaPlayPause,
0x00AD => Code::MediaTrackPrevious,
0x00AE => Code::MediaStop,
0x00B3 => Code::MediaSelect,
0x00B4 => Code::BrowserHome,
0x00B5 => Code::BrowserRefresh,
0x00E1 => Code::BrowserSearch,
_ => Code::Unidentified,
}
}
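// Sketch test (an addition): spot-checks a few rows of the Mozilla-derived
// table above, including the fallback for an unknown keycode.
#[cfg(all(test, target_os = "linux"))]
mod keycode_tests {
    use super::*;

    #[test]
    fn known_and_unknown_keycodes() {
        assert_eq!(hardware_keycode_to_code(0x0026), Code::KeyA);
        assert_eq!(hardware_keycode_to_code(0x0041), Code::Space);
        assert_eq!(hardware_keycode_to_code(0xFFFF), Code::Unidentified);
    }
}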
// Extracts the keyboard modifiers from, e.g., the `state` field of
// `xcb::xproto::ButtonPressEvent`
fn key_mods(mods: u16) -> Modifiers {
let mut ret = Modifiers::default();
let mut key_masks = [
(xproto::MOD_MASK_SHIFT, Modifiers::SHIFT),
(xproto::MOD_MASK_CONTROL, Modifiers::CONTROL),
// X11's mod keys are configurable, but this seems
// like a reasonable default for US keyboards, at least,
// where the "windows" key seems to be MOD_MASK_4.
(xproto::MOD_MASK_1, Modifiers::ALT),
(xproto::MOD_MASK_2, Modifiers::NUM_LOCK),
(xproto::MOD_MASK_4, Modifiers::META),
(xproto::MOD_MASK_LOCK, Modifiers::CAPS_LOCK),
];
for (mask, modifiers) in &mut key_masks {
if mods & (*mask as u16) != 0 {
ret |= *modifiers;
}
}
ret
}
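// Sketch test (an addition) for the modifier extraction above, built from the
// raw X11 masks; assumes the default US-style mapping documented in `key_mods`.
#[cfg(test)]
mod key_mods_tests {
    use super::*;

    #[test]
    fn shift_and_control_masks() {
        let state = (xproto::MOD_MASK_SHIFT | xproto::MOD_MASK_CONTROL) as u16;
        assert_eq!(key_mods(state), Modifiers::SHIFT | Modifiers::CONTROL);
    }
}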
pub(super) fn convert_key_press_event(key_press: &xcb::KeyPressEvent) -> KeyboardEvent {
let hw_keycode = key_press.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_press.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Down;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
pub(super) fn convert_key_release_event(key_release: &xcb::KeyReleaseEvent) -> KeyboardEvent {
let hw_keycode = key_release.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_release.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Up;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
} | 0x0049 => Code::F7,
0x004A => Code::F8,
0x004B => Code::F9,
0x004C => Code::F10, | random_line_split |
keyboard.rs
// Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Baseview modifications to druid code:
// - collect functions from various files
// - update imports, paths etc
//! X11 keyboard handling
use xcb::xproto;
use keyboard_types::*;
use crate::keyboard::code_to_location;
/// Convert a hardware scan code to a key.
///
/// Note: this is a hardcoded layout. We need to detect the user's
/// layout from the system and apply it.
fn code_to_key(code: Code, m: Modifiers) -> Key {
fn a(s: &str) -> Key {
Key::Character(s.into())
}
fn s(mods: Modifiers, base: &str, shifted: &str) -> Key {
if mods.contains(Modifiers::SHIFT) {
Key::Character(shifted.into())
} else {
Key::Character(base.into())
}
}
fn n(mods: Modifiers, base: Key, num: &str) -> Key {
if mods.contains(Modifiers::NUM_LOCK) != mods.contains(Modifiers::SHIFT) {
Key::Character(num.into())
} else {
base
}
}
match code {
Code::KeyA => s(m, "a", "A"),
Code::KeyB => s(m, "b", "B"),
Code::KeyC => s(m, "c", "C"),
Code::KeyD => s(m, "d", "D"),
Code::KeyE => s(m, "e", "E"),
Code::KeyF => s(m, "f", "F"),
Code::KeyG => s(m, "g", "G"),
Code::KeyH => s(m, "h", "H"),
Code::KeyI => s(m, "i", "I"),
Code::KeyJ => s(m, "j", "J"),
Code::KeyK => s(m, "k", "K"),
Code::KeyL => s(m, "l", "L"),
Code::KeyM => s(m, "m", "M"),
Code::KeyN => s(m, "n", "N"),
Code::KeyO => s(m, "o", "O"),
Code::KeyP => s(m, "p", "P"),
Code::KeyQ => s(m, "q", "Q"),
Code::KeyR => s(m, "r", "R"),
Code::KeyS => s(m, "s", "S"),
Code::KeyT => s(m, "t", "T"),
Code::KeyU => s(m, "u", "U"),
Code::KeyV => s(m, "v", "V"),
Code::KeyW => s(m, "w", "W"),
Code::KeyX => s(m, "x", "X"),
Code::KeyY => s(m, "y", "Y"),
Code::KeyZ => s(m, "z", "Z"),
Code::Digit0 => s(m, "0", ")"),
Code::Digit1 => s(m, "1", "!"),
Code::Digit2 => s(m, "2", "@"),
Code::Digit3 => s(m, "3", "#"),
Code::Digit4 => s(m, "4", "$"),
Code::Digit5 => s(m, "5", "%"),
Code::Digit6 => s(m, "6", "^"),
Code::Digit7 => s(m, "7", "&"),
Code::Digit8 => s(m, "8", "*"),
Code::Digit9 => s(m, "9", "("),
Code::Backquote => s(m, "`", "~"),
Code::Minus => s(m, "-", "_"),
Code::Equal => s(m, "=", "+"),
Code::BracketLeft => s(m, "[", "{"),
Code::BracketRight => s(m, "]", "}"),
Code::Backslash => s(m, "\\", "|"),
Code::Semicolon => s(m, ";", ":"),
Code::Quote => s(m, "'", "\""),
Code::Comma => s(m, ",", "<"),
Code::Period => s(m, ".", ">"),
Code::Slash => s(m, "/", "?"),
Code::Space => a(" "),
Code::Escape => Key::Escape,
Code::Backspace => Key::Backspace,
Code::Tab => Key::Tab,
Code::Enter => Key::Enter,
Code::ControlLeft => Key::Control,
Code::ShiftLeft => Key::Shift,
Code::ShiftRight => Key::Shift,
Code::NumpadMultiply => a("*"),
Code::AltLeft => Key::Alt,
Code::CapsLock => Key::CapsLock,
Code::F1 => Key::F1,
Code::F2 => Key::F2,
Code::F3 => Key::F3,
Code::F4 => Key::F4,
Code::F5 => Key::F5,
Code::F6 => Key::F6,
Code::F7 => Key::F7,
Code::F8 => Key::F8,
Code::F9 => Key::F9,
Code::F10 => Key::F10,
Code::NumLock => Key::NumLock,
Code::ScrollLock => Key::ScrollLock,
Code::Numpad0 => n(m, Key::Insert, "0"),
Code::Numpad1 => n(m, Key::End, "1"),
Code::Numpad2 => n(m, Key::ArrowDown, "2"),
Code::Numpad3 => n(m, Key::PageDown, "3"),
Code::Numpad4 => n(m, Key::ArrowLeft, "4"),
Code::Numpad5 => n(m, Key::Clear, "5"),
Code::Numpad6 => n(m, Key::ArrowRight, "6"),
Code::Numpad7 => n(m, Key::Home, "7"),
Code::Numpad8 => n(m, Key::ArrowUp, "8"),
Code::Numpad9 => n(m, Key::PageUp, "9"),
Code::NumpadSubtract => a("-"),
Code::NumpadAdd => a("+"),
Code::NumpadDecimal => n(m, Key::Delete, "."),
Code::IntlBackslash => s(m, "\\", "|"),
Code::F11 => Key::F11,
Code::F12 => Key::F12,
// This mapping is based on the picture in the w3c spec.
Code::IntlRo => a("\\"),
Code::Convert => Key::Convert,
Code::KanaMode => Key::KanaMode,
Code::NonConvert => Key::NonConvert,
Code::NumpadEnter => Key::Enter,
Code::ControlRight => Key::Control,
Code::NumpadDivide => a("/"),
Code::PrintScreen => Key::PrintScreen,
Code::AltRight => Key::Alt,
Code::Home => Key::Home,
Code::ArrowUp => Key::ArrowUp,
Code::PageUp => Key::PageUp,
Code::ArrowLeft => Key::ArrowLeft,
Code::ArrowRight => Key::ArrowRight,
Code::End => Key::End,
Code::ArrowDown => Key::ArrowDown,
Code::PageDown => Key::PageDown,
Code::Insert => Key::Insert,
Code::Delete => Key::Delete,
Code::AudioVolumeMute => Key::AudioVolumeMute,
Code::AudioVolumeDown => Key::AudioVolumeDown,
Code::AudioVolumeUp => Key::AudioVolumeUp,
Code::NumpadEqual => a("="),
Code::Pause => Key::Pause,
Code::NumpadComma => a(","),
Code::Lang1 => Key::HangulMode,
Code::Lang2 => Key::HanjaMode,
Code::IntlYen => a("¥"),
Code::MetaLeft => Key::Meta,
Code::MetaRight => Key::Meta,
Code::ContextMenu => Key::ContextMenu,
Code::BrowserStop => Key::BrowserStop,
Code::Again => Key::Again,
Code::Props => Key::Props,
Code::Undo => Key::Undo,
Code::Select => Key::Select,
Code::Copy => Key::Copy,
Code::Open => Key::Open,
Code::Paste => Key::Paste,
Code::Find => Key::Find,
Code::Cut => Key::Cut,
Code::Help => Key::Help,
Code::LaunchApp2 => Key::LaunchApplication2,
Code::WakeUp => Key::WakeUp,
Code::LaunchApp1 => Key::LaunchApplication1,
Code::LaunchMail => Key::LaunchMail,
Code::BrowserFavorites => Key::BrowserFavorites,
Code::BrowserBack => Key::BrowserBack,
Code::BrowserForward => Key::BrowserForward,
Code::Eject => Key::Eject,
Code::MediaTrackNext => Key::MediaTrackNext,
Code::MediaPlayPause => Key::MediaPlayPause,
Code::MediaTrackPrevious => Key::MediaTrackPrevious,
Code::MediaStop => Key::MediaStop,
Code::MediaSelect => Key::LaunchMediaPlayer,
Code::BrowserHome => Key::BrowserHome,
Code::BrowserRefresh => Key::BrowserRefresh,
Code::BrowserSearch => Key::BrowserSearch,
_ => Key::Unidentified,
}
}
#[cfg(target_os = "linux")]
/// Map hardware keycode to code.
///
/// In theory, the hardware keycode is device dependent, but in
/// practice it's probably pretty reliable.
///
/// The logic is based on NativeKeyToDOMCodeName.h in Mozilla.
fn hardware_keycode_to_code(hw_keycode: u16) -> Code {
match hw_keycode {
0x0009 => Code::Escape,
0x000A => Code::Digit1,
0x000B => Code::Digit2,
0x000C => Code::Digit3,
0x000D => Code::Digit4,
0x000E => Code::Digit5,
0x000F => Code::Digit6,
0x0010 => Code::Digit7,
0x0011 => Code::Digit8,
0x0012 => Code::Digit9,
0x0013 => Code::Digit0,
0x0014 => Code::Minus,
0x0015 => Code::Equal,
0x0016 => Code::Backspace,
0x0017 => Code::Tab,
0x0018 => Code::KeyQ,
0x0019 => Code::KeyW,
0x001A => Code::KeyE,
0x001B => Code::KeyR,
0x001C => Code::KeyT,
0x001D => Code::KeyY,
0x001E => Code::KeyU,
0x001F => Code::KeyI,
0x0020 => Code::KeyO,
0x0021 => Code::KeyP,
0x0022 => Code::BracketLeft,
0x0023 => Code::BracketRight,
0x0024 => Code::Enter,
0x0025 => Code::ControlLeft,
0x0026 => Code::KeyA,
0x0027 => Code::KeyS,
0x0028 => Code::KeyD,
0x0029 => Code::KeyF,
0x002A => Code::KeyG,
0x002B => Code::KeyH,
0x002C => Code::KeyJ,
0x002D => Code::KeyK,
0x002E => Code::KeyL,
0x002F => Code::Semicolon,
0x0030 => Code::Quote,
0x0031 => Code::Backquote,
0x0032 => Code::ShiftLeft,
0x0033 => Code::Backslash,
0x0034 => Code::KeyZ,
0x0035 => Code::KeyX,
0x0036 => Code::KeyC,
0x0037 => Code::KeyV,
0x0038 => Code::KeyB,
0x0039 => Code::KeyN,
0x003A => Code::KeyM,
0x003B => Code::Comma,
0x003C => Code::Period,
0x003D => Code::Slash,
0x003E => Code::ShiftRight,
0x003F => Code::NumpadMultiply,
0x0040 => Code::AltLeft,
0x0041 => Code::Space,
0x0042 => Code::CapsLock,
0x0043 => Code::F1,
0x0044 => Code::F2,
0x0045 => Code::F3,
0x0046 => Code::F4,
0x0047 => Code::F5,
0x0048 => Code::F6,
0x0049 => Code::F7,
0x004A => Code::F8,
0x004B => Code::F9,
0x004C => Code::F10,
0x004D => Code::NumLock,
0x004E => Code::ScrollLock,
0x004F => Code::Numpad7,
0x0050 => Code::Numpad8,
0x0051 => Code::Numpad9,
0x0052 => Code::NumpadSubtract,
0x0053 => Code::Numpad4,
0x0054 => Code::Numpad5,
0x0055 => Code::Numpad6,
0x0056 => Code::NumpadAdd,
0x0057 => Code::Numpad1,
0x0058 => Code::Numpad2,
0x0059 => Code::Numpad3,
0x005A => Code::Numpad0,
0x005B => Code::NumpadDecimal,
0x005E => Code::IntlBackslash,
0x005F => Code::F11,
0x0060 => Code::F12,
0x0061 => Code::IntlRo,
0x0064 => Code::Convert,
0x0065 => Code::KanaMode,
0x0066 => Code::NonConvert,
0x0068 => Code::NumpadEnter,
0x0069 => Code::ControlRight,
0x006A => Code::NumpadDivide,
0x006B => Code::PrintScreen,
0x006C => Code::AltRight,
0x006E => Code::Home,
0x006F => Code::ArrowUp,
0x0070 => Code::PageUp,
0x0071 => Code::ArrowLeft,
0x0072 => Code::ArrowRight,
0x0073 => Code::End,
0x0074 => Code::ArrowDown,
0x0075 => Code::PageDown,
0x0076 => Code::Insert,
0x0077 => Code::Delete,
0x0079 => Code::AudioVolumeMute,
0x007A => Code::AudioVolumeDown,
0x007B => Code::AudioVolumeUp,
0x007D => Code::NumpadEqual,
0x007F => Code::Pause,
0x0081 => Code::NumpadComma,
0x0082 => Code::Lang1,
0x0083 => Code::Lang2,
0x0084 => Code::IntlYen,
0x0085 => Code::MetaLeft,
0x0086 => Code::MetaRight,
0x0087 => Code::ContextMenu,
0x0088 => Code::BrowserStop,
0x0089 => Code::Again,
0x008A => Code::Props,
0x008B => Code::Undo,
0x008C => Code::Select,
0x008D => Code::Copy,
0x008E => Code::Open,
0x008F => Code::Paste,
0x0090 => Code::Find,
0x0091 => Code::Cut,
0x0092 => Code::Help,
0x0094 => Code::LaunchApp2,
0x0097 => Code::WakeUp,
0x0098 => Code::LaunchApp1,
// key to right of volume controls on T430s produces 0x9C
// but no documentation of what it should map to :/
0x00A3 => Code::LaunchMail,
0x00A4 => Code::BrowserFavorites,
0x00A6 => Code::BrowserBack,
0x00A7 => Code::BrowserForward,
0x00A9 => Code::Eject,
0x00AB => Code::MediaTrackNext,
0x00AC => Code::MediaPlayPause,
0x00AD => Code::MediaTrackPrevious,
0x00AE => Code::MediaStop,
0x00B3 => Code::MediaSelect,
0x00B4 => Code::BrowserHome,
0x00B5 => Code::BrowserRefresh,
0x00E1 => Code::BrowserSearch,
_ => Code::Unidentified,
}
}
// Extracts the keyboard modifiers from, e.g., the `state` field of
// `xcb::xproto::ButtonPressEvent`
fn key_mods(mods: u16) -> Modifiers {
let mut ret = Modifiers::default();
let mut key_masks = [
(xproto::MOD_MASK_SHIFT, Modifiers::SHIFT),
(xproto::MOD_MASK_CONTROL, Modifiers::CONTROL),
// X11's mod keys are configurable, but this seems
// like a reasonable default for US keyboards, at least,
// where the "windows" key seems to be MOD_MASK_4.
(xproto::MOD_MASK_1, Modifiers::ALT),
(xproto::MOD_MASK_2, Modifiers::NUM_LOCK),
(xproto::MOD_MASK_4, Modifiers::META),
(xproto::MOD_MASK_LOCK, Modifiers::CAPS_LOCK),
];
for (mask, modifiers) in &mut key_masks {
if mods & (*mask as u16) != 0 {
ret |= *modifiers;
}
}
ret
}
pub(super) fn convert_key_press_event(key_press: &xcb::KeyPressEvent) -> KeyboardEvent {
let hw_keycode = key_press.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_press.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Down;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
pub(super) fn convert_key_release_event(key_release: &xcb::KeyReleaseEvent) -> KeyboardEvent {
let hw_keycode = key_release.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_release.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Up;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
| code_to_key | identifier_name |
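The `s` and `n` helpers that `code_to_key` relies on encode two small rules: SHIFT selects the shifted character, and on the numpad the digit wins exactly when one of NumLock/SHIFT is held. Below is a hedged, self-contained sketch of that logic, assuming only the `keyboard_types` crate; the helper names are illustrative and not part of the file above.

use keyboard_types::{Key, Modifiers};

// Shift-aware character selection, mirroring the `s` helper.
fn shifted(mods: Modifiers, base: &str, shift: &str) -> Key {
    if mods.contains(Modifiers::SHIFT) {
        Key::Character(shift.into())
    } else {
        Key::Character(base.into())
    }
}

// NumLock-aware numpad selection, mirroring the `n` helper: exactly one of
// NumLock/Shift held selects the digit; otherwise the navigation key wins.
fn numpad(mods: Modifiers, base: Key, num: &str) -> Key {
    if mods.contains(Modifiers::NUM_LOCK) != mods.contains(Modifiers::SHIFT) {
        Key::Character(num.into())
    } else {
        base
    }
}

fn main() {
    assert_eq!(shifted(Modifiers::SHIFT, "a", "A"), Key::Character("A".into()));
    assert_eq!(numpad(Modifiers::NUM_LOCK, Key::Home, "7"), Key::Character("7".into()));
    assert_eq!(numpad(Modifiers::empty(), Key::Home, "7"), Key::Home);
}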
keyboard.rs | // Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Baseview modifications to druid code:
// - collect functions from various files
// - update imports, paths etc
//! X11 keyboard handling
use xcb::xproto;
use keyboard_types::*;
use crate::keyboard::code_to_location;
/// Convert a hardware scan code to a key.
///
/// Note: this is a hardcoded layout. We need to detect the user's
/// layout from the system and apply it.
fn code_to_key(code: Code, m: Modifiers) -> Key {
fn a(s: &str) -> Key {
Key::Character(s.into())
}
fn s(mods: Modifiers, base: &str, shifted: &str) -> Key |
fn n(mods: Modifiers, base: Key, num: &str) -> Key {
if mods.contains(Modifiers::NUM_LOCK) != mods.contains(Modifiers::SHIFT) {
Key::Character(num.into())
} else {
base
}
}
match code {
Code::KeyA => s(m, "a", "A"),
Code::KeyB => s(m, "b", "B"),
Code::KeyC => s(m, "c", "C"),
Code::KeyD => s(m, "d", "D"),
Code::KeyE => s(m, "e", "E"),
Code::KeyF => s(m, "f", "F"),
Code::KeyG => s(m, "g", "G"),
Code::KeyH => s(m, "h", "H"),
Code::KeyI => s(m, "i", "I"),
Code::KeyJ => s(m, "j", "J"),
Code::KeyK => s(m, "k", "K"),
Code::KeyL => s(m, "l", "L"),
Code::KeyM => s(m, "m", "M"),
Code::KeyN => s(m, "n", "N"),
Code::KeyO => s(m, "o", "O"),
Code::KeyP => s(m, "p", "P"),
Code::KeyQ => s(m, "q", "Q"),
Code::KeyR => s(m, "r", "R"),
Code::KeyS => s(m, "s", "S"),
Code::KeyT => s(m, "t", "T"),
Code::KeyU => s(m, "u", "U"),
Code::KeyV => s(m, "v", "V"),
Code::KeyW => s(m, "w", "W"),
Code::KeyX => s(m, "x", "X"),
Code::KeyY => s(m, "y", "Y"),
Code::KeyZ => s(m, "z", "Z"),
Code::Digit0 => s(m, "0", ")"),
Code::Digit1 => s(m, "1", "!"),
Code::Digit2 => s(m, "2", "@"),
Code::Digit3 => s(m, "3", "#"),
Code::Digit4 => s(m, "4", "$"),
Code::Digit5 => s(m, "5", "%"),
Code::Digit6 => s(m, "6", "^"),
Code::Digit7 => s(m, "7", "&"),
Code::Digit8 => s(m, "8", "*"),
Code::Digit9 => s(m, "9", "("),
Code::Backquote => s(m, "`", "~"),
Code::Minus => s(m, "-", "_"),
Code::Equal => s(m, "=", "+"),
Code::BracketLeft => s(m, "[", "{"),
Code::BracketRight => s(m, "]", "}"),
Code::Backslash => s(m, "\\", "|"),
Code::Semicolon => s(m, ";", ":"),
Code::Quote => s(m, "'", "\""),
Code::Comma => s(m, ",", "<"),
Code::Period => s(m, ".", ">"),
Code::Slash => s(m, "/", "?"),
Code::Space => a(" "),
Code::Escape => Key::Escape,
Code::Backspace => Key::Backspace,
Code::Tab => Key::Tab,
Code::Enter => Key::Enter,
Code::ControlLeft => Key::Control,
Code::ShiftLeft => Key::Shift,
Code::ShiftRight => Key::Shift,
Code::NumpadMultiply => a("*"),
Code::AltLeft => Key::Alt,
Code::CapsLock => Key::CapsLock,
Code::F1 => Key::F1,
Code::F2 => Key::F2,
Code::F3 => Key::F3,
Code::F4 => Key::F4,
Code::F5 => Key::F5,
Code::F6 => Key::F6,
Code::F7 => Key::F7,
Code::F8 => Key::F8,
Code::F9 => Key::F9,
Code::F10 => Key::F10,
Code::NumLock => Key::NumLock,
Code::ScrollLock => Key::ScrollLock,
Code::Numpad0 => n(m, Key::Insert, "0"),
Code::Numpad1 => n(m, Key::End, "1"),
Code::Numpad2 => n(m, Key::ArrowDown, "2"),
Code::Numpad3 => n(m, Key::PageDown, "3"),
Code::Numpad4 => n(m, Key::ArrowLeft, "4"),
Code::Numpad5 => n(m, Key::Clear, "5"),
Code::Numpad6 => n(m, Key::ArrowRight, "6"),
Code::Numpad7 => n(m, Key::Home, "7"),
Code::Numpad8 => n(m, Key::ArrowUp, "8"),
Code::Numpad9 => n(m, Key::PageUp, "9"),
Code::NumpadSubtract => a("-"),
Code::NumpadAdd => a("+"),
Code::NumpadDecimal => n(m, Key::Delete, "."),
Code::IntlBackslash => s(m, "\\", "|"),
Code::F11 => Key::F11,
Code::F12 => Key::F12,
// This mapping is based on the picture in the w3c spec.
Code::IntlRo => a("\\"),
Code::Convert => Key::Convert,
Code::KanaMode => Key::KanaMode,
Code::NonConvert => Key::NonConvert,
Code::NumpadEnter => Key::Enter,
Code::ControlRight => Key::Control,
Code::NumpadDivide => a("/"),
Code::PrintScreen => Key::PrintScreen,
Code::AltRight => Key::Alt,
Code::Home => Key::Home,
Code::ArrowUp => Key::ArrowUp,
Code::PageUp => Key::PageUp,
Code::ArrowLeft => Key::ArrowLeft,
Code::ArrowRight => Key::ArrowRight,
Code::End => Key::End,
Code::ArrowDown => Key::ArrowDown,
Code::PageDown => Key::PageDown,
Code::Insert => Key::Insert,
Code::Delete => Key::Delete,
Code::AudioVolumeMute => Key::AudioVolumeMute,
Code::AudioVolumeDown => Key::AudioVolumeDown,
Code::AudioVolumeUp => Key::AudioVolumeUp,
Code::NumpadEqual => a("="),
Code::Pause => Key::Pause,
Code::NumpadComma => a(","),
Code::Lang1 => Key::HangulMode,
Code::Lang2 => Key::HanjaMode,
Code::IntlYen => a("¥"),
Code::MetaLeft => Key::Meta,
Code::MetaRight => Key::Meta,
Code::ContextMenu => Key::ContextMenu,
Code::BrowserStop => Key::BrowserStop,
Code::Again => Key::Again,
Code::Props => Key::Props,
Code::Undo => Key::Undo,
Code::Select => Key::Select,
Code::Copy => Key::Copy,
Code::Open => Key::Open,
Code::Paste => Key::Paste,
Code::Find => Key::Find,
Code::Cut => Key::Cut,
Code::Help => Key::Help,
Code::LaunchApp2 => Key::LaunchApplication2,
Code::WakeUp => Key::WakeUp,
Code::LaunchApp1 => Key::LaunchApplication1,
Code::LaunchMail => Key::LaunchMail,
Code::BrowserFavorites => Key::BrowserFavorites,
Code::BrowserBack => Key::BrowserBack,
Code::BrowserForward => Key::BrowserForward,
Code::Eject => Key::Eject,
Code::MediaTrackNext => Key::MediaTrackNext,
Code::MediaPlayPause => Key::MediaPlayPause,
Code::MediaTrackPrevious => Key::MediaTrackPrevious,
Code::MediaStop => Key::MediaStop,
Code::MediaSelect => Key::LaunchMediaPlayer,
Code::BrowserHome => Key::BrowserHome,
Code::BrowserRefresh => Key::BrowserRefresh,
Code::BrowserSearch => Key::BrowserSearch,
_ => Key::Unidentified,
}
}
#[cfg(target_os = "linux")]
/// Map hardware keycode to code.
///
/// In theory, the hardware keycode is device dependent, but in
/// practice it's probably pretty reliable.
///
/// The logic is based on NativeKeyToDOMCodeName.h in Mozilla.
fn hardware_keycode_to_code(hw_keycode: u16) -> Code {
match hw_keycode {
0x0009 => Code::Escape,
0x000A => Code::Digit1,
0x000B => Code::Digit2,
0x000C => Code::Digit3,
0x000D => Code::Digit4,
0x000E => Code::Digit5,
0x000F => Code::Digit6,
0x0010 => Code::Digit7,
0x0011 => Code::Digit8,
0x0012 => Code::Digit9,
0x0013 => Code::Digit0,
0x0014 => Code::Minus,
0x0015 => Code::Equal,
0x0016 => Code::Backspace,
0x0017 => Code::Tab,
0x0018 => Code::KeyQ,
0x0019 => Code::KeyW,
0x001A => Code::KeyE,
0x001B => Code::KeyR,
0x001C => Code::KeyT,
0x001D => Code::KeyY,
0x001E => Code::KeyU,
0x001F => Code::KeyI,
0x0020 => Code::KeyO,
0x0021 => Code::KeyP,
0x0022 => Code::BracketLeft,
0x0023 => Code::BracketRight,
0x0024 => Code::Enter,
0x0025 => Code::ControlLeft,
0x0026 => Code::KeyA,
0x0027 => Code::KeyS,
0x0028 => Code::KeyD,
0x0029 => Code::KeyF,
0x002A => Code::KeyG,
0x002B => Code::KeyH,
0x002C => Code::KeyJ,
0x002D => Code::KeyK,
0x002E => Code::KeyL,
0x002F => Code::Semicolon,
0x0030 => Code::Quote,
0x0031 => Code::Backquote,
0x0032 => Code::ShiftLeft,
0x0033 => Code::Backslash,
0x0034 => Code::KeyZ,
0x0035 => Code::KeyX,
0x0036 => Code::KeyC,
0x0037 => Code::KeyV,
0x0038 => Code::KeyB,
0x0039 => Code::KeyN,
0x003A => Code::KeyM,
0x003B => Code::Comma,
0x003C => Code::Period,
0x003D => Code::Slash,
0x003E => Code::ShiftRight,
0x003F => Code::NumpadMultiply,
0x0040 => Code::AltLeft,
0x0041 => Code::Space,
0x0042 => Code::CapsLock,
0x0043 => Code::F1,
0x0044 => Code::F2,
0x0045 => Code::F3,
0x0046 => Code::F4,
0x0047 => Code::F5,
0x0048 => Code::F6,
0x0049 => Code::F7,
0x004A => Code::F8,
0x004B => Code::F9,
0x004C => Code::F10,
0x004D => Code::NumLock,
0x004E => Code::ScrollLock,
0x004F => Code::Numpad7,
0x0050 => Code::Numpad8,
0x0051 => Code::Numpad9,
0x0052 => Code::NumpadSubtract,
0x0053 => Code::Numpad4,
0x0054 => Code::Numpad5,
0x0055 => Code::Numpad6,
0x0056 => Code::NumpadAdd,
0x0057 => Code::Numpad1,
0x0058 => Code::Numpad2,
0x0059 => Code::Numpad3,
0x005A => Code::Numpad0,
0x005B => Code::NumpadDecimal,
0x005E => Code::IntlBackslash,
0x005F => Code::F11,
0x0060 => Code::F12,
0x0061 => Code::IntlRo,
0x0064 => Code::Convert,
0x0065 => Code::KanaMode,
0x0066 => Code::NonConvert,
0x0068 => Code::NumpadEnter,
0x0069 => Code::ControlRight,
0x006A => Code::NumpadDivide,
0x006B => Code::PrintScreen,
0x006C => Code::AltRight,
0x006E => Code::Home,
0x006F => Code::ArrowUp,
0x0070 => Code::PageUp,
0x0071 => Code::ArrowLeft,
0x0072 => Code::ArrowRight,
0x0073 => Code::End,
0x0074 => Code::ArrowDown,
0x0075 => Code::PageDown,
0x0076 => Code::Insert,
0x0077 => Code::Delete,
0x0079 => Code::AudioVolumeMute,
0x007A => Code::AudioVolumeDown,
0x007B => Code::AudioVolumeUp,
0x007D => Code::NumpadEqual,
0x007F => Code::Pause,
0x0081 => Code::NumpadComma,
0x0082 => Code::Lang1,
0x0083 => Code::Lang2,
0x0084 => Code::IntlYen,
0x0085 => Code::MetaLeft,
0x0086 => Code::MetaRight,
0x0087 => Code::ContextMenu,
0x0088 => Code::BrowserStop,
0x0089 => Code::Again,
0x008A => Code::Props,
0x008B => Code::Undo,
0x008C => Code::Select,
0x008D => Code::Copy,
0x008E => Code::Open,
0x008F => Code::Paste,
0x0090 => Code::Find,
0x0091 => Code::Cut,
0x0092 => Code::Help,
0x0094 => Code::LaunchApp2,
0x0097 => Code::WakeUp,
0x0098 => Code::LaunchApp1,
// key to right of volume controls on T430s produces 0x9C
// but no documentation of what it should map to :/
0x00A3 => Code::LaunchMail,
0x00A4 => Code::BrowserFavorites,
0x00A6 => Code::BrowserBack,
0x00A7 => Code::BrowserForward,
0x00A9 => Code::Eject,
0x00AB => Code::MediaTrackNext,
0x00AC => Code::MediaPlayPause,
0x00AD => Code::MediaTrackPrevious,
0x00AE => Code::MediaStop,
0x00B3 => Code::MediaSelect,
0x00B4 => Code::BrowserHome,
0x00B5 => Code::BrowserRefresh,
0x00E1 => Code::BrowserSearch,
_ => Code::Unidentified,
}
}
// Extracts the keyboard modifiers from, e.g., the `state` field of
// `xcb::xproto::ButtonPressEvent`
fn key_mods(mods: u16) -> Modifiers {
let mut ret = Modifiers::default();
let mut key_masks = [
(xproto::MOD_MASK_SHIFT, Modifiers::SHIFT),
(xproto::MOD_MASK_CONTROL, Modifiers::CONTROL),
// X11's mod keys are configurable, but this seems
// like a reasonable default for US keyboards, at least,
// where the "windows" key seems to be MOD_MASK_4.
(xproto::MOD_MASK_1, Modifiers::ALT),
(xproto::MOD_MASK_2, Modifiers::NUM_LOCK),
(xproto::MOD_MASK_4, Modifiers::META),
(xproto::MOD_MASK_LOCK, Modifiers::CAPS_LOCK),
];
for (mask, modifiers) in &mut key_masks {
if mods & (*mask as u16) != 0 {
ret |= *modifiers;
}
}
ret
}
pub(super) fn convert_key_press_event(key_press: &xcb::KeyPressEvent) -> KeyboardEvent {
let hw_keycode = key_press.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_press.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Down;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
pub(super) fn convert_key_release_event(key_release: &xcb::KeyReleaseEvent) -> KeyboardEvent {
let hw_keycode = key_release.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_release.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Up;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
| {
if mods.contains(Modifiers::SHIFT) {
Key::Character(shifted.into())
} else {
Key::Character(base.into())
}
} | identifier_body |
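`key_mods` above is a plain mask walk over the X11 `state` bitfield. Here is a minimal standalone sketch of the same walk, with two masks written as literal constants (ShiftMask = 1 << 0 and ControlMask = 1 << 2 in core X11) instead of `xcb::xproto` re-exports; treat the constant values as assumptions of this sketch.

use keyboard_types::Modifiers;

const MOD_MASK_SHIFT: u16 = 1 << 0; // X11 ShiftMask (assumed value)
const MOD_MASK_CONTROL: u16 = 1 << 2; // X11 ControlMask (assumed value)

// Translate an X11-style modifier bitmask into keyboard_types::Modifiers.
fn key_mods(state: u16) -> Modifiers {
    let mut ret = Modifiers::default();
    for (mask, m) in [
        (MOD_MASK_SHIFT, Modifiers::SHIFT),
        (MOD_MASK_CONTROL, Modifiers::CONTROL),
    ] {
        if state & mask != 0 {
            ret |= m;
        }
    }
    ret
}

fn main() {
    let mods = key_mods(MOD_MASK_SHIFT | MOD_MASK_CONTROL);
    assert!(mods.contains(Modifiers::SHIFT | Modifiers::CONTROL));
}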
watched_bitfield.rs | use crate::{BitField8, Error};
use std::{
fmt::{self, Display},
str::FromStr,
};
/// (De)Serializable field that tracks which videos have been watched
/// and the latest one watched.
///
/// This is a [`WatchedBitField`] compatible field, (de)serialized
/// without the knowledge of `videos_ids`.
///
/// `{anchor:video_id}:{anchor_length}:{bitfield8}`
///
/// # Examples
///
/// ```
/// use stremio_watched_bitfield::WatchedField;
///
/// // `tt2934286:1:5` - anchor video id
/// // `5` - anchor video length
/// // `eJyTZwAAAEAAIA==` - BitField8
///
/// let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse::<WatchedField>().expect("Should parse");
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedField {
/// The anchor video id
///
/// Indicates which is the last watched video id.
anchor_video: String,
/// The length from the beginning of the `BitField8` to the last
/// watched video.
anchor_length: usize,
bitfield: BitField8,
}
impl Display for WatchedField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}:{}:{}",
self.anchor_video, self.anchor_length, self.bitfield
)
}
}
impl From<WatchedBitField> for WatchedField {
fn from(watched_bit_field: WatchedBitField) -> Self {
let last_id = watched_bit_field.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = watched_bit_field
.video_ids
.get(last_id)
.map_or_else(|| "undefined".to_string(), |id| id.clone());
Self {
anchor_video: last_video_id,
anchor_length: last_id + 1,
bitfield: watched_bit_field.bitfield,
}
}
}
impl FromStr for WatchedField {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
// serialized is formed by {id}:{len}:{serializedBuf}, but since {id} might contain : we have to pop gradually and then keep the rest
let mut components = string.split(':').collect::<Vec<&str>>();
if components.len() < 3 {
return Err(Error("Not enough components".to_string()));
}
let bitfield_buf = components
.pop()
.ok_or("Cannot obtain the serialized data")?
.to_string();
let anchor_length = components
.pop()
.ok_or("Cannot obtain the length field")?
.parse::<usize>()?;
let anchor_video_id = components.join(":");
let bitfield = BitField8::try_from((bitfield_buf, None))?;
Ok(Self {
bitfield,
anchor_video: anchor_video_id,
anchor_length,
})
}
}
/// Tracks which videos have been watched.
///
/// Serialized in the format `{id}:{len}:{serializedBuf}` but since `{id}`
/// might contain `:` we pop gradually and then keep the rest.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedBitField {
bitfield: BitField8,
video_ids: Vec<String>,
}
impl WatchedBitField {
pub fn construct_from_array(arr: Vec<bool>, video_ids: Vec<String>) -> WatchedBitField {
let mut bitfield = BitField8::new(video_ids.len());
for (i, val) in arr.iter().enumerate() {
bitfield.set(i, *val);
}
WatchedBitField {
bitfield,
video_ids,
}
}
pub fn new(bitfield: BitField8, video_ids: Vec<String>) -> WatchedBitField {
Self {
bitfield,
video_ids,
}
}
pub fn construct_with_videos(
watched_field: WatchedField,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// We can shift the bitmap in any direction, as long as we can find the anchor video
if let Some(anchor_video_idx) = video_ids
.iter()
.position(|s| s == &watched_field.anchor_video)
{
// TODO: replace with `usize` and `checked_sub` when more tests are added for negative ids
let offset = watched_field.anchor_length as i32 - anchor_video_idx as i32 - 1;
let bitfield =
BitField8::new_with_values(watched_field.bitfield.values, Some(video_ids.len()));
// in case of a previous empty array, this will be 0
if offset != 0 {
// Resize the buffer
let mut resized_wbf = WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids: video_ids.clone(),
};
// rewrite the old buf into the new one, applying the offset
for i in 0..video_ids.len() {
// TODO: Check what will happen if we change it to `usize`
let id_in_prev = i as i32 + offset;
if id_in_prev >= 0 && (id_in_prev as usize) < bitfield.length {
resized_wbf.set(i, bitfield.get(id_in_prev as usize));
}
}
Ok(resized_wbf)
} else {
Ok(WatchedBitField {
bitfield,
video_ids,
})
}
} else {
// videoId could not be found, return a totally blank buf
Ok(WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids,
})
}
}
pub fn construct_and_resize(
serialized: &str,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// note: videoIds.length can only be >= the serialized lastLength
// should we assert?
// we might also want to assert that the bitfield.length of the returned wb is the same as videoIds.length
let watched_field = serialized.parse()?;
Self::construct_with_videos(watched_field, video_ids)
}
pub fn get(&self, idx: usize) -> bool {
self.bitfield.get(idx)
}
pub fn get_video(&self, video_id: &str) -> bool {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.get(pos)
} else {
false
}
}
pub fn set(&mut self, idx: usize, v: bool) {
self.bitfield.set(idx, v);
}
pub fn set_video(&mut self, video_id: &str, v: bool) {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.set(pos, v);
}
}
}
impl fmt::Display for WatchedBitField {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let packed = String::try_from(&self.bitfield).expect("bitfield failed to compress");
let last_id = self.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = self
.video_ids
.get(last_id)
.map_or("undefined", |id| id.as_str());
write!(f, "{}:{}:{}", last_video_id, last_id + 1, packed)
}
}
impl From<WatchedBitField> for BitField8 {
fn from(watched: WatchedBitField) -> Self |
}
/// Module containing all the impls of the `serde` feature
#[cfg(feature = "serde")]
mod serde {
use std::str::FromStr;
use serde::{de, Serialize};
use super::WatchedField;
impl<'de> serde::Deserialize<'de> for WatchedField {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let serialized = String::deserialize(deserializer)?;
WatchedField::from_str(&serialized).map_err(de::Error::custom)
}
}
impl Serialize for WatchedField {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(test)]
mod tests {
use crate::{BitField8, WatchedBitField, WatchedField};
#[test]
fn parse_and_modify() {
let videos = [
"tt2934286:1:1",
"tt2934286:1:2",
"tt2934286:1:3",
"tt2934286:1:4",
"tt2934286:1:5",
"tt2934286:1:6",
"tt2934286:1:7",
"tt2934286:1:8",
"tt2934286:1:9",
];
let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==";
let mut wb = WatchedBitField::construct_and_resize(
watched,
videos.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
assert!(wb.get_video("tt2934286:1:5"));
assert!(!wb.get_video("tt2934286:1:6"));
assert_eq!(watched, wb.to_string());
wb.set_video("tt2934286:1:6", true);
assert!(wb.get_video("tt2934286:1:6"));
}
#[test]
fn construct_from_array() {
let arr = vec![false; 500];
let mut video_ids = vec![];
for i in 1..500 {
video_ids.push(format!("tt2934286:1:{}", i));
}
let mut wb = WatchedBitField::construct_from_array(arr, video_ids.clone());
// All should be false
for (i, val) in video_ids.iter().enumerate() {
assert!(!wb.get(i));
assert!(!wb.get_video(val));
}
// Set half to true
for (i, _val) in video_ids.iter().enumerate() {
wb.set(i, i % 2 == 0);
}
// Serialize and deserialize to new structure
let watched = wb.to_string();
let wb2 = WatchedBitField::construct_and_resize(
&watched,
video_ids.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
// Half should still be true
for (i, val) in video_ids.iter().enumerate() {
assert_eq!(wb2.get(i), i % 2 == 0);
assert_eq!(wb2.get_video(val), i % 2 == 0);
}
}
#[test]
fn to_string_empty() {
let watched = WatchedBitField::construct_from_array(vec![], vec![]);
let serialized = watched.to_string();
assert_eq!(serialized, "undefined:1:eJwDAAAAAAE=");
}
#[test]
#[cfg(feature = "serde")]
fn test_watched_field_de_serialize() {
let string = "tt7767422:3:8:24:eJz7//8/AAX9Av4=";
let json_value = serde_json::json!(string);
let expected = string.parse::<WatchedField>().expect("Should parse field");
let actual_from_json = serde_json::from_value::<WatchedField>(json_value.clone())
.expect("Should deserialize ");
assert_eq!(expected, actual_from_json);
assert_eq!("eJz7//8/AAX9Av4=", &actual_from_json.bitfield.to_string());
assert_eq!(24, actual_from_json.anchor_length);
assert_eq!("tt7767422:3:8", actual_from_json.anchor_video);
let actual_to_json = serde_json::to_value(&expected).expect("Should serialize");
assert_eq!(json_value, actual_to_json);
}
#[test]
fn deserialize_empty() {
let watched = WatchedBitField::construct_and_resize("undefined:1:eJwDAAAAAAE=", vec![]);
assert_eq!(
watched,
Ok(WatchedBitField {
bitfield: BitField8::new(0),
video_ids: vec![]
})
);
}
}
| {
watched.bitfield
} | identifier_body |
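The `FromStr` impl above parses `{anchor_video_id}:{anchor_length}:{bitfield}` from the right, because the video id itself may contain `:`. A minimal sketch of just that split, grounded in the impl and the doc-comment example (the `split_watched` helper is illustrative, not crate API):

// Pop the last two ':'-separated fields, then rejoin the rest as the id.
fn split_watched(serialized: &str) -> Option<(String, usize, String)> {
    let mut parts: Vec<&str> = serialized.split(':').collect();
    if parts.len() < 3 {
        return None;
    }
    let bitfield = parts.pop()?.to_string();
    let anchor_length = parts.pop()?.parse::<usize>().ok()?;
    let anchor_video = parts.join(":");
    Some((anchor_video, anchor_length, bitfield))
}

fn main() {
    let (id, len, buf) = split_watched("tt2934286:1:5:5:eJyTZwAAAEAAIA==").unwrap();
    assert_eq!(id, "tt2934286:1:5"); // the id keeps its internal ':' separators
    assert_eq!(len, 5);
    assert_eq!(buf, "eJyTZwAAAEAAIA==");
}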
watched_bitfield.rs | use crate::{BitField8, Error};
use std::{
fmt::{self, Display},
str::FromStr,
};
/// (De)Serializable field that tracks which videos have been watched
/// and the latest one watched.
///
/// This is a [`WatchedBitField`] compatible field, (de)serialized
/// without the knowledge of `videos_ids`.
///
/// `{anchor:video_id}:{anchor_length}:{bitfield8}`
///
/// # Examples
///
/// ```
/// use stremio_watched_bitfield::WatchedField;
///
/// // `tt2934286:1:5` - anchor video id
/// // `5` - anchor video length
/// // `eJyTZwAAAEAAIA==` - BitField8
///
/// let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse::<WatchedField>().expect("Should parse");
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedField {
/// The anchor video id
///
/// Indicates which is the last watched video id.
anchor_video: String,
/// The length from the beginning of the `BitField8` to the last
/// watched video.
anchor_length: usize,
bitfield: BitField8,
}
impl Display for WatchedField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}:{}:{}",
self.anchor_video, self.anchor_length, self.bitfield
)
}
}
impl From<WatchedBitField> for WatchedField {
fn from(watched_bit_field: WatchedBitField) -> Self {
let last_id = watched_bit_field.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = watched_bit_field
.video_ids
.get(last_id)
.map_or_else(|| "undefined".to_string(), |id| id.clone());
Self {
anchor_video: last_video_id,
anchor_length: last_id + 1,
bitfield: watched_bit_field.bitfield,
}
}
}
impl FromStr for WatchedField {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
// serialized is formed by {id}:{len}:{serializedBuf}, but since {id} might contain : we have to pop gradually and then keep the rest
let mut components = string.split(':').collect::<Vec<&str>>();
if components.len() < 3 {
return Err(Error("Not enough components".to_string()));
}
let bitfield_buf = components
.pop()
.ok_or("Cannot obtain the serialized data")?
.to_string();
let anchor_length = components
.pop()
.ok_or("Cannot obtain the length field")?
.parse::<usize>()?;
let anchor_video_id = components.join(":");
let bitfield = BitField8::try_from((bitfield_buf, None))?;
Ok(Self {
bitfield,
anchor_video: anchor_video_id,
anchor_length,
})
}
}
/// Tracks which videos have been watched.
///
/// Serialized in the format `{id}:{len}:{serializedBuf}` but since `{id}`
/// might contain `:` we pop gradually and then keep the rest.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedBitField {
bitfield: BitField8,
video_ids: Vec<String>,
}
impl WatchedBitField {
pub fn construct_from_array(arr: Vec<bool>, video_ids: Vec<String>) -> WatchedBitField {
let mut bitfield = BitField8::new(video_ids.len());
for (i, val) in arr.iter().enumerate() {
bitfield.set(i, *val);
}
WatchedBitField {
bitfield,
video_ids,
}
}
pub fn new(bitfield: BitField8, video_ids: Vec<String>) -> WatchedBitField {
Self {
bitfield,
video_ids,
}
}
pub fn construct_with_videos(
watched_field: WatchedField,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// We can shift the bitmap in any direction, as long as we can find the anchor video
if let Some(anchor_video_idx) = video_ids
.iter()
.position(|s| s == &watched_field.anchor_video)
{
// TODO: replace with `usize` and `checked_sub` when more tests are added for negative ids
let offset = watched_field.anchor_length as i32 - anchor_video_idx as i32 - 1;
let bitfield =
BitField8::new_with_values(watched_field.bitfield.values, Some(video_ids.len()));
// in case of a previous empty array, this will be 0
if offset != 0 {
// Resize the buffer
let mut resized_wbf = WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids: video_ids.clone(),
};
// rewrite the old buf into the new one, applying the offset
for i in 0..video_ids.len() {
// TODO: Check what will happen if we change it to `usize`
let id_in_prev = i as i32 + offset;
if id_in_prev >= 0 && (id_in_prev as usize) < bitfield.length {
resized_wbf.set(i, bitfield.get(id_in_prev as usize));
}
}
Ok(resized_wbf)
} else {
Ok(WatchedBitField {
bitfield,
video_ids,
})
}
} else {
// videoId could not be found, return a totally blank buf
Ok(WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids,
})
}
}
pub fn construct_and_resize(
serialized: &str,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// note: videoIds.length can only be >= the serialized lastLength
// should we assert?
// we might also want to assert that the bitfield.length of the returned wb is the same as videoIds.length
let watched_field = serialized.parse()?;
Self::construct_with_videos(watched_field, video_ids)
}
pub fn get(&self, idx: usize) -> bool {
self.bitfield.get(idx)
}
pub fn get_video(&self, video_id: &str) -> bool {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.get(pos)
} else {
false
}
}
pub fn set(&mut self, idx: usize, v: bool) {
self.bitfield.set(idx, v);
}
pub fn set_video(&mut self, video_id: &str, v: bool) {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.set(pos, v);
}
}
}
impl fmt::Display for WatchedBitField {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let packed = String::try_from(&self.bitfield).expect("bitfield failed to compress");
let last_id = self.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = self
.video_ids
.get(last_id)
.map_or("undefined", |id| id.as_str());
write!(f, "{}:{}:{}", last_video_id, last_id + 1, packed)
}
}
impl From<WatchedBitField> for BitField8 {
fn from(watched: WatchedBitField) -> Self {
watched.bitfield
}
}
/// Module containing all the impls of the `serde` feature
#[cfg(feature = "serde")]
mod serde {
use std::str::FromStr;
use serde::{de, Serialize};
use super::WatchedField;
impl<'de> serde::Deserialize<'de> for WatchedField {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let serialized = String::deserialize(deserializer)?;
WatchedField::from_str(&serialized).map_err(de::Error::custom)
}
}
impl Serialize for WatchedField {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(test)]
mod tests {
use crate::{BitField8, WatchedBitField, WatchedField};
#[test]
fn parse_and_modify() {
let videos = [
"tt2934286:1:1",
"tt2934286:1:2",
"tt2934286:1:3",
"tt2934286:1:4",
"tt2934286:1:5",
"tt2934286:1:6",
"tt2934286:1:7",
"tt2934286:1:8",
"tt2934286:1:9",
];
let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==";
let mut wb = WatchedBitField::construct_and_resize(
watched,
videos.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
assert!(wb.get_video("tt2934286:1:5"));
assert!(!wb.get_video("tt2934286:1:6"));
assert_eq!(watched, wb.to_string());
wb.set_video("tt2934286:1:6", true);
assert!(wb.get_video("tt2934286:1:6"));
}
#[test]
fn construct_from_array() {
let arr = vec![false; 500];
let mut video_ids = vec![];
for i in 1..500 {
video_ids.push(format!("tt2934286:1:{}", i));
}
let mut wb = WatchedBitField::construct_from_array(arr, video_ids.clone());
// All should be false
for (i, val) in video_ids.iter().enumerate() {
assert!(!wb.get(i));
assert!(!wb.get_video(val));
}
// Set half to true
for (i, _val) in video_ids.iter().enumerate() {
wb.set(i, i % 2 == 0);
}
// Serialize and deserialize to new structure
let watched = wb.to_string();
let wb2 = WatchedBitField::construct_and_resize(
&watched,
video_ids.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
// Half should still be true
for (i, val) in video_ids.iter().enumerate() {
assert_eq!(wb2.get(i), i % 2 == 0);
assert_eq!(wb2.get_video(val), i % 2 == 0);
}
}
#[test]
fn to_string_empty() {
let watched = WatchedBitField::construct_from_array(vec![], vec![]);
let serialized = watched.to_string();
assert_eq!(serialized, "undefined:1:eJwDAAAAAAE=");
}
#[test]
#[cfg(feature = "serde")]
fn test_watched_field_de_serialize() {
let string = "tt7767422:3:8:24:eJz7//8/AAX9Av4=";
let json_value = serde_json::json!(string);
let expected = string.parse::<WatchedField>().expect("Should parse field");
let actual_from_json = serde_json::from_value::<WatchedField>(json_value.clone())
.expect("Should deserialize ");
assert_eq!(expected, actual_from_json);
assert_eq!("eJz7//8/AAX9Av4=", &actual_from_json.bitfield.to_string());
assert_eq!(24, actual_from_json.anchor_length);
assert_eq!("tt7767422:3:8", actual_from_json.anchor_video);
let actual_to_json = serde_json::to_value(&expected).expect("Should serialize");
assert_eq!(json_value, actual_to_json);
}
#[test]
fn | () {
let watched = WatchedBitField::construct_and_resize("undefined:1:eJwDAAAAAAE=", vec![]);
assert_eq!(
watched,
Ok(WatchedBitField {
bitfield: BitField8::new(0),
video_ids: vec![]
})
);
}
}
| deserialize_empty | identifier_name |
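The resize path in `construct_with_videos` turns on a single signed offset: the anchor video must sit `anchor_length - 1` bits into the old buffer, and every new bit `i` is read from old bit `i + offset`. A small sketch of that arithmetic (the `offset` helper is illustrative, not crate API):

// Offset between the serialized buffer and the new video list.
fn offset(anchor_length: usize, anchor_video_idx: usize) -> i32 {
    anchor_length as i32 - anchor_video_idx as i32 - 1
}

fn main() {
    // "tt2934286:1:5" sits at index 4 with anchor_length 5: already aligned,
    // so the buffer is reused without shifting.
    assert_eq!(offset(5, 4), 0);
    // If two videos are prepended, the anchor moves to index 6 and every new
    // bit i is read from old bit i - 2.
    assert_eq!(offset(5, 6), -2);
}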
watched_bitfield.rs | use crate::{BitField8, Error};
use std::{
fmt::{self, Display},
str::FromStr,
};
/// (De)Serializable field that tracks which videos have been watched
/// and the latest one watched.
///
/// This is a [`WatchedBitField`] compatible field, (de)serialized
/// without the knowledge of `videos_ids`.
///
/// `{anchor:video_id}:{anchor_length}:{bitfield8}`
///
/// # Examples
///
/// ```
/// use stremio_watched_bitfield::WatchedField;
///
/// // `tt2934286:1:5` - anchor video id
/// // `5` - anchor video length
/// // `eJyTZwAAAEAAIA==` - BitField8
///
/// let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse::<WatchedField>().expect("Should parse");
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedField {
/// The anchor video id
///
/// Indicates which is the last watched video id.
anchor_video: String,
/// The length from the beginning of the `BitField8` to the last
/// watched video.
anchor_length: usize,
bitfield: BitField8,
}
impl Display for WatchedField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}:{}:{}",
self.anchor_video, self.anchor_length, self.bitfield
)
}
}
impl From<WatchedBitField> for WatchedField {
fn from(watched_bit_field: WatchedBitField) -> Self {
let last_id = watched_bit_field.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = watched_bit_field
.video_ids
.get(last_id)
.map_or_else(|| "undefined".to_string(), |id| id.clone());
Self {
anchor_video: last_video_id,
anchor_length: last_id + 1,
bitfield: watched_bit_field.bitfield,
}
}
}
impl FromStr for WatchedField {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
// serialized is formed by {id}:{len}:{serializedBuf}, but since {id} might contain : we have to pop gradually and then keep the rest
let mut components = string.split(':').collect::<Vec<&str>>();
if components.len() < 3 {
return Err(Error("Not enough components".to_string()));
}
let bitfield_buf = components
.pop()
.ok_or("Cannot obtain the serialized data")?
.to_string();
let anchor_length = components
.pop()
.ok_or("Cannot obtain the length field")?
.parse::<usize>()?;
let anchor_video_id = components.join(":");
let bitfield = BitField8::try_from((bitfield_buf, None))?;
Ok(Self {
bitfield,
anchor_video: anchor_video_id,
anchor_length,
})
}
}
/// Tracks which videos have been watched.
///
/// Serialized in the format `{id}:{len}:{serializedBuf}` but since `{id}`
/// might contain `:` we pop gradually and then keep the rest.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedBitField {
bitfield: BitField8,
video_ids: Vec<String>,
}
impl WatchedBitField {
pub fn construct_from_array(arr: Vec<bool>, video_ids: Vec<String>) -> WatchedBitField {
let mut bitfield = BitField8::new(video_ids.len());
for (i, val) in arr.iter().enumerate() {
bitfield.set(i, *val);
}
WatchedBitField {
bitfield,
video_ids,
}
}
pub fn new(bitfield: BitField8, video_ids: Vec<String>) -> WatchedBitField {
Self {
bitfield,
video_ids,
}
}
pub fn construct_with_videos(
watched_field: WatchedField,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// We can shift the bitmap in any direction, as long as we can find the anchor video
if let Some(anchor_video_idx) = video_ids
.iter()
.position(|s| s == &watched_field.anchor_video)
{
// TODO: replace with `usize` and `checked_sub` when more tests are added for negative ids
let offset = watched_field.anchor_length as i32 - anchor_video_idx as i32 - 1;
let bitfield =
BitField8::new_with_values(watched_field.bitfield.values, Some(video_ids.len()));
// in case of a previous empty array, this will be 0
if offset != 0 {
// Resize the buffer
let mut resized_wbf = WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids: video_ids.clone(),
};
// rewrite the old buf into the new one, applying the offset
for i in 0..video_ids.len() {
// TODO: Check what will happen if we change it to `usize`
let id_in_prev = i as i32 + offset;
if id_in_prev >= 0 && (id_in_prev as usize) < bitfield.length {
resized_wbf.set(i, bitfield.get(id_in_prev as usize));
}
}
Ok(resized_wbf)
} else {
Ok(WatchedBitField {
bitfield,
video_ids,
})
}
} else {
// videoId could not be found, return a totally blank buf
Ok(WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids,
})
}
}
pub fn construct_and_resize(
serialized: &str,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// note: videoIds.length can only be >= the serialized lastLength
// should we assert?
// we might also want to assert that the bitfield.length of the returned wb is the same as videoIds.length
let watched_field = serialized.parse()?;
Self::construct_with_videos(watched_field, video_ids)
}
pub fn get(&self, idx: usize) -> bool {
self.bitfield.get(idx)
}
pub fn get_video(&self, video_id: &str) -> bool {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.get(pos)
} else |
}
pub fn set(&mut self, idx: usize, v: bool) {
self.bitfield.set(idx, v);
}
pub fn set_video(&mut self, video_id: &str, v: bool) {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.set(pos, v);
}
}
}
impl fmt::Display for WatchedBitField {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let packed = String::try_from(&self.bitfield).expect("bitfield failed to compress");
let last_id = self.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = self
.video_ids
.get(last_id)
.map_or("undefined", |id| id.as_str());
write!(f, "{}:{}:{}", last_video_id, last_id + 1, packed)
}
}
impl From<WatchedBitField> for BitField8 {
fn from(watched: WatchedBitField) -> Self {
watched.bitfield
}
}
/// Module containing all the impls of the `serde` feature
#[cfg(feature = "serde")]
mod serde {
use std::str::FromStr;
use serde::{de, Serialize};
use super::WatchedField;
impl<'de> serde::Deserialize<'de> for WatchedField {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let serialized = String::deserialize(deserializer)?;
WatchedField::from_str(&serialized).map_err(de::Error::custom)
}
}
impl Serialize for WatchedField {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(test)]
mod tests {
use crate::{BitField8, WatchedBitField, WatchedField};
#[test]
fn parse_and_modify() {
let videos = [
"tt2934286:1:1",
"tt2934286:1:2",
"tt2934286:1:3",
"tt2934286:1:4",
"tt2934286:1:5",
"tt2934286:1:6",
"tt2934286:1:7",
"tt2934286:1:8",
"tt2934286:1:9",
];
let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==";
let mut wb = WatchedBitField::construct_and_resize(
watched,
videos.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
assert!(wb.get_video("tt2934286:1:5"));
assert!(!wb.get_video("tt2934286:1:6"));
assert_eq!(watched, wb.to_string());
wb.set_video("tt2934286:1:6", true);
assert!(wb.get_video("tt2934286:1:6"));
}
#[test]
fn construct_from_array() {
let arr = vec![false; 500];
let mut video_ids = vec![];
for i in 1..500 {
video_ids.push(format!("tt2934286:1:{}", i));
}
let mut wb = WatchedBitField::construct_from_array(arr, video_ids.clone());
// All should be false
for (i, val) in video_ids.iter().enumerate() {
assert!(!wb.get(i));
assert!(!wb.get_video(val));
}
// Set half to true
for (i, _val) in video_ids.iter().enumerate() {
wb.set(i, i % 2 == 0);
}
// Serialize and deserialize to new structure
let watched = wb.to_string();
let wb2 = WatchedBitField::construct_and_resize(
&watched,
video_ids.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
// Half should still be true
for (i, val) in video_ids.iter().enumerate() {
assert_eq!(wb2.get(i), i % 2 == 0);
assert_eq!(wb2.get_video(val), i % 2 == 0);
}
}
#[test]
fn to_string_empty() {
let watched = WatchedBitField::construct_from_array(vec![], vec![]);
let serialized = watched.to_string();
assert_eq!(serialized, "undefined:1:eJwDAAAAAAE=");
}
#[test]
#[cfg(feature = "serde")]
fn test_watched_field_de_serialize() {
let string = "tt7767422:3:8:24:eJz7//8/AAX9Av4=";
let json_value = serde_json::json!(string);
let expected = string.parse::<WatchedField>().expect("Should parse field");
let actual_from_json = serde_json::from_value::<WatchedField>(json_value.clone())
.expect("Should deserialize ");
assert_eq!(expected, actual_from_json);
assert_eq!("eJz7//8/AAX9Av4=", &actual_from_json.bitfield.to_string());
assert_eq!(24, actual_from_json.anchor_length);
assert_eq!("tt7767422:3:8", actual_from_json.anchor_video);
let actual_to_json = serde_json::to_value(&expected).expect("Should serialize");
assert_eq!(json_value, actual_to_json);
}
#[test]
fn deserialize_empty() {
let watched = WatchedBitField::construct_and_resize("undefined:1:eJwDAAAAAAE=", vec![]);
assert_eq!(
watched,
Ok(WatchedBitField {
bitfield: BitField8::new(0),
video_ids: vec![]
})
);
}
}
| {
false
} | conditional_block |
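For completeness, a hedged end-to-end usage sketch of the API the tests above exercise, assuming the crate exports `WatchedBitField` alongside `WatchedField`:

use stremio_watched_bitfield::WatchedBitField;

fn main() {
    let ids: Vec<String> = (1..=3).map(|i| format!("tt2934286:1:{}", i)).collect();
    // Start with nothing watched, then mark episode 2 by its video id.
    let mut wb = WatchedBitField::construct_from_array(vec![false; 3], ids);
    wb.set_video("tt2934286:1:2", true);
    assert!(wb.get_video("tt2934286:1:2"));
    // Display renders "{anchor_video_id}:{anchor_length}:{packed_bitfield}".
    println!("{}", wb);
}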