file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
day25.rs
|
use std::collections::HashMap;
use std::io::Read;
use crate::common::from_lines;
use crate::Solution;
const MOD_BASE: u32 = 20201227;
const SUBJECT_NUMBER: u32 = 7;
fn loop_count(public_key: u32) -> u64 {
discrete_log(SUBJECT_NUMBER, public_key, MOD_BASE).unwrap() as u64
}
// Implementation of the baby-step giant-step algorithm
//
// Based on: https://en.wikipedia.org/wiki/Baby-step_giant-step#C++_algorithm_(C++17)
fn discrete_log(g: u32, h: u32, mod_base: u32) -> Option<u32> {
let m = (mod_base as f64).sqrt().ceil() as u32;
let mut table = HashMap::with_capacity(m as usize);
let mut e: u32 = 1;
for i in 0..m {
table.insert(e, i);
e = ((e as u64 * g as u64) % mod_base as u64) as u32;
}
let factor = mod_exp(g as u64, (mod_base - m - 1) as u64, mod_base as u64);
e = h;
for i in 0..m {
if let Some(&val) = table.get(&e) {
return Some(i * m + val);
}
e = ((e as u64 * factor) % mod_base as u64) as u32;
}
None
}
#[inline]
fn mod_exp(base: u64, mut power: u64, mod_base: u64) -> u64 {
let mut result = 1;
let mut cur = base;
while power > 0 {
if power % 2 == 1 {
result *= cur;
result %= mod_base;
}
cur *= cur;
cur %= mod_base;
power /= 2;
}
result
}
#[derive(Default)]
pub struct Day25;
impl Solution for Day25 {
fn part1(&mut self, input: &mut dyn Read) -> String {
let nums: Vec<_> = from_lines(input);
let key_exponent = loop_count(nums[0]);
mod_exp(nums[1] as u64, key_exponent, MOD_BASE as u64).to_string()
}
fn part2(&mut self, _input: &mut dyn Read) -> String {
"Part 2 is free!".to_string()
}
}
#[cfg(test)]
mod tests {
use crate::test_implementation;
use super::*;
const SAMPLE: &[u8] = include_bytes!("../samples/25.txt");
|
fn test_loop_count() {
assert_eq!(8, loop_count(5764801));
assert_eq!(11, loop_count(17807724));
}
#[test]
fn sample_part1() {
test_implementation(Day25, 1, SAMPLE, 14897079);
}
}
|
#[test]
|
random_line_split
|
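The day25.rs row above pairs a baby-step giant-step discrete logarithm with square-and-multiply modular exponentiation to recover the shared encryption key. A minimal, self-contained sketch of that relationship using only the sample values quoted in the row's tests; the helper names below are illustrative and not part of the row:

fn pow_mod(mut base: u64, mut exp: u64, modulus: u64) -> u64 {
    // Square-and-multiply, mirroring mod_exp in the row above.
    let mut result = 1;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            result = result * base % modulus;
        }
        base = base * base % modulus;
        exp >>= 1;
    }
    result
}

fn naive_loop_count(public_key: u64) -> u64 {
    // Brute-force discrete log to base 7 modulo 20201227; the BSGS version in
    // the row answers the same question in O(sqrt(m)) steps instead of O(m).
    let (mut value, mut count) = (1u64, 0u64);
    while value != public_key {
        value = value * 7 % 20201227;
        count += 1;
    }
    count
}

fn main() {
    assert_eq!(naive_loop_count(5764801), 8);
    assert_eq!(naive_loop_count(17807724), 11);
    // Either party derives the same key from the other's public key.
    assert_eq!(pow_mod(17807724, 8, 20201227), 14897079);
    assert_eq!(pow_mod(5764801, 11, 20201227), 14897079);
}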
aes_gcm_hkdf.rs
|
// Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! AES-GCM-HKDF based implementation of the [`tink_core::StreamingAead`] trait.
use super::{noncebased, AesVariant};
use aes_gcm::aead::{generic_array::GenericArray, Aead, NewAead};
use std::convert::TryInto;
use tink_core::{subtle::random::get_random_bytes, utils::wrap_err, TinkError};
use tink_proto::HashType;
/// The size of the nonces used for GCM.
pub const AES_GCM_HKDF_NONCE_SIZE_IN_BYTES: usize = 12;
/// The size of the randomly generated nonce prefix.
pub const AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES: usize = 7;
/// The size of the tags of each ciphertext segment.
pub const AES_GCM_HKDF_TAG_SIZE_IN_BYTES: usize = 16;
/// `AesGcmHkdf` implements streaming AEAD encryption using AES-GCM.
///
/// Each ciphertext uses a new AES-GCM key, derived via HKDF from the key
/// derivation key, a randomly chosen salt of the same size as the key, and a
/// nonce prefix.
#[derive(Clone)]
pub struct AesGcmHkdf {
pub main_key: Vec<u8>,
hkdf_alg: HashType,
aes_variant: AesVariant,
ciphertext_segment_size: usize,
first_ciphertext_segment_offset: usize,
plaintext_segment_size: usize,
}
#[derive(Clone)]
enum AesGcmKeyVariant {
Aes128(Box<aes_gcm::Aes128Gcm>),
Aes256(Box<aes_gcm::Aes256Gcm>),
}
/// Calculate the header length for a given key size. The header includes
/// space for:
/// - a single byte indicating header length
/// - a salt that is the same size as the sub key
/// - a nonce prefix.
fn header_length_for(key_size_in_bytes: usize) -> usize {
1 + key_size_in_bytes + AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES
}
impl AesGcmHkdf {
/// Initialize a streaming primitive with a key derivation key
/// and encryption parameters.
///
/// `main_key` is input keying material used to derive sub keys. This must be
/// longer than the size of the sub keys (`key_size_in_bytes`).
/// `hkdf_alg` is the MAC algorithm hash type used for the HKDF key derivation.
/// `key_size_in_bytes` is the size of the derived sub keys.
/// `ciphertext_segment_size` is the size of each ciphertext segment.
/// `first_segment_offset` is the offset of the first ciphertext segment.
pub fn new(
main_key: &[u8],
hkdf_alg: HashType,
key_size_in_bytes: usize,
ciphertext_segment_size: usize,
first_segment_offset: usize,
) -> Result<AesGcmHkdf, TinkError> {
if main_key.len() < 16 || main_key.len() < key_size_in_bytes {
return Err("main_key too short".into());
}
let aes_variant = super::validate_aes_key_size(key_size_in_bytes)?;
let header_len = header_length_for(key_size_in_bytes);
if ciphertext_segment_size
<= first_segment_offset + header_len + AES_GCM_HKDF_TAG_SIZE_IN_BYTES
{
return Err("ciphertext_segment_size too small".into());
}
Ok(AesGcmHkdf {
main_key: main_key.to_vec(),
hkdf_alg,
aes_variant,
ciphertext_segment_size,
first_ciphertext_segment_offset: first_segment_offset + header_len,
plaintext_segment_size: ciphertext_segment_size - AES_GCM_HKDF_TAG_SIZE_IN_BYTES,
})
}
/// Return the length of the encryption header.
pub fn header_length(&self) -> usize {
header_length_for(self.aes_variant.key_size())
}
/// Return a key derived from the given main key using `salt` and `aad` parameters.
fn derive_key(&self, salt: &[u8], aad: &[u8]) -> Result<Vec<u8>, TinkError> {
tink_core::subtle::compute_hkdf(
self.hkdf_alg,
&self.main_key,
salt,
aad,
self.aes_variant.key_size(),
)
}
}
impl tink_core::StreamingAead for AesGcmHkdf {
/// Return a wrapper around an underlying [`std::io::Write`], such that
/// any write-operation via the wrapper results in AEAD-encryption of the
/// written data, using aad as associated authenticated data. The associated
/// data is not included in the ciphertext and has to be passed in as parameter
/// for decryption.
fn new_encrypting_writer(
&self,
mut w: Box<dyn std::io::Write>,
aad: &[u8],
) -> Result<Box<dyn tink_core::EncryptingWrite>, TinkError> {
let salt = get_random_bytes(self.aes_variant.key_size());
let nonce_prefix = get_random_bytes(AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES);
let dkey = self.derive_key(&salt, aad)?;
let cipher_key = new_cipher_key(self.aes_variant, &dkey)?;
let mut header = Vec::with_capacity(self.header_length());
header.push(
self.header_length()
.try_into()
.map_err(|e| wrap_err("header length too long", e))?,
);
header.extend_from_slice(&salt);
header.extend_from_slice(&nonce_prefix);
w.write(&header).map_err(|e| wrap_err("write failed", e))?;
let nw = noncebased::Writer::new(noncebased::WriterParams {
w,
segment_encrypter: Box::new(AesGcmHkdfSegmentEncrypter { cipher_key }),
nonce_size: AES_GCM_HKDF_NONCE_SIZE_IN_BYTES,
nonce_prefix,
plaintext_segment_size: self.plaintext_segment_size,
first_ciphertext_segment_offset: self.first_ciphertext_segment_offset,
})?;
Ok(Box::new(nw))
}
/// Return a wrapper around an underlying [`std::io::Read`], such that
/// any read-operation via the wrapper results in AEAD-decryption of the
/// underlying ciphertext, using aad as associated authenticated data.
fn new_decrypting_reader(
&self,
mut r: Box<dyn std::io::Read>,
aad: &[u8],
) -> Result<Box<dyn std::io::Read>, TinkError> {
let mut hlen = vec![0; 1];
r.read_exact(&mut hlen)
.map_err(|e| wrap_err("failed to reader header len", e))?;
if hlen[0] as usize!= self.header_length() {
return Err("invalid header length".into());
}
let mut salt = vec![0; self.aes_variant.key_size()];
r.read_exact(&mut salt)
.map_err(|e| wrap_err("cannot read salt", e))?;
let mut nonce_prefix = vec![0; AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES];
r.read_exact(&mut nonce_prefix)
.map_err(|e| wrap_err("cannot read nonce_prefix", e))?;
let dkey = self.derive_key(&salt, aad)?;
let cipher_key = new_cipher_key(self.aes_variant, &dkey)?;
let nr = noncebased::Reader::new(noncebased::ReaderParams {
r,
segment_decrypter: Box::new(AesGcmHkdfSegmentDecrypter { cipher_key }),
nonce_size: AES_GCM_HKDF_NONCE_SIZE_IN_BYTES,
nonce_prefix,
ciphertext_segment_size: self.ciphertext_segment_size,
first_ciphertext_segment_offset: self.first_ciphertext_segment_offset,
})?;
Ok(Box::new(nr))
}
}
/// Create a new AES-GCM cipher key using the given key and the crypto library.
fn new_cipher_key(aes_variant: AesVariant, key: &[u8]) -> Result<AesGcmKeyVariant, TinkError>
|
/// A [`noncebased::SegmentEncrypter`] based on AES-GCM-HKDF.
struct AesGcmHkdfSegmentEncrypter {
cipher_key: AesGcmKeyVariant,
}
impl noncebased::SegmentEncrypter for AesGcmHkdfSegmentEncrypter {
fn encrypt_segment(&self, segment: &[u8], nonce: &[u8]) -> Result<Vec<u8>, TinkError> {
let iv = GenericArray::from_slice(nonce);
match &self.cipher_key {
AesGcmKeyVariant::Aes128(key) => key.encrypt(iv, segment),
AesGcmKeyVariant::Aes256(key) => key.encrypt(iv, segment),
}
.map_err(|e| wrap_err("AesGcmHkdf: encryption failed", e))
}
}
/// A [`noncebased::SegmentDecrypter`] based on AES-GCM-HKDF.
struct AesGcmHkdfSegmentDecrypter {
cipher_key: AesGcmKeyVariant,
}
impl noncebased::SegmentDecrypter for AesGcmHkdfSegmentDecrypter {
fn decrypt_segment(&self, segment: &[u8], nonce: &[u8]) -> Result<Vec<u8>, TinkError> {
let iv = GenericArray::from_slice(nonce);
match &self.cipher_key {
AesGcmKeyVariant::Aes128(key) => key.decrypt(iv, segment),
AesGcmKeyVariant::Aes256(key) => key.decrypt(iv, segment),
}
.map_err(|e| wrap_err("AesGcmHkdf: decryption failed", e))
}
}
|
{
match aes_variant {
AesVariant::Aes128 => Ok(AesGcmKeyVariant::Aes128(Box::new(aes_gcm::Aes128Gcm::new(
GenericArray::from_slice(key),
)))),
AesVariant::Aes256 => Ok(AesGcmKeyVariant::Aes256(Box::new(aes_gcm::Aes256Gcm::new(
GenericArray::from_slice(key),
)))),
}
}
|
identifier_body
|
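The aes_gcm_hkdf.rs rows describe a header made of one length byte, a salt the size of the sub key, and a 7-byte nonce prefix, with a 16-byte GCM tag on every ciphertext segment. A small dependency-free sketch of that arithmetic; the parameter values are illustrative assumptions, not taken from the rows:

const NONCE_PREFIX_LEN: usize = 7; // AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES
const TAG_LEN: usize = 16;         // AES_GCM_HKDF_TAG_SIZE_IN_BYTES

// Header layout from header_length_for: length byte + salt + nonce prefix.
fn header_len(key_size_in_bytes: usize) -> usize {
    1 + key_size_in_bytes + NONCE_PREFIX_LEN
}

fn main() {
    let key_size = 32;                  // AES-256 sub keys (assumed)
    let ciphertext_segment_size = 4096; // illustrative
    let first_segment_offset = 0;       // illustrative

    let hdr = header_len(key_size);
    assert_eq!(hdr, 40);
    // Mirrors the constructor's check that one segment can hold the offset,
    // the header, and at least one tag's worth of ciphertext.
    assert!(ciphertext_segment_size > first_segment_offset + hdr + TAG_LEN);
    // Plaintext carried per segment, as computed in AesGcmHkdf::new.
    let plaintext_segment_size = ciphertext_segment_size - TAG_LEN;
    println!("header = {hdr} bytes, plaintext per segment = {plaintext_segment_size} bytes");
}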
aes_gcm_hkdf.rs
|
// Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! AES-GCM-HKDF based implementation of the [`tink_core::StreamingAead`] trait.
use super::{noncebased, AesVariant};
use aes_gcm::aead::{generic_array::GenericArray, Aead, NewAead};
use std::convert::TryInto;
use tink_core::{subtle::random::get_random_bytes, utils::wrap_err, TinkError};
use tink_proto::HashType;
/// The size of the nonces used for GCM.
pub const AES_GCM_HKDF_NONCE_SIZE_IN_BYTES: usize = 12;
/// The size of the randomly generated nonce prefix.
pub const AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES: usize = 7;
/// The size of the tags of each ciphertext segment.
pub const AES_GCM_HKDF_TAG_SIZE_IN_BYTES: usize = 16;
/// `AesGcmHkdf` implements streaming AEAD encryption using AES-GCM.
///
/// Each ciphertext uses a new AES-GCM key, derived via HKDF from the key
/// derivation key, a randomly chosen salt of the same size as the key, and a
/// nonce prefix.
#[derive(Clone)]
pub struct AesGcmHkdf {
pub main_key: Vec<u8>,
hkdf_alg: HashType,
aes_variant: AesVariant,
ciphertext_segment_size: usize,
first_ciphertext_segment_offset: usize,
plaintext_segment_size: usize,
}
#[derive(Clone)]
enum AesGcmKeyVariant {
Aes128(Box<aes_gcm::Aes128Gcm>),
Aes256(Box<aes_gcm::Aes256Gcm>),
}
/// Calculate the header length for a given key size. The header includes
/// space for:
/// - a single byte indicating header length
/// - a salt that is the same size as the sub key
/// - a nonce prefix.
fn header_length_for(key_size_in_bytes: usize) -> usize {
1 + key_size_in_bytes + AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES
}
impl AesGcmHkdf {
/// Initialize a streaming primitive with a key derivation key
/// and encryption parameters.
///
/// `main_key` is input keying material used to derive sub keys. This must be
/// longer than the size of the sub keys (`key_size_in_bytes`).
/// `hkdf_alg` is the MAC algorithm hash type used for the HKDF key derivation.
/// `key_size_in_bytes` is the size of the derived sub keys.
/// `ciphertext_segment_size` is the size of each ciphertext segment.
/// `first_segment_offset` is the offset of the first ciphertext segment.
pub fn new(
main_key: &[u8],
hkdf_alg: HashType,
key_size_in_bytes: usize,
ciphertext_segment_size: usize,
first_segment_offset: usize,
) -> Result<AesGcmHkdf, TinkError> {
if main_key.len() < 16 || main_key.len() < key_size_in_bytes {
return Err("main_key too short".into());
}
let aes_variant = super::validate_aes_key_size(key_size_in_bytes)?;
let header_len = header_length_for(key_size_in_bytes);
if ciphertext_segment_size
<= first_segment_offset + header_len + AES_GCM_HKDF_TAG_SIZE_IN_BYTES
{
return Err("ciphertext_segment_size too small".into());
}
Ok(AesGcmHkdf {
main_key: main_key.to_vec(),
hkdf_alg,
aes_variant,
ciphertext_segment_size,
first_ciphertext_segment_offset: first_segment_offset + header_len,
plaintext_segment_size: ciphertext_segment_size - AES_GCM_HKDF_TAG_SIZE_IN_BYTES,
})
}
/// Return the length of the encryption header.
pub fn header_length(&self) -> usize {
header_length_for(self.aes_variant.key_size())
}
/// Return a key derived from the given main key using `salt` and `aad` parameters.
fn derive_key(&self, salt: &[u8], aad: &[u8]) -> Result<Vec<u8>, TinkError> {
tink_core::subtle::compute_hkdf(
self.hkdf_alg,
&self.main_key,
salt,
aad,
self.aes_variant.key_size(),
)
}
}
impl tink_core::StreamingAead for AesGcmHkdf {
/// Return a wrapper around an underlying [`std::io::Write`], such that
/// any write-operation via the wrapper results in AEAD-encryption of the
/// written data, using aad as associated authenticated data. The associated
/// data is not included in the ciphertext and has to be passed in as parameter
/// for decryption.
fn new_encrypting_writer(
&self,
mut w: Box<dyn std::io::Write>,
aad: &[u8],
) -> Result<Box<dyn tink_core::EncryptingWrite>, TinkError> {
let salt = get_random_bytes(self.aes_variant.key_size());
let nonce_prefix = get_random_bytes(AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES);
let dkey = self.derive_key(&salt, aad)?;
let cipher_key = new_cipher_key(self.aes_variant, &dkey)?;
let mut header = Vec::with_capacity(self.header_length());
header.push(
self.header_length()
.try_into()
.map_err(|e| wrap_err("header length too long", e))?,
);
header.extend_from_slice(&salt);
header.extend_from_slice(&nonce_prefix);
w.write(&header).map_err(|e| wrap_err("write failed", e))?;
let nw = noncebased::Writer::new(noncebased::WriterParams {
w,
segment_encrypter: Box::new(AesGcmHkdfSegmentEncrypter { cipher_key }),
nonce_size: AES_GCM_HKDF_NONCE_SIZE_IN_BYTES,
nonce_prefix,
plaintext_segment_size: self.plaintext_segment_size,
first_ciphertext_segment_offset: self.first_ciphertext_segment_offset,
})?;
Ok(Box::new(nw))
}
/// Return a wrapper around an underlying [`std::io::Read`], such that
/// any read-operation via the wrapper results in AEAD-decryption of the
/// underlying ciphertext, using aad as associated authenticated data.
fn new_decrypting_reader(
&self,
mut r: Box<dyn std::io::Read>,
aad: &[u8],
) -> Result<Box<dyn std::io::Read>, TinkError> {
let mut hlen = vec![0; 1];
r.read_exact(&mut hlen)
.map_err(|e| wrap_err("failed to reader header len", e))?;
if hlen[0] as usize!= self.header_length() {
return Err("invalid header length".into());
}
let mut salt = vec![0; self.aes_variant.key_size()];
r.read_exact(&mut salt)
.map_err(|e| wrap_err("cannot read salt", e))?;
let mut nonce_prefix = vec![0; AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES];
r.read_exact(&mut nonce_prefix)
.map_err(|e| wrap_err("cannot read nonce_prefix", e))?;
let dkey = self.derive_key(&salt, aad)?;
let cipher_key = new_cipher_key(self.aes_variant, &dkey)?;
let nr = noncebased::Reader::new(noncebased::ReaderParams {
r,
segment_decrypter: Box::new(AesGcmHkdfSegmentDecrypter { cipher_key }),
nonce_size: AES_GCM_HKDF_NONCE_SIZE_IN_BYTES,
nonce_prefix,
ciphertext_segment_size: self.ciphertext_segment_size,
first_ciphertext_segment_offset: self.first_ciphertext_segment_offset,
})?;
Ok(Box::new(nr))
}
}
/// Create a new AES-GCM cipher key using the given key and the crypto library.
fn new_cipher_key(aes_variant: AesVariant, key: &[u8]) -> Result<AesGcmKeyVariant, TinkError> {
match aes_variant {
AesVariant::Aes128 => Ok(AesGcmKeyVariant::Aes128(Box::new(aes_gcm::Aes128Gcm::new(
GenericArray::from_slice(key),
)))),
AesVariant::Aes256 => Ok(AesGcmKeyVariant::Aes256(Box::new(aes_gcm::Aes256Gcm::new(
GenericArray::from_slice(key),
)))),
}
}
/// A [`noncebased::SegmentEncrypter`] based on AES-GCM-HKDF.
struct AesGcmHkdfSegmentEncrypter {
cipher_key: AesGcmKeyVariant,
}
impl noncebased::SegmentEncrypter for AesGcmHkdfSegmentEncrypter {
fn encrypt_segment(&self, segment: &[u8], nonce: &[u8]) -> Result<Vec<u8>, TinkError> {
let iv = GenericArray::from_slice(nonce);
match &self.cipher_key {
|
}
/// A [`noncebased::SegmentDecrypter`] based on AES-GCM-HKDF.
struct AesGcmHkdfSegmentDecrypter {
cipher_key: AesGcmKeyVariant,
}
impl noncebased::SegmentDecrypter for AesGcmHkdfSegmentDecrypter {
fn decrypt_segment(&self, segment: &[u8], nonce: &[u8]) -> Result<Vec<u8>, TinkError> {
let iv = GenericArray::from_slice(nonce);
match &self.cipher_key {
AesGcmKeyVariant::Aes128(key) => key.decrypt(iv, segment),
AesGcmKeyVariant::Aes256(key) => key.decrypt(iv, segment),
}
.map_err(|e| wrap_err("AesGcmHkdf: decryption failed", e))
}
}
|
AesGcmKeyVariant::Aes128(key) => key.encrypt(iv, segment),
AesGcmKeyVariant::Aes256(key) => key.encrypt(iv, segment),
}
.map_err(|e| wrap_err("AesGcmHkdf: encryption failed", e))
}
|
random_line_split
|
aes_gcm_hkdf.rs
|
// Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! AES-GCM-HKDF based implementation of the [`tink_core::StreamingAead`] trait.
use super::{noncebased, AesVariant};
use aes_gcm::aead::{generic_array::GenericArray, Aead, NewAead};
use std::convert::TryInto;
use tink_core::{subtle::random::get_random_bytes, utils::wrap_err, TinkError};
use tink_proto::HashType;
/// The size of the nonces used for GCM.
pub const AES_GCM_HKDF_NONCE_SIZE_IN_BYTES: usize = 12;
/// The size of the randomly generated nonce prefix.
pub const AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES: usize = 7;
/// The size of the tags of each ciphertext segment.
pub const AES_GCM_HKDF_TAG_SIZE_IN_BYTES: usize = 16;
/// `AesGcmHkdf` implements streaming AEAD encryption using AES-GCM.
///
/// Each ciphertext uses a new AES-GCM key, derived via HKDF from the key
/// derivation key, a randomly chosen salt of the same size as the key, and a
/// nonce prefix.
#[derive(Clone)]
pub struct AesGcmHkdf {
pub main_key: Vec<u8>,
hkdf_alg: HashType,
aes_variant: AesVariant,
ciphertext_segment_size: usize,
first_ciphertext_segment_offset: usize,
plaintext_segment_size: usize,
}
#[derive(Clone)]
enum
|
{
Aes128(Box<aes_gcm::Aes128Gcm>),
Aes256(Box<aes_gcm::Aes256Gcm>),
}
/// Calculate the header length for a given key size. The header includes
/// space for:
/// - a single byte indicating header length
/// - a salt that is the same size as the sub key
/// - a nonce prefix.
fn header_length_for(key_size_in_bytes: usize) -> usize {
1 + key_size_in_bytes + AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES
}
impl AesGcmHkdf {
/// Initialize a streaming primitive with a key derivation key
/// and encryption parameters.
///
/// `main_key` is input keying material used to derive sub keys. This must be
/// longer than the size of the sub keys (`key_size_in_bytes`).
/// `hkdf_alg` is the MAC algorithm hash type used for the HKDF key derivation.
/// `key_size_in_bytes` is the size of the derived sub keys.
/// `ciphertext_segment_size` is the size of each ciphertext segment.
/// `first_segment_offset` is the offset of the first ciphertext segment.
pub fn new(
main_key: &[u8],
hkdf_alg: HashType,
key_size_in_bytes: usize,
ciphertext_segment_size: usize,
first_segment_offset: usize,
) -> Result<AesGcmHkdf, TinkError> {
if main_key.len() < 16 || main_key.len() < key_size_in_bytes {
return Err("main_key too short".into());
}
let aes_variant = super::validate_aes_key_size(key_size_in_bytes)?;
let header_len = header_length_for(key_size_in_bytes);
if ciphertext_segment_size
<= first_segment_offset + header_len + AES_GCM_HKDF_TAG_SIZE_IN_BYTES
{
return Err("ciphertext_segment_size too small".into());
}
Ok(AesGcmHkdf {
main_key: main_key.to_vec(),
hkdf_alg,
aes_variant,
ciphertext_segment_size,
first_ciphertext_segment_offset: first_segment_offset + header_len,
plaintext_segment_size: ciphertext_segment_size - AES_GCM_HKDF_TAG_SIZE_IN_BYTES,
})
}
/// Return the length of the encryption header.
pub fn header_length(&self) -> usize {
header_length_for(self.aes_variant.key_size())
}
/// Return a key derived from the given main key using `salt` and `aad` parameters.
fn derive_key(&self, salt: &[u8], aad: &[u8]) -> Result<Vec<u8>, TinkError> {
tink_core::subtle::compute_hkdf(
self.hkdf_alg,
&self.main_key,
salt,
aad,
self.aes_variant.key_size(),
)
}
}
impl tink_core::StreamingAead for AesGcmHkdf {
/// Return a wrapper around an underlying [`std::io::Write`], such that
/// any write-operation via the wrapper results in AEAD-encryption of the
/// written data, using aad as associated authenticated data. The associated
/// data is not included in the ciphertext and has to be passed in as parameter
/// for decryption.
fn new_encrypting_writer(
&self,
mut w: Box<dyn std::io::Write>,
aad: &[u8],
) -> Result<Box<dyn tink_core::EncryptingWrite>, TinkError> {
let salt = get_random_bytes(self.aes_variant.key_size());
let nonce_prefix = get_random_bytes(AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES);
let dkey = self.derive_key(&salt, aad)?;
let cipher_key = new_cipher_key(self.aes_variant, &dkey)?;
let mut header = Vec::with_capacity(self.header_length());
header.push(
self.header_length()
.try_into()
.map_err(|e| wrap_err("header length too long", e))?,
);
header.extend_from_slice(&salt);
header.extend_from_slice(&nonce_prefix);
w.write(&header).map_err(|e| wrap_err("write failed", e))?;
let nw = noncebased::Writer::new(noncebased::WriterParams {
w,
segment_encrypter: Box::new(AesGcmHkdfSegmentEncrypter { cipher_key }),
nonce_size: AES_GCM_HKDF_NONCE_SIZE_IN_BYTES,
nonce_prefix,
plaintext_segment_size: self.plaintext_segment_size,
first_ciphertext_segment_offset: self.first_ciphertext_segment_offset,
})?;
Ok(Box::new(nw))
}
/// Return a wrapper around an underlying [`std::io::Read`], such that
/// any read-operation via the wrapper results in AEAD-decryption of the
/// underlying ciphertext, using aad as associated authenticated data.
fn new_decrypting_reader(
&self,
mut r: Box<dyn std::io::Read>,
aad: &[u8],
) -> Result<Box<dyn std::io::Read>, TinkError> {
let mut hlen = vec![0; 1];
r.read_exact(&mut hlen)
.map_err(|e| wrap_err("failed to reader header len", e))?;
if hlen[0] as usize!= self.header_length() {
return Err("invalid header length".into());
}
let mut salt = vec![0; self.aes_variant.key_size()];
r.read_exact(&mut salt)
.map_err(|e| wrap_err("cannot read salt", e))?;
let mut nonce_prefix = vec![0; AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES];
r.read_exact(&mut nonce_prefix)
.map_err(|e| wrap_err("cannot read nonce_prefix", e))?;
let dkey = self.derive_key(&salt, aad)?;
let cipher_key = new_cipher_key(self.aes_variant, &dkey)?;
let nr = noncebased::Reader::new(noncebased::ReaderParams {
r,
segment_decrypter: Box::new(AesGcmHkdfSegmentDecrypter { cipher_key }),
nonce_size: AES_GCM_HKDF_NONCE_SIZE_IN_BYTES,
nonce_prefix,
ciphertext_segment_size: self.ciphertext_segment_size,
first_ciphertext_segment_offset: self.first_ciphertext_segment_offset,
})?;
Ok(Box::new(nr))
}
}
/// Create a new AES-GCM cipher key using the given key and the crypto library.
fn new_cipher_key(aes_variant: AesVariant, key: &[u8]) -> Result<AesGcmKeyVariant, TinkError> {
match aes_variant {
AesVariant::Aes128 => Ok(AesGcmKeyVariant::Aes128(Box::new(aes_gcm::Aes128Gcm::new(
GenericArray::from_slice(key),
)))),
AesVariant::Aes256 => Ok(AesGcmKeyVariant::Aes256(Box::new(aes_gcm::Aes256Gcm::new(
GenericArray::from_slice(key),
)))),
}
}
/// A [`noncebased::SegmentEncrypter`] based on AES-GCM-HKDF.
struct AesGcmHkdfSegmentEncrypter {
cipher_key: AesGcmKeyVariant,
}
impl noncebased::SegmentEncrypter for AesGcmHkdfSegmentEncrypter {
fn encrypt_segment(&self, segment: &[u8], nonce: &[u8]) -> Result<Vec<u8>, TinkError> {
let iv = GenericArray::from_slice(nonce);
match &self.cipher_key {
AesGcmKeyVariant::Aes128(key) => key.encrypt(iv, segment),
AesGcmKeyVariant::Aes256(key) => key.encrypt(iv, segment),
}
.map_err(|e| wrap_err("AesGcmHkdf: encryption failed", e))
}
}
/// A [`noncebased::SegmentDecrypter`] based on AES-GCM-HKDF.
struct AesGcmHkdfSegmentDecrypter {
cipher_key: AesGcmKeyVariant,
}
impl noncebased::SegmentDecrypter for AesGcmHkdfSegmentDecrypter {
fn decrypt_segment(&self, segment: &[u8], nonce: &[u8]) -> Result<Vec<u8>, TinkError> {
let iv = GenericArray::from_slice(nonce);
match &self.cipher_key {
AesGcmKeyVariant::Aes128(key) => key.decrypt(iv, segment),
AesGcmKeyVariant::Aes256(key) => key.decrypt(iv, segment),
}
.map_err(|e| wrap_err("AesGcmHkdf: decryption failed", e))
}
}
|
AesGcmKeyVariant
|
identifier_name
|
aes_gcm_hkdf.rs
|
// Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! AES-GCM-HKDF based implementation of the [`tink_core::StreamingAead`] trait.
use super::{noncebased, AesVariant};
use aes_gcm::aead::{generic_array::GenericArray, Aead, NewAead};
use std::convert::TryInto;
use tink_core::{subtle::random::get_random_bytes, utils::wrap_err, TinkError};
use tink_proto::HashType;
/// The size of the nonces used for GCM.
pub const AES_GCM_HKDF_NONCE_SIZE_IN_BYTES: usize = 12;
/// The size of the randomly generated nonce prefix.
pub const AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES: usize = 7;
/// The size of the tags of each ciphertext segment.
pub const AES_GCM_HKDF_TAG_SIZE_IN_BYTES: usize = 16;
/// `AesGcmHkdf` implements streaming AEAD encryption using AES-GCM.
///
/// Each ciphertext uses a new AES-GCM key, derived via HKDF from the key
/// derivation key, a randomly chosen salt of the same size as the key, and a
/// nonce prefix.
#[derive(Clone)]
pub struct AesGcmHkdf {
pub main_key: Vec<u8>,
hkdf_alg: HashType,
aes_variant: AesVariant,
ciphertext_segment_size: usize,
first_ciphertext_segment_offset: usize,
plaintext_segment_size: usize,
}
#[derive(Clone)]
enum AesGcmKeyVariant {
Aes128(Box<aes_gcm::Aes128Gcm>),
Aes256(Box<aes_gcm::Aes256Gcm>),
}
/// Calculate the header length for a given key size. The header includes
/// space for:
/// - a single byte indicating header length
/// - a salt that is the same size as the sub key
/// - a nonce prefix.
fn header_length_for(key_size_in_bytes: usize) -> usize {
1 + key_size_in_bytes + AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES
}
impl AesGcmHkdf {
/// Initialize a streaming primitive with a key derivation key
/// and encryption parameters.
///
/// `main_key` is input keying material used to derive sub keys. This must be
/// longer than the size of the sub keys (`key_size_in_bytes`).
/// `hkdf_alg` is the MAC algorithm hash type used for the HKDF key derivation.
/// `key_size_in_bytes` is the size of the derived sub keys.
/// `ciphertext_segment_size` is the size of each ciphertext segment.
/// `first_segment_offset` is the offset of the first ciphertext segment.
pub fn new(
main_key: &[u8],
hkdf_alg: HashType,
key_size_in_bytes: usize,
ciphertext_segment_size: usize,
first_segment_offset: usize,
) -> Result<AesGcmHkdf, TinkError> {
if main_key.len() < 16 || main_key.len() < key_size_in_bytes {
return Err("main_key too short".into());
}
let aes_variant = super::validate_aes_key_size(key_size_in_bytes)?;
let header_len = header_length_for(key_size_in_bytes);
if ciphertext_segment_size
<= first_segment_offset + header_len + AES_GCM_HKDF_TAG_SIZE_IN_BYTES
{
return Err("ciphertext_segment_size too small".into());
}
Ok(AesGcmHkdf {
main_key: main_key.to_vec(),
hkdf_alg,
aes_variant,
ciphertext_segment_size,
first_ciphertext_segment_offset: first_segment_offset + header_len,
plaintext_segment_size: ciphertext_segment_size - AES_GCM_HKDF_TAG_SIZE_IN_BYTES,
})
}
/// Return the length of the encryption header.
pub fn header_length(&self) -> usize {
header_length_for(self.aes_variant.key_size())
}
/// Return a key derived from the given main key using `salt` and `aad` parameters.
fn derive_key(&self, salt: &[u8], aad: &[u8]) -> Result<Vec<u8>, TinkError> {
tink_core::subtle::compute_hkdf(
self.hkdf_alg,
&self.main_key,
salt,
aad,
self.aes_variant.key_size(),
)
}
}
impl tink_core::StreamingAead for AesGcmHkdf {
/// Return a wrapper around an underlying [`std::io::Write`], such that
/// any write-operation via the wrapper results in AEAD-encryption of the
/// written data, using aad as associated authenticated data. The associated
/// data is not included in the ciphertext and has to be passed in as parameter
/// for decryption.
fn new_encrypting_writer(
&self,
mut w: Box<dyn std::io::Write>,
aad: &[u8],
) -> Result<Box<dyn tink_core::EncryptingWrite>, TinkError> {
let salt = get_random_bytes(self.aes_variant.key_size());
let nonce_prefix = get_random_bytes(AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES);
let dkey = self.derive_key(&salt, aad)?;
let cipher_key = new_cipher_key(self.aes_variant, &dkey)?;
let mut header = Vec::with_capacity(self.header_length());
header.push(
self.header_length()
.try_into()
.map_err(|e| wrap_err("header length too long", e))?,
);
header.extend_from_slice(&salt);
header.extend_from_slice(&nonce_prefix);
w.write(&header).map_err(|e| wrap_err("write failed", e))?;
let nw = noncebased::Writer::new(noncebased::WriterParams {
w,
segment_encrypter: Box::new(AesGcmHkdfSegmentEncrypter { cipher_key }),
nonce_size: AES_GCM_HKDF_NONCE_SIZE_IN_BYTES,
nonce_prefix,
plaintext_segment_size: self.plaintext_segment_size,
first_ciphertext_segment_offset: self.first_ciphertext_segment_offset,
})?;
Ok(Box::new(nw))
}
/// Return a wrapper around an underlying [`std::io::Read`], such that
/// any read-operation via the wrapper results in AEAD-decryption of the
/// underlying ciphertext, using aad as associated authenticated data.
fn new_decrypting_reader(
&self,
mut r: Box<dyn std::io::Read>,
aad: &[u8],
) -> Result<Box<dyn std::io::Read>, TinkError> {
let mut hlen = vec![0; 1];
r.read_exact(&mut hlen)
.map_err(|e| wrap_err("failed to reader header len", e))?;
if hlen[0] as usize!= self.header_length()
|
let mut salt = vec![0; self.aes_variant.key_size()];
r.read_exact(&mut salt)
.map_err(|e| wrap_err("cannot read salt", e))?;
let mut nonce_prefix = vec![0; AES_GCM_HKDF_NONCE_PREFIX_SIZE_IN_BYTES];
r.read_exact(&mut nonce_prefix)
.map_err(|e| wrap_err("cannot read nonce_prefix", e))?;
let dkey = self.derive_key(&salt, aad)?;
let cipher_key = new_cipher_key(self.aes_variant, &dkey)?;
let nr = noncebased::Reader::new(noncebased::ReaderParams {
r,
segment_decrypter: Box::new(AesGcmHkdfSegmentDecrypter { cipher_key }),
nonce_size: AES_GCM_HKDF_NONCE_SIZE_IN_BYTES,
nonce_prefix,
ciphertext_segment_size: self.ciphertext_segment_size,
first_ciphertext_segment_offset: self.first_ciphertext_segment_offset,
})?;
Ok(Box::new(nr))
}
}
/// Create a new AES-GCM cipher key using the given key and the crypto library.
fn new_cipher_key(aes_variant: AesVariant, key: &[u8]) -> Result<AesGcmKeyVariant, TinkError> {
match aes_variant {
AesVariant::Aes128 => Ok(AesGcmKeyVariant::Aes128(Box::new(aes_gcm::Aes128Gcm::new(
GenericArray::from_slice(key),
)))),
AesVariant::Aes256 => Ok(AesGcmKeyVariant::Aes256(Box::new(aes_gcm::Aes256Gcm::new(
GenericArray::from_slice(key),
)))),
}
}
/// A [`noncebased::SegmentEncrypter`] based on AES-GCM-HKDF.
struct AesGcmHkdfSegmentEncrypter {
cipher_key: AesGcmKeyVariant,
}
impl noncebased::SegmentEncrypter for AesGcmHkdfSegmentEncrypter {
fn encrypt_segment(&self, segment: &[u8], nonce: &[u8]) -> Result<Vec<u8>, TinkError> {
let iv = GenericArray::from_slice(nonce);
match &self.cipher_key {
AesGcmKeyVariant::Aes128(key) => key.encrypt(iv, segment),
AesGcmKeyVariant::Aes256(key) => key.encrypt(iv, segment),
}
.map_err(|e| wrap_err("AesGcmHkdf: encryption failed", e))
}
}
/// A [`noncebased::SegmentDecrypter`] based on AES-GCM-HKDF.
struct AesGcmHkdfSegmentDecrypter {
cipher_key: AesGcmKeyVariant,
}
impl noncebased::SegmentDecrypter for AesGcmHkdfSegmentDecrypter {
fn decrypt_segment(&self, segment: &[u8], nonce: &[u8]) -> Result<Vec<u8>, TinkError> {
let iv = GenericArray::from_slice(nonce);
match &self.cipher_key {
AesGcmKeyVariant::Aes128(key) => key.decrypt(iv, segment),
AesGcmKeyVariant::Aes256(key) => key.decrypt(iv, segment),
}
.map_err(|e| wrap_err("AesGcmHkdf: decryption failed", e))
}
}
|
{
return Err("invalid header length".into());
}
|
conditional_block
|
htmloptionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLOptionElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOptionElementDerived;
use dom::bindings::js::JS;
use dom::bindings::error::ErrorResult;
use dom::document::Document;
use dom::element::HTMLOptionElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::htmlformelement::HTMLFormElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLOptionElement {
htmlelement: HTMLElement
}
impl HTMLOptionElementDerived for EventTarget {
fn is_htmloptionelement(&self) -> bool {
match self.type_id {
NodeTargetTypeId(ElementNodeTypeId(HTMLOptionElementTypeId)) => true,
_ => false
}
}
}
impl HTMLOptionElement {
pub fn new_inherited(localName: DOMString, document: JS<Document>) -> HTMLOptionElement {
HTMLOptionElement {
htmlelement: HTMLElement::new_inherited(HTMLOptionElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JS<Document>) -> JS<HTMLOptionElement> {
let element = HTMLOptionElement::new_inherited(localName, document.clone());
Node::reflect_node(~element, document, HTMLOptionElementBinding::Wrap)
}
}
impl HTMLOptionElement {
pub fn Disabled(&self) -> bool {
false
}
pub fn SetDisabled(&mut self, _disabled: bool) -> ErrorResult {
Ok(())
}
pub fn GetForm(&self) -> Option<JS<HTMLFormElement>> {
None
}
pub fn Label(&self) -> DOMString {
~""
}
pub fn SetLabel(&mut self, _label: DOMString) -> ErrorResult
|
pub fn DefaultSelected(&self) -> bool {
false
}
pub fn SetDefaultSelected(&mut self, _default_selected: bool) -> ErrorResult {
Ok(())
}
pub fn Selected(&self) -> bool {
false
}
pub fn SetSelected(&mut self, _selected: bool) -> ErrorResult {
Ok(())
}
pub fn Value(&self) -> DOMString {
~""
}
pub fn SetValue(&mut self, _value: DOMString) -> ErrorResult {
Ok(())
}
pub fn Text(&self) -> DOMString {
~""
}
pub fn SetText(&mut self, _text: DOMString) -> ErrorResult {
Ok(())
}
pub fn Index(&self) -> i32 {
0
}
}
|
{
Ok(())
}
|
identifier_body
|
htmloptionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLOptionElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOptionElementDerived;
use dom::bindings::js::JS;
use dom::bindings::error::ErrorResult;
use dom::document::Document;
use dom::element::HTMLOptionElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::htmlformelement::HTMLFormElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLOptionElement {
htmlelement: HTMLElement
}
impl HTMLOptionElementDerived for EventTarget {
fn is_htmloptionelement(&self) -> bool {
match self.type_id {
NodeTargetTypeId(ElementNodeTypeId(HTMLOptionElementTypeId)) => true,
_ => false
}
}
}
impl HTMLOptionElement {
pub fn new_inherited(localName: DOMString, document: JS<Document>) -> HTMLOptionElement {
HTMLOptionElement {
htmlelement: HTMLElement::new_inherited(HTMLOptionElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JS<Document>) -> JS<HTMLOptionElement> {
let element = HTMLOptionElement::new_inherited(localName, document.clone());
Node::reflect_node(~element, document, HTMLOptionElementBinding::Wrap)
}
}
impl HTMLOptionElement {
pub fn Disabled(&self) -> bool {
false
}
pub fn SetDisabled(&mut self, _disabled: bool) -> ErrorResult {
Ok(())
}
pub fn GetForm(&self) -> Option<JS<HTMLFormElement>> {
None
}
pub fn Label(&self) -> DOMString {
~""
}
pub fn SetLabel(&mut self, _label: DOMString) -> ErrorResult {
Ok(())
}
pub fn DefaultSelected(&self) -> bool {
false
}
pub fn SetDefaultSelected(&mut self, _default_selected: bool) -> ErrorResult {
Ok(())
}
pub fn Selected(&self) -> bool {
false
}
pub fn SetSelected(&mut self, _selected: bool) -> ErrorResult {
Ok(())
}
pub fn Value(&self) -> DOMString {
~""
}
pub fn
|
(&mut self, _value: DOMString) -> ErrorResult {
Ok(())
}
pub fn Text(&self) -> DOMString {
~""
}
pub fn SetText(&mut self, _text: DOMString) -> ErrorResult {
Ok(())
}
pub fn Index(&self) -> i32 {
0
}
}
|
SetValue
|
identifier_name
|
htmloptionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLOptionElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOptionElementDerived;
use dom::bindings::js::JS;
use dom::bindings::error::ErrorResult;
use dom::document::Document;
use dom::element::HTMLOptionElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::htmlformelement::HTMLFormElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLOptionElement {
htmlelement: HTMLElement
}
impl HTMLOptionElementDerived for EventTarget {
fn is_htmloptionelement(&self) -> bool {
match self.type_id {
NodeTargetTypeId(ElementNodeTypeId(HTMLOptionElementTypeId)) => true,
_ => false
}
}
}
impl HTMLOptionElement {
pub fn new_inherited(localName: DOMString, document: JS<Document>) -> HTMLOptionElement {
HTMLOptionElement {
htmlelement: HTMLElement::new_inherited(HTMLOptionElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JS<Document>) -> JS<HTMLOptionElement> {
let element = HTMLOptionElement::new_inherited(localName, document.clone());
Node::reflect_node(~element, document, HTMLOptionElementBinding::Wrap)
}
}
impl HTMLOptionElement {
pub fn Disabled(&self) -> bool {
false
}
pub fn SetDisabled(&mut self, _disabled: bool) -> ErrorResult {
Ok(())
}
pub fn GetForm(&self) -> Option<JS<HTMLFormElement>> {
None
}
pub fn Label(&self) -> DOMString {
~""
}
pub fn SetLabel(&mut self, _label: DOMString) -> ErrorResult {
Ok(())
}
pub fn DefaultSelected(&self) -> bool {
false
}
pub fn SetDefaultSelected(&mut self, _default_selected: bool) -> ErrorResult {
Ok(())
}
pub fn Selected(&self) -> bool {
false
}
|
pub fn Value(&self) -> DOMString {
~""
}
pub fn SetValue(&mut self, _value: DOMString) -> ErrorResult {
Ok(())
}
pub fn Text(&self) -> DOMString {
~""
}
pub fn SetText(&mut self, _text: DOMString) -> ErrorResult {
Ok(())
}
pub fn Index(&self) -> i32 {
0
}
}
|
pub fn SetSelected(&mut self, _selected: bool) -> ErrorResult {
Ok(())
}
|
random_line_split
|
proximity_ball_ball.rs
|
use crate::math::Point;
use crate::query::Proximity;
use crate::shape::Ball;
use na::{self, RealField};
/// Proximity between balls.
#[inline]
pub fn proximity_ball_ball<N: RealField>(
center1: &Point<N>,
b1: &Ball<N>,
center2: &Point<N>,
b2: &Ball<N>,
margin: N,
) -> Proximity {
assert!(
margin >= na::zero(),
"The proximity margin must be positive or null."
);
let r1 = b1.radius;
let r2 = b2.radius;
let delta_pos = *center2 - *center1;
let distance_squared = delta_pos.norm_squared();
let sum_radius = r1 + r2;
let sum_radius_with_error = sum_radius + margin;
if distance_squared <= sum_radius_with_error * sum_radius_with_error
|
else {
Proximity::Disjoint
}
}
|
{
if distance_squared <= sum_radius * sum_radius {
Proximity::Intersecting
} else {
Proximity::WithinMargin
}
}
|
conditional_block
|
proximity_ball_ball.rs
|
use crate::math::Point;
use crate::query::Proximity;
use crate::shape::Ball;
use na::{self, RealField};
/// Proximity between balls.
#[inline]
pub fn proximity_ball_ball<N: RealField>(
center1: &Point<N>,
b1: &Ball<N>,
center2: &Point<N>,
b2: &Ball<N>,
margin: N,
) -> Proximity
|
Proximity::Disjoint
}
}
|
{
assert!(
margin >= na::zero(),
"The proximity margin must be positive or null."
);
let r1 = b1.radius;
let r2 = b2.radius;
let delta_pos = *center2 - *center1;
let distance_squared = delta_pos.norm_squared();
let sum_radius = r1 + r2;
let sum_radius_with_error = sum_radius + margin;
if distance_squared <= sum_radius_with_error * sum_radius_with_error {
if distance_squared <= sum_radius * sum_radius {
Proximity::Intersecting
} else {
Proximity::WithinMargin
}
} else {
|
identifier_body
|
proximity_ball_ball.rs
|
use crate::math::Point;
use crate::query::Proximity;
use crate::shape::Ball;
use na::{self, RealField};
/// Proximity between balls.
#[inline]
pub fn
|
<N: RealField>(
center1: &Point<N>,
b1: &Ball<N>,
center2: &Point<N>,
b2: &Ball<N>,
margin: N,
) -> Proximity {
assert!(
margin >= na::zero(),
"The proximity margin must be positive or null."
);
let r1 = b1.radius;
let r2 = b2.radius;
let delta_pos = *center2 - *center1;
let distance_squared = delta_pos.norm_squared();
let sum_radius = r1 + r2;
let sum_radius_with_error = sum_radius + margin;
if distance_squared <= sum_radius_with_error * sum_radius_with_error {
if distance_squared <= sum_radius * sum_radius {
Proximity::Intersecting
} else {
Proximity::WithinMargin
}
} else {
Proximity::Disjoint
}
}
|
proximity_ball_ball
|
identifier_name
|
proximity_ball_ball.rs
|
use crate::math::Point;
use crate::query::Proximity;
use crate::shape::Ball;
use na::{self, RealField};
/// Proximity between balls.
#[inline]
pub fn proximity_ball_ball<N: RealField>(
center1: &Point<N>,
b1: &Ball<N>,
center2: &Point<N>,
b2: &Ball<N>,
margin: N,
|
let r1 = b1.radius;
let r2 = b2.radius;
let delta_pos = *center2 - *center1;
let distance_squared = delta_pos.norm_squared();
let sum_radius = r1 + r2;
let sum_radius_with_error = sum_radius + margin;
if distance_squared <= sum_radius_with_error * sum_radius_with_error {
if distance_squared <= sum_radius * sum_radius {
Proximity::Intersecting
} else {
Proximity::WithinMargin
}
} else {
Proximity::Disjoint
}
}
|
) -> Proximity {
assert!(
margin >= na::zero(),
"The proximity margin must be positive or null."
);
|
random_line_split
|
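The proximity_ball_ball.rs rows classify two balls as Intersecting, WithinMargin, or Disjoint by comparing the squared center distance against the squared radius sum with and without the margin. A scalar sketch of that three-way test on concrete numbers, using plain f64 instead of the crate's Point and Ball types; the values are made up for illustration:

#[derive(Debug, PartialEq)]
enum Proximity {
    Intersecting,
    WithinMargin,
    Disjoint,
}

// Same decision structure as proximity_ball_ball, reduced to one dimension.
fn classify(distance: f64, r1: f64, r2: f64, margin: f64) -> Proximity {
    assert!(margin >= 0.0, "The proximity margin must be positive or null.");
    let sum_radius = r1 + r2;
    let sum_radius_with_error = sum_radius + margin;
    // Compare squared quantities, as the row does with norm_squared().
    if distance * distance <= sum_radius_with_error * sum_radius_with_error {
        if distance * distance <= sum_radius * sum_radius {
            Proximity::Intersecting
        } else {
            Proximity::WithinMargin
        }
    } else {
        Proximity::Disjoint
    }
}

fn main() {
    // Balls of radius 1 and 2 with a 0.5 margin: thresholds are 3 and 3.5.
    assert_eq!(classify(2.5, 1.0, 2.0, 0.5), Proximity::Intersecting);
    assert_eq!(classify(3.2, 1.0, 2.0, 0.5), Proximity::WithinMargin);
    assert_eq!(classify(4.0, 1.0, 2.0, 0.5), Proximity::Disjoint);
}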
derive_object_with_raw_idents.rs
|
use juniper::{
execute, graphql_object, graphql_value, EmptyMutation, EmptySubscription, GraphQLInputObject,
RootNode, Value, Variables,
};
pub struct Query;
#[graphql_object]
impl Query {
fn r#type(r#fn: MyInputType) -> Vec<String> {
let _ = r#fn;
unimplemented!()
}
}
#[derive(GraphQLInputObject, Debug, PartialEq)]
struct MyInputType {
r#trait: String,
}
#[tokio::test]
async fn supports_raw_idents_in_types_and_args() {
let doc = r#"
{
__type(name: "Query") {
fields {
name
args {
name
}
}
}
}
"#;
let value = run_type_info_query(&doc).await;
assert_eq!(
value,
graphql_value!(
{
"__type": {
"fields": [
{
"name": "type",
"args": [
{
"name": "fn"
}
]
|
}
]
}
}
),
);
}
#[tokio::test]
async fn supports_raw_idents_in_fields_of_input_types() {
let doc = r#"
{
__type(name: "MyInputType") {
inputFields {
name
}
}
}
"#;
let value = run_type_info_query(&doc).await;
assert_eq!(
value,
graphql_value!(
{
"__type": {
"inputFields": [
{
"name": "trait",
}
]
}
}
),
);
}
async fn run_type_info_query(doc: &str) -> Value {
let schema = RootNode::new(
Query,
EmptyMutation::<()>::new(),
EmptySubscription::<()>::new(),
);
let (result, errs) = execute(doc, None, &schema, &Variables::new(), &())
.await
.expect("Execution failed");
assert_eq!(errs, []);
println!("Result: {:#?}", result);
result
}
|
random_line_split
|
|
derive_object_with_raw_idents.rs
|
use juniper::{
execute, graphql_object, graphql_value, EmptyMutation, EmptySubscription, GraphQLInputObject,
RootNode, Value, Variables,
};
pub struct Query;
#[graphql_object]
impl Query {
fn r#type(r#fn: MyInputType) -> Vec<String> {
let _ = r#fn;
unimplemented!()
}
}
#[derive(GraphQLInputObject, Debug, PartialEq)]
struct MyInputType {
r#trait: String,
}
#[tokio::test]
async fn supports_raw_idents_in_types_and_args() {
let doc = r#"
{
__type(name: "Query") {
fields {
name
args {
name
}
}
}
}
"#;
let value = run_type_info_query(&doc).await;
assert_eq!(
value,
graphql_value!(
{
"__type": {
"fields": [
{
"name": "type",
"args": [
{
"name": "fn"
}
]
}
]
}
}
),
);
}
#[tokio::test]
async fn supports_raw_idents_in_fields_of_input_types()
|
"name": "trait",
}
]
}
}
),
);
}
async fn run_type_info_query(doc: &str) -> Value {
let schema = RootNode::new(
Query,
EmptyMutation::<()>::new(),
EmptySubscription::<()>::new(),
);
let (result, errs) = execute(doc, None, &schema, &Variables::new(), &())
.await
.expect("Execution failed");
assert_eq!(errs, []);
println!("Result: {:#?}", result);
result
}
|
{
let doc = r#"
{
__type(name: "MyInputType") {
inputFields {
name
}
}
}
"#;
let value = run_type_info_query(&doc).await;
assert_eq!(
value,
graphql_value!(
{
"__type": {
"inputFields": [
{
|
identifier_body
|
derive_object_with_raw_idents.rs
|
use juniper::{
execute, graphql_object, graphql_value, EmptyMutation, EmptySubscription, GraphQLInputObject,
RootNode, Value, Variables,
};
pub struct Query;
#[graphql_object]
impl Query {
fn r#type(r#fn: MyInputType) -> Vec<String> {
let _ = r#fn;
unimplemented!()
}
}
#[derive(GraphQLInputObject, Debug, PartialEq)]
struct MyInputType {
r#trait: String,
}
#[tokio::test]
async fn supports_raw_idents_in_types_and_args() {
let doc = r#"
{
__type(name: "Query") {
fields {
name
args {
name
}
}
}
}
"#;
let value = run_type_info_query(&doc).await;
assert_eq!(
value,
graphql_value!(
{
"__type": {
"fields": [
{
"name": "type",
"args": [
{
"name": "fn"
}
]
}
]
}
}
),
);
}
#[tokio::test]
async fn
|
() {
let doc = r#"
{
__type(name: "MyInputType") {
inputFields {
name
}
}
}
"#;
let value = run_type_info_query(&doc).await;
assert_eq!(
value,
graphql_value!(
{
"__type": {
"inputFields": [
{
"name": "trait",
}
]
}
}
),
);
}
async fn run_type_info_query(doc: &str) -> Value {
let schema = RootNode::new(
Query,
EmptyMutation::<()>::new(),
EmptySubscription::<()>::new(),
);
let (result, errs) = execute(doc, None, &schema, &Variables::new(), &())
.await
.expect("Execution failed");
assert_eq!(errs, []);
println!("Result: {:#?}", result);
result
}
|
supports_raw_idents_in_fields_of_input_types
|
identifier_name
|
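The derive_object_with_raw_idents.rs rows check that juniper strips the r# prefix when exposing raw-identifier fields and arguments as GraphQL names ("type", "fn", "trait"). A minimal sketch of the underlying language feature, independent of juniper; the function bodies are invented for illustration:

// Raw identifiers let keyword-named items exist; the r# prefix is spelling,
// not part of the identifier, which is why juniper reports "type" and "fn".
struct MyInputType {
    r#trait: String,
}

fn r#type(r#fn: &MyInputType) -> &str {
    &r#fn.r#trait
}

fn main() {
    let input = MyInputType { r#trait: "value".to_string() };
    assert_eq!(r#type(&input), "value");
}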
mod.rs
|
// Copyright © 2018 Cormac O'Brien
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
|
pub mod console;
pub mod engine;
pub mod host;
pub mod math;
pub mod mdl;
pub mod model;
pub mod net;
pub mod pak;
pub mod parse;
pub mod sprite;
pub mod util;
pub mod vfs;
pub mod wad;
use std::path::PathBuf;
pub fn default_base_dir() -> std::path::PathBuf {
match std::env::current_dir() {
Ok(cwd) => cwd,
Err(e) => {
log::error!("cannot access current directory: {}", e);
std::process::exit(1);
}
}
}
pub const MAX_LIGHTSTYLES: usize = 64;
/// The maximum number of `.pak` files that should be loaded at runtime.
///
/// The original engine does not make this restriction, and this limit can be increased if need be.
pub const MAX_PAKFILES: usize = 32;
|
pub mod alloc;
pub mod bitset;
pub mod bsp;
|
random_line_split
|
mod.rs
|
// Copyright © 2018 Cormac O'Brien
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
pub mod alloc;
pub mod bitset;
pub mod bsp;
pub mod console;
pub mod engine;
pub mod host;
pub mod math;
pub mod mdl;
pub mod model;
pub mod net;
pub mod pak;
pub mod parse;
pub mod sprite;
pub mod util;
pub mod vfs;
pub mod wad;
use std::path::PathBuf;
pub fn default_base_dir() -> std::path::PathBuf {
|
pub const MAX_LIGHTSTYLES: usize = 64;
/// The maximum number of `.pak` files that should be loaded at runtime.
///
/// The original engine does not make this restriction, and this limit can be increased if need be.
pub const MAX_PAKFILES: usize = 32;
|
match std::env::current_dir() {
Ok(cwd) => cwd,
Err(e) => {
log::error!("cannot access current directory: {}", e);
std::process::exit(1);
}
}
}
|
identifier_body
|
mod.rs
|
// Copyright © 2018 Cormac O'Brien
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
pub mod alloc;
pub mod bitset;
pub mod bsp;
pub mod console;
pub mod engine;
pub mod host;
pub mod math;
pub mod mdl;
pub mod model;
pub mod net;
pub mod pak;
pub mod parse;
pub mod sprite;
pub mod util;
pub mod vfs;
pub mod wad;
use std::path::PathBuf;
pub fn d
|
) -> std::path::PathBuf {
match std::env::current_dir() {
Ok(cwd) => cwd,
Err(e) => {
log::error!("cannot access current directory: {}", e);
std::process::exit(1);
}
}
}
pub const MAX_LIGHTSTYLES: usize = 64;
/// The maximum number of `.pak` files that should be loaded at runtime.
///
/// The original engine does not make this restriction, and this limit can be increased if need be.
pub const MAX_PAKFILES: usize = 32;
|
efault_base_dir(
|
identifier_name
|
lib.rs
|
#![no_std]
#![warn(missing_docs)]
//! A logging facade for writing logging information to the Linux Kernel System
//! Log.
use log;
use core::fmt;
use core::fmt::Write;
extern "C" {
fn printk(fmt: *const u8, ...) -> i32;
}
/// A logger which writes to the Linux Kernel System Log.
pub struct DMesgLogger {}
struct PrintK {}
const KERN_INFO: &[u8; 2] = b"6\0";
// https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
impl fmt::Write for PrintK {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
unsafe {
printk(KERN_INFO.as_ptr());
}
for c in s.bytes() {
unsafe {
printk(b"c%c\0".as_ptr(), c as u32);
}
}
Ok(())
}
}
impl DMesgLogger {
/// A function similar to core::fmt::Write, except that self is not
/// mutable, so we can use it without doing any locking. We let Linux
/// handle the locking for us.
pub fn write_fmt(&self, args: core::fmt::Arguments) {
let mut printk_obj = PrintK {};
let _ = write!(printk_obj, "{}\r\n", args);
}
}
impl log::Log for DMesgLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.level() <= log::Level::Trace
}
fn log(&self, record: &log::Record) {
if self.enabled(record.metadata()) {
self.write_fmt(*record.args());
}
}
fn flush(&self)
|
}
|
{}
|
identifier_body
|
lib.rs
|
#![no_std]
#![warn(missing_docs)]
//! A logging facade for writing logging information to the Linux Kernel System
//! Log.
use log;
use core::fmt;
use core::fmt::Write;
extern "C" {
fn printk(fmt: *const u8, ...) -> i32;
}
/// A logger which writes to the Linux Kernel System Log.
pub struct DMesgLogger {}
struct
|
{}
const KERN_INFO: &[u8; 2] = b"6\0";
// https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
impl fmt::Write for PrintK {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
unsafe {
printk(KERN_INFO.as_ptr());
}
for c in s.bytes() {
unsafe {
printk(b"c%c\0".as_ptr(), c as u32);
}
}
Ok(())
}
}
impl DMesgLogger {
/// A function similar to core::fmt::Write, except that self is not
/// mutable, so we can use it without doing any locking. We let Linux
/// handle the locking for us.
pub fn write_fmt(&self, args: core::fmt::Arguments) {
let mut printk_obj = PrintK {};
let _ = write!(printk_obj, "{}\r\n", args);
}
}
impl log::Log for DMesgLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.level() <= log::Level::Trace
}
fn log(&self, record: &log::Record) {
if self.enabled(record.metadata()) {
self.write_fmt(*record.args());
}
}
fn flush(&self) {}
}
|
PrintK
|
identifier_name
|
lib.rs
|
#![no_std]
#![warn(missing_docs)]
//! A logging facade for writing logging information to the Linux Kernel System
//! Log.
use log;
use core::fmt;
use core::fmt::Write;
extern "C" {
fn printk(fmt: *const u8,...) -> i32;
}
/// A logger which writes to the Linux Kernel System Log.
pub struct DMesgLogger {}
struct PrintK {}
const KERN_INFO: &[u8; 2] = b"6\0";
// https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
impl fmt::Write for PrintK {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
unsafe {
printk(KERN_INFO.as_ptr());
}
for c in s.bytes() {
unsafe {
printk(b"c%c\0".as_ptr(), c as u32);
}
}
Ok(())
}
}
impl DMesgLogger {
/// A function similar to core::fmt::Write, except that self is not
/// mutable, so we can use it without doing any locking. We let Linux
/// handle the locking for us.
pub fn write_fmt(&self, args: core::fmt::Arguments) {
let mut printk_obj = PrintK {};
let _ = write!(printk_obj, "{}\r\n", args);
}
}
impl log::Log for DMesgLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.level() <= log::Level::Trace
}
fn log(&self, record: &log::Record) {
if self.enabled(record.metadata())
|
}
fn flush(&self) {}
}
|
{
self.write_fmt(*record.args());
}
|
conditional_block
|
lib.rs
|
#![no_std]
#![warn(missing_docs)]
//! A logging facade for writing logging information to the Linux Kernel System
//! Log.
use log;
use core::fmt;
use core::fmt::Write;
extern "C" {
fn printk(fmt: *const u8,...) -> i32;
}
/// A logger which writes to the Linux Kernel System Log.
pub struct DMesgLogger {}
struct PrintK {}
const KERN_INFO: &[u8; 2] = b"6\0";
// https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
impl fmt::Write for PrintK {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
unsafe {
printk(KERN_INFO.as_ptr());
}
for c in s.bytes() {
unsafe {
printk(b"c%c\0".as_ptr(), c as u32);
}
}
Ok(())
}
}
impl DMesgLogger {
/// A function similar to core::fmt::Write, except that self is not
/// mutable, so we can use it without doing any locking. We let Linux
/// handle the locking for us.
pub fn write_fmt(&self, args: core::fmt::Arguments) {
let mut printk_obj = PrintK {};
let _ = write!(printk_obj, "{}\r\n", args);
}
}
impl log::Log for DMesgLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.level() <= log::Level::Trace
}
|
fn log(&self, record: &log::Record) {
if self.enabled(record.metadata()) {
self.write_fmt(*record.args());
}
}
fn flush(&self) {}
}
|
random_line_split
|
|
add.rs
|
use std::error;
use std::fs;
use std::path;
use std::io::Read;
use itertools::Itertools;
use fat;
pub fn add_file(args: &[String])
-> Result<(), Box<error::Error>>
|
// Ensure input file exists.
let file = fs::File::open(file_name)?;
let metadata = file.metadata()?;
// Create a root dir entry.
let (entry, index) =
image.create_file_entry(fat_file_name, metadata.len() as u32)?;
// Get free FAT entries, fill sectors with file data.
for chunk in &file.bytes().chunks(image.sector_size()) {
let chunk = chunk
.map(|b_res| b_res.unwrap_or(0))
.collect::<Vec<_>>();
// Get free sector.
let entry_index: usize;
match image.get_free_fat_entry() {
Some(i) => entry_index = i,
None => {
// TODO: Remove entries written so far.
panic!("image ran out of space while writing file")
},
}
// Write chunk.
image.write_data_sector(entry_index, &chunk)?;
}
image.save_file_entry(entry, index)?;
image.save(image_name)?;
Ok(())
}
|
{
expect_args!(args, 2);
let file_name = args[0].clone();
let image_name = args[1].clone();
let fat_file_name = if args.len() > 2 {
args[2].clone()
} else {
match path::Path::new(&file_name).file_name() {
Some(n) => n.to_string_lossy().into_owned(),
None => file_name.clone(),
}
};
let mut image = fat::Image::from_file(image_name.clone())?;
// Don't overwrite a preexisting file.
if let Ok(_) = image.get_file_entry(file_name.clone()) {
return Err(errorf!("file {} already exists", file_name));
}
|
identifier_body
|
add.rs
|
use std::error;
use std::fs;
use std::path;
use std::io::Read;
use itertools::Itertools;
use fat;
pub fn
|
(args: &[String])
-> Result<(), Box<error::Error>>
{
expect_args!(args, 2);
let file_name = args[0].clone();
let image_name = args[1].clone();
let fat_file_name = if args.len() > 2 {
args[2].clone()
} else {
match path::Path::new(&file_name).file_name() {
Some(n) => n.to_string_lossy().into_owned(),
None => file_name.clone(),
}
};
let mut image = fat::Image::from_file(image_name.clone())?;
// Don't overwrite a preexisting file.
if let Ok(_) = image.get_file_entry(file_name.clone()) {
return Err(errorf!("file {} already exists", file_name));
}
// Ensure input file exists.
let file = fs::File::open(file_name)?;
let metadata = file.metadata()?;
// Create a root dir entry.
let (entry, index) =
image.create_file_entry(fat_file_name, metadata.len() as u32)?;
// Get free FAT entries, fill sectors with file data.
for chunk in &file.bytes().chunks(image.sector_size()) {
let chunk = chunk
.map(|b_res| b_res.unwrap_or(0))
.collect::<Vec<_>>();
// Get free sector.
let entry_index: usize;
match image.get_free_fat_entry() {
Some(i) => entry_index = i,
None => {
// TODO: Remove entries written so far.
panic!("image ran out of space while writing file")
},
}
// Write chunk.
image.write_data_sector(entry_index, &chunk)?;
}
image.save_file_entry(entry, index)?;
image.save(image_name)?;
Ok(())
}
|
add_file
|
identifier_name
|
add.rs
|
use std::error;
use std::fs;
use std::path;
use std::io::Read;
use itertools::Itertools;
use fat;
pub fn add_file(args: &[String])
-> Result<(), Box<error::Error>>
{
expect_args!(args, 2);
let file_name = args[0].clone();
let image_name = args[1].clone();
let fat_file_name = if args.len() > 2 {
args[2].clone()
} else {
match path::Path::new(&file_name).file_name() {
Some(n) => n.to_string_lossy().into_owned(),
None => file_name.clone(),
}
};
let mut image = fat::Image::from_file(image_name.clone())?;
// Don't overwrite a preexisting file.
if let Ok(_) = image.get_file_entry(file_name.clone()) {
return Err(errorf!("file {} already exists", file_name));
}
// Ensure input file exists.
let file = fs::File::open(file_name)?;
let metadata = file.metadata()?;
// Create a root dir entry.
let (entry, index) =
image.create_file_entry(fat_file_name, metadata.len() as u32)?;
// Get free FAT entries, fill sectors with file data.
for chunk in &file.bytes().chunks(image.sector_size()) {
let chunk = chunk
.map(|b_res| b_res.unwrap_or(0))
.collect::<Vec<_>>();
|
// Get free sector.
let entry_index: usize;
match image.get_free_fat_entry() {
Some(i) => entry_index = i,
None => {
// TODO: Remove entries written so far.
panic!("image ran out of space while writing file")
},
}
// Write chunk.
image.write_data_sector(entry_index, &chunk)?;
}
image.save_file_entry(entry, index)?;
image.save(image_name)?;
Ok(())
}
|
random_line_split
|
|
error.rs
|
use image::ImageError;
use std::{io, fmt};
use std::error::Error;
#[derive(Debug)]
pub enum ShadowError {
Configuration(String),
ImageLibrary(ImageError),
Image(String),
Io(io::Error),
NotImplemented,
}
impl Error for ShadowError {
fn description(&self) -> &str {
match *self {
ShadowError::Configuration(_) => "configuration error",
ShadowError::ImageLibrary(ref err) => err.description(),
ShadowError::Image(_) => "image error",
ShadowError::Io(ref err) => err.description(),
ShadowError::NotImplemented => "not implemented",
}
}
fn cause(&self) -> Option<&Error>
|
}
impl fmt::Display for ShadowError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ShadowError::Configuration(ref msg) => write!(f, "Configuration error: {}", msg),
ShadowError::ImageLibrary(ref err) => err.fmt(f),
ShadowError::Image(ref msg) => write!(f, "Image error: {}", msg),
ShadowError::Io(ref err) => err.fmt(f),
ShadowError::NotImplemented => write!(f, "Not currently implemented"),
}
}
}
impl From<io::Error> for ShadowError {
fn from(err: io::Error) -> ShadowError {
ShadowError::Io(err)
}
}
impl From<ImageError> for ShadowError {
fn from(err: ImageError) -> ShadowError {
ShadowError::ImageLibrary(err)
}
}
|
{
match *self {
ShadowError::ImageLibrary(ref err) => err.cause(),
ShadowError::Io(ref err) => err.cause(),
_ => None,
}
}
|
identifier_body
|
error.rs
|
use image::ImageError;
use std::{io, fmt};
use std::error::Error;
#[derive(Debug)]
pub enum ShadowError {
Configuration(String),
ImageLibrary(ImageError),
Image(String),
Io(io::Error),
NotImplemented,
}
impl Error for ShadowError {
fn description(&self) -> &str {
match *self {
ShadowError::Configuration(_) => "configuration error",
ShadowError::ImageLibrary(ref err) => err.description(),
ShadowError::Image(_) => "image error",
ShadowError::Io(ref err) => err.description(),
ShadowError::NotImplemented => "not implemented",
}
}
fn cause(&self) -> Option<&Error> {
match *self {
ShadowError::ImageLibrary(ref err) => err.cause(),
ShadowError::Io(ref err) => err.cause(),
_ => None,
}
}
}
impl fmt::Display for ShadowError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ShadowError::Configuration(ref msg) => write!(f, "Configuration error: {}", msg),
ShadowError::ImageLibrary(ref err) => err.fmt(f),
ShadowError::Image(ref msg) => write!(f, "Image error: {}", msg),
ShadowError::Io(ref err) => err.fmt(f),
ShadowError::NotImplemented => write!(f, "Not currently implemented"),
}
}
}
impl From<io::Error> for ShadowError {
fn
|
(err: io::Error) -> ShadowError {
ShadowError::Io(err)
}
}
impl From<ImageError> for ShadowError {
fn from(err: ImageError) -> ShadowError {
ShadowError::ImageLibrary(err)
}
}
|
from
|
identifier_name
|
error.rs
|
use image::ImageError;
use std::{io, fmt};
use std::error::Error;
#[derive(Debug)]
pub enum ShadowError {
Configuration(String),
|
ImageLibrary(ImageError),
Image(String),
Io(io::Error),
NotImplemented,
}
impl Error for ShadowError {
fn description(&self) -> &str {
match *self {
ShadowError::Configuration(_) => "configuration error",
ShadowError::ImageLibrary(ref err) => err.description(),
ShadowError::Image(_) => "image error",
ShadowError::Io(ref err) => err.description(),
ShadowError::NotImplemented => "not implemented",
}
}
fn cause(&self) -> Option<&Error> {
match *self {
ShadowError::ImageLibrary(ref err) => err.cause(),
ShadowError::Io(ref err) => err.cause(),
_ => None,
}
}
}
impl fmt::Display for ShadowError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ShadowError::Configuration(ref msg) => write!(f, "Configuration error: {}", msg),
ShadowError::ImageLibrary(ref err) => err.fmt(f),
ShadowError::Image(ref msg) => write!(f, "Image error: {}", msg),
ShadowError::Io(ref err) => err.fmt(f),
ShadowError::NotImplemented => write!(f, "Not currently implemented"),
}
}
}
impl From<io::Error> for ShadowError {
fn from(err: io::Error) -> ShadowError {
ShadowError::Io(err)
}
}
impl From<ImageError> for ShadowError {
fn from(err: ImageError) -> ShadowError {
ShadowError::ImageLibrary(err)
}
}
|
random_line_split
|
|
mod.rs
|
#[macro_use]
// has macros, must go first
mod utils;
pub mod annotation;
pub mod attribute;
pub mod attribute_group;
pub mod attributes;
pub mod choice;
pub mod common;
pub mod complex_content;
pub mod complex_type;
pub mod constants;
pub mod element;
pub mod extension;
pub mod group;
pub mod id;
pub mod import;
pub mod list;
pub mod primitives;
pub mod restriction;
pub mod sequence;
pub mod simple_content;
pub mod simple_type;
pub mod union;
use crate::error::Result;
use crate::xsd::annotation::Annotation;
use crate::xsd::attribute_group::AttributeGroup;
use crate::xsd::complex_type::ComplexType;
use crate::xsd::constants::{
ANNOTATION, ATTRIBUTE_GROUP, BASE, COMPLEX_TYPE, DEFAULT, ELEMENT, FIXED, GROUP, IMPORT,
MAX_OCCURS, MIN_OCCURS, NAME, NAMESPACE, REF, REQUIRED, SIMPLE_TYPE, TYPE, UNBOUNDED, USE,
VALUE,
};
use crate::xsd::element::Element;
use crate::xsd::group::GroupDefinition;
use crate::xsd::id::{Id, Lineage, RootNodeType};
use crate::xsd::import::Import;
use crate::xsd::simple_type::SimpleType;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::fmt::{Display, Formatter};
use std::path::Path;
#[derive(Clone, Debug)]
pub struct Xsd {
entries: Vec<Entry>,
prefix: String,
}
impl Default for Xsd {
fn default() -> Self {
Self {
entries: Vec::new(),
prefix: "xs".to_owned(),
}
}
}
impl Xsd {
pub fn load<P: AsRef<Path>>(filepath: P) -> Result<Self> {
let xml_str = wrap!(
std::fs::read_to_string(filepath.as_ref()),
"unable to load '{}'",
filepath.as_ref().display()
)?;
let doc = exile::parse(&xml_str).unwrap();
Self::parse(doc.root())
}
pub fn parse(root: &exile::Element) -> Result<Self> {
if root.name != "schema" {
return raise!("expected the root node to be named 'schema'");
}
let mut prefix = "";
for (k, v) in root.attributes.map() {
if v.as_str() == "http://www.w3.org/2001/XMLSchema" {
if k.starts_with("xmlns:") {
let mut split = k.split(':');
let _ = split.next().ok_or(make_err!("expected to find xmlns:"))?;
let ns: &str = split
.next()
.ok_or(make_err!("expected to find xmlns prefix"))?;
prefix = ns;
break;
}
}
}
if prefix.is_empty() {
return raise!("xmlns prefix is empty");
}
let mut xsd = Xsd {
entries: Vec::new(),
prefix: prefix.to_owned(),
};
for (i, entry_node) in root.children().enumerate() {
let entry = Entry::from_xml(entry_node, Lineage::Index(i as u64), &xsd)?;
xsd.add_entry(entry)?;
}
Ok(xsd)
}
pub fn new<S: AsRef<str>>(prefix: S) -> Self {
Self {
entries: Vec::new(),
prefix: prefix.as_ref().into(),
}
}
pub fn prefix(&self) -> &str {
self.prefix.as_str()
}
pub fn add_entry(&mut self, entry: Entry) -> Result<()> {
// TODO - make an efficient storage
self.entries.push(entry);
Ok(())
}
pub fn find(&self, id: &Id) -> Result<&Entry> {
// TODO - make an efficient lookup
for entry in &self.entries {
if entry.id() == id {
return Ok(entry);
}
}
raise!("id '{}' not found", id)
}
pub fn remove(&mut self, id: &Id) -> Result<Entry> {
// TODO - efficient removal
let mut pos = None;
for (i, entry) in self.entries.iter().enumerate() {
if entry.id() == id {
pos = Some(i);
break;
}
}
if let Some(i) = pos {
// Note - this can panic, but shouldn't unless a data race occurs.
Ok(self.entries.remove(i))
} else {
raise!("entry '{}' not found", id)
}
}
// TODO - this should be an iterator so the underlying data structure can change.
pub fn entries(&self) -> &Vec<Entry> {
&self.entries
}
}
impl Display for Xsd {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
for entry in &self.entries {
writeln!(f, "{}", entry.id())?;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub enum Entry {
Annotation(Annotation),
AttributeGroup(AttributeGroup),
ComplexType(ComplexType),
Element(Element),
Group(GroupDefinition),
Import(Import),
SimpleType(SimpleType),
}
impl Entry {
pub fn from_xml(node: &exile::Element, lineage: Lineage, xsd: &Xsd) -> Result<Self> {
let n = node.name.as_str();
let t = RootNodeType::parse(n)?;
match t {
RootNodeType::Annotation => {
Ok(Entry::Annotation(Annotation::from_xml(node, lineage, xsd)?))
}
RootNodeType::AttributeGroup => Ok(Entry::AttributeGroup(AttributeGroup::from_xml(
node, lineage, xsd,
)?)),
RootNodeType::ComplexType => Ok(Entry::ComplexType(ComplexType::from_xml(
node, lineage, xsd,
)?)),
RootNodeType::Element => Ok(Entry::Element(Element::from_xml(node, lineage, xsd)?)),
RootNodeType::Group => Ok(Entry::Group(GroupDefinition::from_xml(node, lineage, xsd)?)),
RootNodeType::Import => Ok(Entry::Import(Import::from_xml(node, lineage, xsd)?)),
RootNodeType::SimpleType => {
Ok(Entry::SimpleType(SimpleType::from_xml(node, lineage, xsd)?))
}
}
}
pub fn id(&self) -> &Id {
match self {
Entry::Annotation(x) => &x.id,
Entry::AttributeGroup(x) => x.id(),
Entry::ComplexType(x) => &x.id,
Entry::Element(x) => x.id(),
Entry::Group(x) => &x.id,
Entry::Import(x) => &x.id,
Entry::SimpleType(x) => &x.id,
}
}
pub fn documentation(&self) -> String {
match self {
Entry::Annotation(x) => x.documentation(),
Entry::AttributeGroup(x) => x.documentation(),
Entry::ComplexType(x) => x.documentation(),
Entry::Element(x) => x.documentation(),
Entry::Group(x) => x.documentation(),
Entry::Import(x) => x.documentation(),
Entry::SimpleType(x) => x.documentation(),
}
}
}
pub(crate) fn get_attribute<S: AsRef<str>>(
node: &exile::Element,
attribute_name: S,
) -> Result<String> {
Ok(node
.attributes
.map()
.get(attribute_name.as_ref())
.ok_or(make_err!(
"'{}' attribute not found in '{}' node",
attribute_name.as_ref(),
node.name.as_str()
))?
.clone())
}
pub(crate) fn name_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, NAME)
}
pub(crate) fn namespace_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, NAMESPACE)
}
pub(crate) fn value_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, VALUE)
}
pub(crate) fn ref_attribute(node: &exile::Element) -> Result<String>
|
pub(crate) fn type_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, TYPE)
}
pub(crate) fn use_required(node: &exile::Element) -> bool {
match get_attribute(node, USE) {
Ok(val) => val.as_str() == REQUIRED,
Err(_) => false,
}
}
pub(crate) fn default_attribute(node: &exile::Element) -> Option<String> {
node.attributes.map().get(DEFAULT).cloned()
}
pub(crate) fn fixed_attribute(node: &exile::Element) -> Option<String> {
node.attributes.map().get(FIXED).cloned()
}
pub(crate) fn is_ref(node: &exile::Element) -> bool {
node.attributes.map().get(REF).is_some()
}
pub(crate) fn base_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, BASE)
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct Occurs {
pub min_occurs: u64,
/// None means `unbounded`
pub max_occurs: Option<u64>,
}
impl Default for Occurs {
fn default() -> Self {
Self {
min_occurs: 1,
max_occurs: Some(1),
}
}
}
impl Occurs {
pub fn from_xml(node: &exile::Element) -> Result<Occurs> {
Ok(Self::from_map(node.attributes.map())?)
}
pub fn from_map(map: &BTreeMap<String, String>) -> Result<Occurs> {
let min_occurs: u64 = if let Some(sval) = map.get(MIN_OCCURS) {
wrap!(sval.parse::<u64>())?
} else {
1
};
let max_occurs: Option<u64> = if let Some(sval) = map.get(MAX_OCCURS) {
if sval.as_str() == UNBOUNDED {
None
} else {
Some(wrap!(sval.parse::<u64>())?)
}
} else {
Some(1)
};
if let Some(the_max) = max_occurs {
if min_occurs > the_max {
return raise!(
"{} cannot be greater than {}, in this case {} is {} and {} is {}",
MIN_OCCURS,
MAX_OCCURS,
MIN_OCCURS,
min_occurs,
MAX_OCCURS,
the_max
);
}
}
Ok(Self {
min_occurs,
max_occurs,
})
}
}
#[test]
fn parse_occurs() {
let test_cases = vec![
(
r#"<xyz minOccurs="1"/>"#,
Occurs {
min_occurs: 1,
max_occurs: Some(1),
},
),
(
r#"<xyz maxOccurs="unbounded"/>"#,
Occurs {
min_occurs: 1,
max_occurs: None,
},
),
(
r#"<xyz/>"#,
Occurs {
min_occurs: 1,
max_occurs: Some(1),
},
),
(
r#"<xyz minOccurs="2" maxOccurs="3"/>"#,
Occurs {
min_occurs: 2,
max_occurs: Some(3),
},
),
];
for (xml, want) in test_cases {
let doc = exile::parse(xml).unwrap();
let got = Occurs::from_xml(doc.root()).unwrap();
assert_eq!(got, want)
}
}
#[test]
fn parse_occurs_err() {
let test_cases = vec![
r#"<xyz minOccurs="10" maxOccurs="1"/>"#,
r#"<xyz maxOccurs="unexpectedString"/>"#,
];
for xml in test_cases {
let doc = exile::parse(xml).unwrap();
assert!(Occurs::from_xml(doc.root()).is_err());
}
}
|
{
get_attribute(node, REF)
}
|
identifier_body
|
mod.rs
|
#[macro_use]
// has macros, must go first
mod utils;
pub mod annotation;
pub mod attribute;
pub mod attribute_group;
pub mod attributes;
pub mod choice;
pub mod common;
pub mod complex_content;
pub mod complex_type;
pub mod constants;
pub mod element;
pub mod extension;
pub mod group;
pub mod id;
pub mod import;
pub mod list;
pub mod primitives;
pub mod restriction;
pub mod sequence;
pub mod simple_content;
pub mod simple_type;
pub mod union;
use crate::error::Result;
use crate::xsd::annotation::Annotation;
use crate::xsd::attribute_group::AttributeGroup;
use crate::xsd::complex_type::ComplexType;
use crate::xsd::constants::{
ANNOTATION, ATTRIBUTE_GROUP, BASE, COMPLEX_TYPE, DEFAULT, ELEMENT, FIXED, GROUP, IMPORT,
MAX_OCCURS, MIN_OCCURS, NAME, NAMESPACE, REF, REQUIRED, SIMPLE_TYPE, TYPE, UNBOUNDED, USE,
VALUE,
};
use crate::xsd::element::Element;
use crate::xsd::group::GroupDefinition;
use crate::xsd::id::{Id, Lineage, RootNodeType};
use crate::xsd::import::Import;
use crate::xsd::simple_type::SimpleType;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::fmt::{Display, Formatter};
use std::path::Path;
#[derive(Clone, Debug)]
pub struct Xsd {
entries: Vec<Entry>,
prefix: String,
}
impl Default for Xsd {
fn default() -> Self {
Self {
entries: Vec::new(),
prefix: "xs".to_owned(),
}
}
}
impl Xsd {
pub fn load<P: AsRef<Path>>(filepath: P) -> Result<Self> {
let xml_str = wrap!(
std::fs::read_to_string(filepath.as_ref()),
"unable to load '{}'",
filepath.as_ref().display()
)?;
let doc = exile::parse(&xml_str).unwrap();
Self::parse(doc.root())
}
pub fn parse(root: &exile::Element) -> Result<Self> {
if root.name != "schema" {
return raise!("expected the root node to be named 'schema'");
}
let mut prefix = "";
for (k, v) in root.attributes.map() {
if v.as_str() == "http://www.w3.org/2001/XMLSchema" {
if k.starts_with("xmlns:") {
let mut split = k.split(':');
let _ = split.next().ok_or(make_err!("expected to find xmlns:"))?;
let ns: &str = split
.next()
.ok_or(make_err!("expected to find xmlns prefix"))?;
prefix = ns;
break;
}
}
}
if prefix.is_empty() {
return raise!("xmlns prefix is empty");
}
let mut xsd = Xsd {
entries: Vec::new(),
prefix: prefix.to_owned(),
};
for (i, entry_node) in root.children().enumerate() {
let entry = Entry::from_xml(entry_node, Lineage::Index(i as u64), &xsd)?;
xsd.add_entry(entry)?;
}
Ok(xsd)
}
pub fn new<S: AsRef<str>>(prefix: S) -> Self {
Self {
entries: Vec::new(),
prefix: prefix.as_ref().into(),
}
}
pub fn prefix(&self) -> &str {
self.prefix.as_str()
}
pub fn add_entry(&mut self, entry: Entry) -> Result<()> {
// TODO - make an efficient storage
self.entries.push(entry);
Ok(())
}
pub fn find(&self, id: &Id) -> Result<&Entry> {
// TODO - make an efficient lookup
for entry in &self.entries {
if entry.id() == id {
return Ok(entry);
}
}
raise!("id '{}' not found", id)
}
pub fn remove(&mut self, id: &Id) -> Result<Entry> {
// TODO - efficient removal
let mut pos = None;
for (i, entry) in self.entries.iter().enumerate() {
if entry.id() == id {
pos = Some(i);
break;
}
}
if let Some(i) = pos {
// Note - this can panic, but shouldn't unless a data race occurs.
Ok(self.entries.remove(i))
} else {
raise!("entry '{}' not found", id)
}
}
// TODO - this should be an iterator so the underlying data structure can change.
pub fn entries(&self) -> &Vec<Entry> {
&self.entries
}
}
impl Display for Xsd {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
for entry in &self.entries {
writeln!(f, "{}", entry.id())?;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub enum Entry {
Annotation(Annotation),
AttributeGroup(AttributeGroup),
ComplexType(ComplexType),
Element(Element),
Group(GroupDefinition),
Import(Import),
SimpleType(SimpleType),
}
impl Entry {
pub fn from_xml(node: &exile::Element, lineage: Lineage, xsd: &Xsd) -> Result<Self> {
let n = node.name.as_str();
let t = RootNodeType::parse(n)?;
match t {
RootNodeType::Annotation => {
Ok(Entry::Annotation(Annotation::from_xml(node, lineage, xsd)?))
}
RootNodeType::AttributeGroup => Ok(Entry::AttributeGroup(AttributeGroup::from_xml(
node, lineage, xsd,
)?)),
RootNodeType::ComplexType => Ok(Entry::ComplexType(ComplexType::from_xml(
node, lineage, xsd,
)?)),
RootNodeType::Element => Ok(Entry::Element(Element::from_xml(node, lineage, xsd)?)),
RootNodeType::Group => Ok(Entry::Group(GroupDefinition::from_xml(node, lineage, xsd)?)),
RootNodeType::Import => Ok(Entry::Import(Import::from_xml(node, lineage, xsd)?)),
RootNodeType::SimpleType => {
Ok(Entry::SimpleType(SimpleType::from_xml(node, lineage, xsd)?))
}
}
}
pub fn id(&self) -> &Id {
match self {
Entry::Annotation(x) => &x.id,
Entry::AttributeGroup(x) => x.id(),
Entry::ComplexType(x) => &x.id,
Entry::Element(x) => x.id(),
Entry::Group(x) => &x.id,
Entry::Import(x) => &x.id,
Entry::SimpleType(x) => &x.id,
}
}
pub fn documentation(&self) -> String {
match self {
Entry::Annotation(x) => x.documentation(),
Entry::AttributeGroup(x) => x.documentation(),
Entry::ComplexType(x) => x.documentation(),
Entry::Element(x) => x.documentation(),
Entry::Group(x) => x.documentation(),
Entry::Import(x) => x.documentation(),
Entry::SimpleType(x) => x.documentation(),
}
}
}
pub(crate) fn get_attribute<S: AsRef<str>>(
node: &exile::Element,
attribute_name: S,
) -> Result<String> {
Ok(node
.attributes
.map()
.get(attribute_name.as_ref())
.ok_or(make_err!(
"'{}' attribute not found in '{}' node",
attribute_name.as_ref(),
node.name.as_str()
))?
.clone())
}
pub(crate) fn name_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, NAME)
}
pub(crate) fn namespace_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, NAMESPACE)
}
pub(crate) fn value_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, VALUE)
}
pub(crate) fn ref_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, REF)
}
pub(crate) fn
|
(node: &exile::Element) -> Result<String> {
get_attribute(node, TYPE)
}
pub(crate) fn use_required(node: &exile::Element) -> bool {
match get_attribute(node, USE) {
Ok(val) => val.as_str() == REQUIRED,
Err(_) => false,
}
}
pub(crate) fn default_attribute(node: &exile::Element) -> Option<String> {
node.attributes.map().get(DEFAULT).cloned()
}
pub(crate) fn fixed_attribute(node: &exile::Element) -> Option<String> {
node.attributes.map().get(FIXED).cloned()
}
pub(crate) fn is_ref(node: &exile::Element) -> bool {
node.attributes.map().get(REF).is_some()
}
pub(crate) fn base_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, BASE)
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct Occurs {
pub min_occurs: u64,
/// None means `unbounded`
pub max_occurs: Option<u64>,
}
impl Default for Occurs {
fn default() -> Self {
Self {
min_occurs: 1,
max_occurs: Some(1),
}
}
}
impl Occurs {
pub fn from_xml(node: &exile::Element) -> Result<Occurs> {
Ok(Self::from_map(node.attributes.map())?)
}
pub fn from_map(map: &BTreeMap<String, String>) -> Result<Occurs> {
let min_occurs: u64 = if let Some(sval) = map.get(MIN_OCCURS) {
wrap!(sval.parse::<u64>())?
} else {
1
};
let max_occurs: Option<u64> = if let Some(sval) = map.get(MAX_OCCURS) {
if sval.as_str() == UNBOUNDED {
None
} else {
Some(wrap!(sval.parse::<u64>())?)
}
} else {
Some(1)
};
if let Some(the_max) = max_occurs {
if min_occurs > the_max {
return raise!(
"{} cannot be greater than {}, in this case {} is {} and {} is {}",
MIN_OCCURS,
MAX_OCCURS,
MIN_OCCURS,
min_occurs,
MAX_OCCURS,
the_max
);
}
}
Ok(Self {
min_occurs,
max_occurs,
})
}
}
#[test]
fn parse_occurs() {
let test_cases = vec![
(
r#"<xyz minOccurs="1"/>"#,
Occurs {
min_occurs: 1,
max_occurs: Some(1),
},
),
(
r#"<xyz maxOccurs="unbounded"/>"#,
Occurs {
min_occurs: 1,
max_occurs: None,
},
),
(
r#"<xyz/>"#,
Occurs {
min_occurs: 1,
max_occurs: Some(1),
},
),
(
r#"<xyz minOccurs="2" maxOccurs="3"/>"#,
Occurs {
min_occurs: 2,
max_occurs: Some(3),
},
),
];
for (xml, want) in test_cases {
let doc = exile::parse(xml).unwrap();
let got = Occurs::from_xml(doc.root()).unwrap();
assert_eq!(got, want)
}
}
#[test]
fn parse_occurs_err() {
let test_cases = vec![
r#"<xyz minOccurs="10" maxOccurs="1"/>"#,
r#"<xyz maxOccurs="unexpectedString"/>"#,
];
for xml in test_cases {
let doc = exile::parse(xml).unwrap();
assert!(Occurs::from_xml(doc.root()).is_err());
}
}
|
type_attribute
|
identifier_name
|
mod.rs
|
#[macro_use]
// has macros, must go first
mod utils;
pub mod annotation;
pub mod attribute;
pub mod attribute_group;
pub mod attributes;
pub mod choice;
pub mod common;
pub mod complex_content;
pub mod complex_type;
pub mod constants;
pub mod element;
pub mod extension;
pub mod group;
pub mod id;
pub mod import;
pub mod list;
pub mod primitives;
pub mod restriction;
pub mod sequence;
pub mod simple_content;
pub mod simple_type;
pub mod union;
use crate::error::Result;
use crate::xsd::annotation::Annotation;
use crate::xsd::attribute_group::AttributeGroup;
use crate::xsd::complex_type::ComplexType;
use crate::xsd::constants::{
ANNOTATION, ATTRIBUTE_GROUP, BASE, COMPLEX_TYPE, DEFAULT, ELEMENT, FIXED, GROUP, IMPORT,
MAX_OCCURS, MIN_OCCURS, NAME, NAMESPACE, REF, REQUIRED, SIMPLE_TYPE, TYPE, UNBOUNDED, USE,
VALUE,
};
use crate::xsd::element::Element;
use crate::xsd::group::GroupDefinition;
use crate::xsd::id::{Id, Lineage, RootNodeType};
use crate::xsd::import::Import;
use crate::xsd::simple_type::SimpleType;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::fmt::{Display, Formatter};
use std::path::Path;
#[derive(Clone, Debug)]
pub struct Xsd {
entries: Vec<Entry>,
prefix: String,
}
impl Default for Xsd {
fn default() -> Self {
Self {
entries: Vec::new(),
prefix: "xs".to_owned(),
}
}
}
impl Xsd {
pub fn load<P: AsRef<Path>>(filepath: P) -> Result<Self> {
let xml_str = wrap!(
std::fs::read_to_string(filepath.as_ref()),
"unable to load '{}'",
filepath.as_ref().display()
)?;
let doc = exile::parse(&xml_str).unwrap();
Self::parse(doc.root())
}
pub fn parse(root: &exile::Element) -> Result<Self> {
if root.name != "schema" {
return raise!("expected the root node to be named 'schema'");
}
let mut prefix = "";
for (k, v) in root.attributes.map() {
if v.as_str() == "http://www.w3.org/2001/XMLSchema" {
if k.starts_with("xmlns:") {
let mut split = k.split(':');
let _ = split.next().ok_or(make_err!("expected to find xmlns:"))?;
let ns: &str = split
.next()
.ok_or(make_err!("expected to find xmlns prefix"))?;
prefix = ns;
break;
}
}
}
if prefix.is_empty() {
return raise!("xmlns prefix is empty");
}
let mut xsd = Xsd {
entries: Vec::new(),
prefix: prefix.to_owned(),
};
for (i, entry_node) in root.children().enumerate() {
let entry = Entry::from_xml(entry_node, Lineage::Index(i as u64), &xsd)?;
xsd.add_entry(entry)?;
}
Ok(xsd)
}
pub fn new<S: AsRef<str>>(prefix: S) -> Self {
Self {
entries: Vec::new(),
prefix: prefix.as_ref().into(),
}
}
pub fn prefix(&self) -> &str {
self.prefix.as_str()
}
pub fn add_entry(&mut self, entry: Entry) -> Result<()> {
// TODO - make an efficient storage
self.entries.push(entry);
Ok(())
}
pub fn find(&self, id: &Id) -> Result<&Entry> {
// TODO - make an efficient lookup
for entry in &self.entries {
if entry.id() == id {
return Ok(entry);
}
}
raise!("id '{}' not found", id)
}
pub fn remove(&mut self, id: &Id) -> Result<Entry> {
// TODO - efficient removal
let mut pos = None;
for (i, entry) in self.entries.iter().enumerate() {
if entry.id() == id {
pos = Some(i);
break;
}
}
if let Some(i) = pos {
// Note - this can panic, but shouldn't unless a data race occurs.
Ok(self.entries.remove(i))
} else {
raise!("entry '{}' not found", id)
}
}
// TODO - this should be an iterator so the underlying data structure can change.
pub fn entries(&self) -> &Vec<Entry> {
&self.entries
}
}
impl Display for Xsd {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
for entry in &self.entries {
writeln!(f, "{}", entry.id())?;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub enum Entry {
Annotation(Annotation),
AttributeGroup(AttributeGroup),
ComplexType(ComplexType),
Element(Element),
Group(GroupDefinition),
Import(Import),
SimpleType(SimpleType),
}
impl Entry {
pub fn from_xml(node: &exile::Element, lineage: Lineage, xsd: &Xsd) -> Result<Self> {
let n = node.name.as_str();
let t = RootNodeType::parse(n)?;
match t {
RootNodeType::Annotation => {
Ok(Entry::Annotation(Annotation::from_xml(node, lineage, xsd)?))
}
RootNodeType::AttributeGroup => Ok(Entry::AttributeGroup(AttributeGroup::from_xml(
node, lineage, xsd,
)?)),
RootNodeType::ComplexType => Ok(Entry::ComplexType(ComplexType::from_xml(
node, lineage, xsd,
)?)),
RootNodeType::Element => Ok(Entry::Element(Element::from_xml(node, lineage, xsd)?)),
RootNodeType::Group => Ok(Entry::Group(GroupDefinition::from_xml(node, lineage, xsd)?)),
RootNodeType::Import => Ok(Entry::Import(Import::from_xml(node, lineage, xsd)?)),
RootNodeType::SimpleType => {
Ok(Entry::SimpleType(SimpleType::from_xml(node, lineage, xsd)?))
}
}
}
pub fn id(&self) -> &Id {
match self {
Entry::Annotation(x) => &x.id,
Entry::AttributeGroup(x) => x.id(),
Entry::ComplexType(x) => &x.id,
Entry::Element(x) => x.id(),
Entry::Group(x) => &x.id,
Entry::Import(x) => &x.id,
Entry::SimpleType(x) => &x.id,
}
}
pub fn documentation(&self) -> String {
match self {
Entry::Annotation(x) => x.documentation(),
Entry::AttributeGroup(x) => x.documentation(),
Entry::ComplexType(x) => x.documentation(),
Entry::Element(x) => x.documentation(),
Entry::Group(x) => x.documentation(),
Entry::Import(x) => x.documentation(),
Entry::SimpleType(x) => x.documentation(),
}
}
}
pub(crate) fn get_attribute<S: AsRef<str>>(
node: &exile::Element,
attribute_name: S,
) -> Result<String> {
Ok(node
.attributes
.map()
.get(attribute_name.as_ref())
.ok_or(make_err!(
"'{}' attribute not found in '{}' node",
attribute_name.as_ref(),
node.name.as_str()
))?
.clone())
}
pub(crate) fn name_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, NAME)
}
pub(crate) fn namespace_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, NAMESPACE)
}
pub(crate) fn value_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, VALUE)
}
pub(crate) fn ref_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, REF)
}
pub(crate) fn type_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, TYPE)
}
pub(crate) fn use_required(node: &exile::Element) -> bool {
match get_attribute(node, USE) {
Ok(val) => val.as_str() == REQUIRED,
Err(_) => false,
}
}
pub(crate) fn default_attribute(node: &exile::Element) -> Option<String> {
node.attributes.map().get(DEFAULT).cloned()
}
pub(crate) fn fixed_attribute(node: &exile::Element) -> Option<String> {
node.attributes.map().get(FIXED).cloned()
}
pub(crate) fn is_ref(node: &exile::Element) -> bool {
node.attributes.map().get(REF).is_some()
}
pub(crate) fn base_attribute(node: &exile::Element) -> Result<String> {
get_attribute(node, BASE)
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct Occurs {
pub min_occurs: u64,
/// None means `unbounded`
pub max_occurs: Option<u64>,
}
impl Default for Occurs {
fn default() -> Self {
Self {
min_occurs: 1,
max_occurs: Some(1),
}
}
}
impl Occurs {
pub fn from_xml(node: &exile::Element) -> Result<Occurs> {
Ok(Self::from_map(node.attributes.map())?)
}
pub fn from_map(map: &BTreeMap<String, String>) -> Result<Occurs> {
let min_occurs: u64 = if let Some(sval) = map.get(MIN_OCCURS) {
wrap!(sval.parse::<u64>())?
} else {
1
};
let max_occurs: Option<u64> = if let Some(sval) = map.get(MAX_OCCURS) {
if sval.as_str() == UNBOUNDED {
None
} else {
Some(wrap!(sval.parse::<u64>())?)
}
} else {
Some(1)
};
if let Some(the_max) = max_occurs {
if min_occurs > the_max {
return raise!(
"{} cannot be greater than {}, in this case {} is {} and {} is {}",
MIN_OCCURS,
MAX_OCCURS,
MIN_OCCURS,
min_occurs,
MAX_OCCURS,
the_max
);
}
}
Ok(Self {
min_occurs,
max_occurs,
})
}
}
#[test]
fn parse_occurs() {
let test_cases = vec![
(
r#"<xyz minOccurs="1"/>"#,
Occurs {
min_occurs: 1,
max_occurs: Some(1),
},
),
(
r#"<xyz maxOccurs="unbounded"/>"#,
Occurs {
min_occurs: 1,
max_occurs: None,
},
),
(
r#"<xyz/>"#,
Occurs {
min_occurs: 1,
max_occurs: Some(1),
},
),
(
r#"<xyz minOccurs="2" maxOccurs="3"/>"#,
Occurs {
min_occurs: 2,
|
];
for (xml, want) in test_cases {
let doc = exile::parse(xml).unwrap();
let got = Occurs::from_xml(doc.root()).unwrap();
assert_eq!(got, want)
}
}
#[test]
fn parse_occurs_err() {
let test_cases = vec![
r#"<xyz minOccurs="10" maxOccurs="1"/>"#,
r#"<xyz maxOccurs="unexpectedString"/>"#,
];
for xml in test_cases {
let doc = exile::parse(xml).unwrap();
assert!(Occurs::from_xml(doc.root()).is_err());
}
}
|
max_occurs: Some(3),
},
),
|
random_line_split
|
lib.rs
|
//! Capstone low-level binding for Rust.
//!
//! See [Capstone's C language documentation](http://www.capstone-engine.org/lang_c.html) for more
//! information.
//!
//! # Safety
//!
//! Obviously unsafe.
//!
//! Every `handle` shall be initialized first with `cs_open`.
//!
//! # Examples
//!
//! TODO
#![allow(non_camel_case_types)]
#![allow(dead_code)]
#![recursion_limit="1000"]
use std::os::raw::{c_void, c_int, c_uint, c_char};
pub type size_t = usize;
#[cfg(not(any(target_arch="x86_64",target_arch="i686")))]
mod placeholders {
include!(concat!(env!("OUT_DIR"), "/placeholders.rs"));
}
#[cfg(target_arch="x86_64")]
mod placeholders {
pub type detail_data = [u64; 185];
pub type arm64_op_data = [u64; 2];
pub type arm_op_data = [u64; 2];
pub type mips_op_data = [u64; 2];
pub type ppc_op_data = [u32; 3];
pub type sparc_op_data = [u32; 2];
pub type sysz_op_data = [u64; 3];
pub type x86_op_data = [u64; 3];
pub type xcore_op_data = [u32; 3];
}
#[cfg(target_arch="i686")]
mod placeholders {
pub type detail_data = [u32; 333];
pub type arm64_op_data = [u32; 3];
pub type arm_op_data = [u64; 2];
pub type mips_op_data = [u32; 3];
pub type ppc_op_data = [u32; 3];
pub type sparc_op_data = [u32; 2];
pub type sysz_op_data = [u32; 5];
pub type x86_op_data = [u32; 6];
pub type xcore_op_data = [u32; 3];
}
#[macro_use]
mod macros;
pub mod arm;
pub mod arm64;
pub mod mips;
pub mod ppc;
pub mod sparc;
pub mod sysz;
pub mod x86;
pub mod xcore;
// automatically generated by rust-bindgen
// then heavily modified
/// Handle to a Capstone context
///
/// 0 is not a valid context.
pub type csh = size_t;
fake_enum! {
/// Architecture type
pub enum cs_arch {
/// ARM architecture (including Thumb, Thumb-2)
CS_ARCH_ARM = 0,
/// ARM-64, also called AArch64
CS_ARCH_ARM64 = 1,
/// Mips architecture
CS_ARCH_MIPS = 2,
/// X86 architecture (including x86 & x86-64)
CS_ARCH_X86 = 3,
/// PowerPC architecture
CS_ARCH_PPC = 4,
/// Sparc architecture
CS_ARCH_SPARC = 5,
/// SystemZ architecture
CS_ARCH_SYSZ = 6,
/// XCore architecture
CS_ARCH_XCORE = 7,
CS_ARCH_MAX = 8,
/// All architecture for `cs_support`
CS_ARCH_ALL = 0xFFFF,
/// Support value to verify diet mode of the engine.
CS_SUPPORT_DIET = CS_ARCH_ALL+1,
/// Support value to verify X86 reduce mode of the engine.
CS_SUPPORT_X86_REDUCE = CS_ARCH_ALL+2,
}
}
fake_enum! {
/// Mode type (architecture variant; not all combinations are possible)
pub enum cs_mode {
/// Little-endian mode (default mode)
CS_MODE_LITTLE_ENDIAN = 0,
/// 32-bit ARM
CS_MODE_ARM = 0,
/// 16-bit mode X86
CS_MODE_16 = 1 << 1,
/// 32-bit mode X86
CS_MODE_32 = 1 << 2,
/// 64-bit mode X86
CS_MODE_64 = 1 << 3,
/// ARM's Thumb mode, including Thumb-2
CS_MODE_THUMB = 1 << 4,
/// ARM's Cortex-M series
CS_MODE_MCLASS = 1 << 5,
/// ARMv8 A32 encodings for ARM
CS_MODE_V8 = 1 << 6,
/// MicroMips mode (MIPS)
CS_MODE_MICRO = 1 << 4,
/// Mips III ISA
CS_MODE_MIPS3 = 1 << 5,
/// Mips32r6 ISA
CS_MODE_MIPS32R6 = 1 << 6,
/// General Purpose Registers are 64-bit wide (MIPS)
CS_MODE_MIPSGP64 = 1 << 7,
/// SparcV9 mode (Sparc)
CS_MODE_V9 = 1 << 4,
/// big-endian mode
CS_MODE_BIG_ENDIAN = 1 << 31,
/// Mips32 ISA (Mips)
CS_MODE_MIPS32 = CS_MODE_32,
/// Mips64 ISA (Mips)
CS_MODE_MIPS64 = CS_MODE_64,
}
}
pub type cs_malloc_t = Option<extern "C" fn(size: size_t) -> *mut c_void>;
pub type cs_calloc_t = Option<extern "C" fn(nmemb: size_t, size: size_t) -> *mut c_void>;
pub type cs_realloc_t = Option<unsafe extern "C" fn(ptr: *mut c_void, size: size_t) -> *mut c_void>;
pub type cs_free_t = Option<unsafe extern "C" fn(ptr: *mut c_void)>;
pub type cs_vsnprintf_t = Option<unsafe extern "C" fn()>;
// pub type cs_vsnprintf_t = Option<unsafe extern "C" fn(str: *mut c_char,
// size: size_t,
// format: *const c_char,
// ap: va_list)
// -> c_int>;
#[repr(C)]
pub struct cs_opt_mem {
pub malloc: cs_malloc_t,
pub calloc: cs_calloc_t,
pub realloc: cs_realloc_t,
pub free: cs_free_t,
pub vsnprintf: cs_vsnprintf_t,
}
impl ::std::default::Default for cs_opt_mem {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
fake_enum! {
/// Runtime option for the disassembly engine
pub enum cs_opt_type {
/// Assembly output syntax
CS_OPT_SYNTAX = 1,
/// Break down instruction structure into details
CS_OPT_DETAIL,
/// Change engine's mode at run-time
CS_OPT_MODE,
/// User-defined dynamic memory related functions
CS_OPT_MEM,
|
CS_OPT_SKIPDATA_SETUP,
}
}
fake_enum! {
/// Runtime option value (associated with option type above)
pub enum cs_opt_value {
/// Turn OFF an option - default option of CS_OPT_DETAIL, CS_OPT_SKIPDATA.
CS_OPT_OFF = 0,
/// Turn ON an option (CS_OPT_DETAIL, CS_OPT_SKIPDATA).
CS_OPT_ON = 3,
/// Default asm syntax (CS_OPT_SYNTAX).
CS_OPT_SYNTAX_DEFAULT = 0,
/// X86 Intel asm syntax - default on X86 (CS_OPT_SYNTAX).
CS_OPT_SYNTAX_INTEL,
/// X86 ATT asm syntax (CS_OPT_SYNTAX).
CS_OPT_SYNTAX_ATT,
/// Print register names with only numbers (CS_OPT_SYNTAX)
CS_OPT_SYNTAX_NOREGNAME,
}
}
fake_enum! {
/// Common instruction operand types - to be consistent across all architectures.
pub enum cs_op_type {
/// Uninitialized/invalid operand.
CS_OP_INVALID = 0,
/// Register operand.
CS_OP_REG = 1,
/// Immediate operand.
CS_OP_IMM = 2,
/// Memory operand.
CS_OP_MEM = 3,
/// Floating-Point operand.
CS_OP_FP = 4,
}
}
fake_enum! {
/// Common instruction groups - to be consistent across all architectures.
pub enum cs_group_type {
/// uninitialized/invalid group.
CS_GRP_INVALID = 0,
/// all jump instructions (conditional+direct+indirect jumps)
CS_GRP_JUMP,
/// all call instructions
CS_GRP_CALL,
/// all return instructions
CS_GRP_RET,
/// all interrupt instructions (int+syscall)
CS_GRP_INT,
/// all interrupt return instructions
CS_GRP_IRET,
}
}
pub type cs_skipdata_cb_t = Option<unsafe extern "C" fn(code: *const u8,
code_size: size_t,
offset: size_t,
user_data: *mut c_void)
-> size_t>;
#[repr(C)]
pub struct cs_opt_skipdata {
pub mnemonic: *const c_char,
pub callback: cs_skipdata_cb_t,
pub user_data: *mut c_void,
}
impl ::std::default::Default for cs_opt_skipdata {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
#[repr(C)]
pub struct cs_detail {
pub regs_read: [u8; 12usize],
pub regs_read_count: u8,
pub regs_write: [u8; 20usize],
pub regs_write_count: u8,
pub groups: [u8; 8usize],
pub groups_count: u8,
data: placeholders::detail_data,
}
impl cs_detail {
pub unsafe fn x86(&self) -> &x86::cs_x86 {
::std::mem::transmute(&self.data)
}
pub unsafe fn arm64(&self) -> &arm64::cs_arm64 {
::std::mem::transmute(&self.data)
}
pub unsafe fn arm(&self) -> &arm::cs_arm {
::std::mem::transmute(&self.data)
}
pub unsafe fn mips(&self) -> &mips::cs_mips {
::std::mem::transmute(&self.data)
}
pub unsafe fn ppc(&self) -> &ppc::cs_ppc {
::std::mem::transmute(&self.data)
}
pub unsafe fn sparc(&self) -> &sparc::cs_sparc {
::std::mem::transmute(&self.data)
}
pub unsafe fn sysz(&self) -> &sysz::cs_sysz {
::std::mem::transmute(&self.data)
}
pub unsafe fn xcore(&self) -> &xcore::cs_xcore {
::std::mem::transmute(&self.data)
}
}
/// Information on a disassembled instruction
#[repr(C)]
pub struct cs_insn {
/// Architecture-dependent instruction identifier, see `<ARCH>_INS_*`
pub id: c_uint,
/// Base address
pub address: u64,
/// Size of the instruction
pub size: u16,
/// Bytes of the instruction
pub bytes: [u8; 16usize],
/// C-string of the mnemonic
pub mnemonic: [c_char; 32usize],
/// C-string of the operands
pub op_str: [c_char; 160usize],
/// More details available if option `CS_OPTION_DETAIL` is on and if option
/// `CS_OPTION_SKIPDATA` is not on
pub detail: *mut cs_detail,
}
fake_enum! {
/// All type of errors encountered by Capstone API.
/// These are values returned by cs_errno()
pub enum cs_err {
/// No error: everything was fine
CS_ERR_OK = 0,
/// Out-Of-Memory error: cs_open(), cs_disasm(), cs_disasm_iter()
CS_ERR_MEM,
/// Unsupported architecture: cs_open()
CS_ERR_ARCH,
/// Invalid handle: cs_op_count(), cs_op_index()
CS_ERR_HANDLE,
/// Invalid csh argument: cs_close(), cs_errno(), cs_option()
CS_ERR_CSH,
/// Invalid/unsupported mode: cs_open()
CS_ERR_MODE,
/// Invalid/unsupported option: cs_option()
CS_ERR_OPTION,
/// Information is unavailable because detail option is OFF
CS_ERR_DETAIL,
/// Dynamic memory management uninitialized (see CS_OPT_MEM)
CS_ERR_MEMSETUP,
/// Unsupported version (bindings)
CS_ERR_VERSION,
/// Access irrelevant data in "diet" engine
CS_ERR_DIET,
/// Access irrelevant data for "data" instruction in SKIPDATA mode
CS_ERR_SKIPDATA,
/// X86 AT&T syntax is unsupported (opt-out at compile time)
CS_ERR_X86_ATT,
/// X86 Intel syntax is unsupported (opt-out at compile time)
CS_ERR_X86_INTEL,
}
}
#[link(name = "capstone", kind = "dylib")]
extern "C" {
/// Return combined API version & major and minor version numbers.
pub fn cs_version(major: *mut c_int, minor: *mut c_int) -> c_uint;
pub fn cs_support(query: c_int) -> u8;
/// Initialize a Capstone `handle` (non-null pointer) for a given architecture type `arch`
/// (`CS_ARCH_*`) and hardware `mode` (`CS_MODE_*`).
///
/// Returns CS_ERR_OK on success, or other value on failure (refer to cs_err enum for detailed
/// error).
pub fn cs_open(arch: cs_arch, mode: cs_mode, handle: *mut csh) -> cs_err;
/// Close a Capstone `handle` (and zero it).
///
/// Release the handle when it is not used anymore but only when there is no
/// longer usage of Capstone, in particular no access to `cs_insn` array.
pub fn cs_close(handle: *mut csh) -> cs_err;
/// Set option `typ` with given `value` for disassembling engine at runtime.
pub fn cs_option(handle: csh, typ: cs_opt_type, value: size_t) -> cs_err;
/// Report the last error number for the given Capstone `handle` when some API function fails.
/// Like glibc's `errno`, `cs_errno` might not retain its old value once accessed.
pub fn cs_errno(handle: csh) -> cs_err;
/// Return a string describing given error `code`.
pub fn cs_strerror(code: cs_err) -> *const c_char;
/// Disassemble binary code in the context of `handle`, given the `code` buffer of size
/// `code_size`, the base `address`, and the desired number (`count`) of instructions to
/// decode. Sets `insn` to point to an array of decoded instructions and returns the number
/// of decoded instructions, which is also the length of that array.
///
/// # Safety
///
/// * `code` shall be valid and points to an array of bytes of at least `code_size`.
/// * `insn` shall be valid.
pub fn cs_disasm(handle: csh,
code: *const u8,
code_size: size_t,
address: u64,
count: size_t,
insn: *mut *mut cs_insn)
-> size_t;
/// Free a Capstone-allocated array of instructions.
///
/// # Safety
///
/// `insn` shall originate either from a previous call to `cs_malloc`, in which case the count
/// should be 1, or `cs_disasm` in which case the count should be the return value of
/// `cs_disasm`
pub fn cs_free(insn: *mut cs_insn, count: size_t);
/// Allocate a single instruction to be freed with `cs_free(insn, 1)`.
pub fn cs_malloc(handle: csh) -> *mut cs_insn;
/// Fast API to disassemble binary code, given the code buffer, size, address and number of
/// instructions to be decoded.
pub fn cs_disasm_iter(handle: csh,
code: *mut *const u8,
size: *mut size_t,
address: *mut u64,
insn: *mut cs_insn)
-> u8;
pub fn cs_reg_name(handle: csh, reg_id: c_uint) -> *const c_char;
pub fn cs_insn_name(handle: csh, insn_id: c_uint) -> *const c_char;
pub fn cs_group_name(handle: csh, group_id: c_uint) -> *const c_char;
pub fn cs_insn_group(handle: csh, insn: *const cs_insn, group_id: c_uint) -> u8;
pub fn cs_reg_read(handle: csh, insn: *const cs_insn, reg_id: c_uint) -> u8;
pub fn cs_reg_write(handle: csh, insn: *const cs_insn, reg_id: c_uint) -> u8;
pub fn cs_op_count(handle: csh, insn: *const cs_insn, op_type: c_uint) -> c_int;
pub fn cs_op_index(handle: csh,
insn: *const cs_insn,
op_type: c_uint,
position: c_uint)
-> c_int;
}
|
/// Skip data when disassembling; the engine is then in SKIPDATA mode.
CS_OPT_SKIPDATA,
/// Setup user-defined function for SKIPDATA option
|
random_line_split
|
lib.rs
|
//! Capstone low-level binding for Rust.
//!
//! See [Capstone's C language documentation](http://www.capstone-engine.org/lang_c.html) for more
//! information.
//!
//! # Safety
//!
//! Obviously unsafe.
//!
//! Every `handle` shall be initialized first with `cs_open`.
//!
//! # Examples
//!
//! TODO
#![allow(non_camel_case_types)]
#![allow(dead_code)]
#![recursion_limit="1000"]
use std::os::raw::{c_void, c_int, c_uint, c_char};
pub type size_t = usize;
#[cfg(not(any(target_arch="x86_64",target_arch="i686")))]
mod placeholders {
include!(concat!(env!("OUT_DIR"), "/placeholders.rs"));
}
#[cfg(target_arch="x86_64")]
mod placeholders {
pub type detail_data = [u64; 185];
pub type arm64_op_data = [u64; 2];
pub type arm_op_data = [u64; 2];
pub type mips_op_data = [u64; 2];
pub type ppc_op_data = [u32; 3];
pub type sparc_op_data = [u32; 2];
pub type sysz_op_data = [u64; 3];
pub type x86_op_data = [u64; 3];
pub type xcore_op_data = [u32; 3];
}
#[cfg(target_arch="i686")]
mod placeholders {
pub type detail_data = [u32; 333];
pub type arm64_op_data = [u32; 3];
pub type arm_op_data = [u64; 2];
pub type mips_op_data = [u32; 3];
pub type ppc_op_data = [u32; 3];
pub type sparc_op_data = [u32; 2];
pub type sysz_op_data = [u32; 5];
pub type x86_op_data = [u32; 6];
pub type xcore_op_data = [u32; 3];
}
#[macro_use]
mod macros;
pub mod arm;
pub mod arm64;
pub mod mips;
pub mod ppc;
pub mod sparc;
pub mod sysz;
pub mod x86;
pub mod xcore;
// automatically generated by rust-bindgen
// then heavily modified
/// Handle to a Capstone context
///
/// 0 is not a valid context.
pub type csh = size_t;
fake_enum! {
/// Architecture type
pub enum cs_arch {
/// ARM architecture (including Thumb, Thumb-2)
CS_ARCH_ARM = 0,
/// ARM-64, also called AArch64
CS_ARCH_ARM64 = 1,
/// Mips architecture
CS_ARCH_MIPS = 2,
/// X86 architecture (including x86 & x86-64)
CS_ARCH_X86 = 3,
/// PowerPC architecture
CS_ARCH_PPC = 4,
/// Sparc architecture
CS_ARCH_SPARC = 5,
/// SystemZ architecture
CS_ARCH_SYSZ = 6,
/// XCore architecture
CS_ARCH_XCORE = 7,
CS_ARCH_MAX = 8,
/// All architecture for `cs_support`
CS_ARCH_ALL = 0xFFFF,
/// Support value to verify diet mode of the engine.
CS_SUPPORT_DIET = CS_ARCH_ALL+1,
/// Support value to verify X86 reduce mode of the engine.
CS_SUPPORT_X86_REDUCE = CS_ARCH_ALL+2,
}
}
fake_enum! {
/// Mode type (architecture variant; not all combinations are possible)
pub enum cs_mode {
/// Little-endian mode (default mode)
CS_MODE_LITTLE_ENDIAN = 0,
/// 32-bit ARM
CS_MODE_ARM = 0,
/// 16-bit mode X86
CS_MODE_16 = 1 << 1,
/// 32-bit mode X86
CS_MODE_32 = 1 << 2,
/// 64-bit mode X86
CS_MODE_64 = 1 << 3,
/// ARM's Thumb mode, including Thumb-2
CS_MODE_THUMB = 1 << 4,
/// ARM's Cortex-M series
CS_MODE_MCLASS = 1 << 5,
/// ARMv8 A32 encodings for ARM
CS_MODE_V8 = 1 << 6,
/// MicroMips mode (MIPS)
CS_MODE_MICRO = 1 << 4,
/// Mips III ISA
CS_MODE_MIPS3 = 1 << 5,
/// Mips32r6 ISA
CS_MODE_MIPS32R6 = 1 << 6,
/// General Purpose Registers are 64-bit wide (MIPS)
CS_MODE_MIPSGP64 = 1 << 7,
/// SparcV9 mode (Sparc)
CS_MODE_V9 = 1 << 4,
/// big-endian mode
CS_MODE_BIG_ENDIAN = 1 << 31,
/// Mips32 ISA (Mips)
CS_MODE_MIPS32 = CS_MODE_32,
/// Mips64 ISA (Mips)
CS_MODE_MIPS64 = CS_MODE_64,
}
}
pub type cs_malloc_t = Option<extern "C" fn(size: size_t) -> *mut c_void>;
pub type cs_calloc_t = Option<extern "C" fn(nmemb: size_t, size: size_t) -> *mut c_void>;
pub type cs_realloc_t = Option<unsafe extern "C" fn(ptr: *mut c_void, size: size_t) -> *mut c_void>;
pub type cs_free_t = Option<unsafe extern "C" fn(ptr: *mut c_void)>;
pub type cs_vsnprintf_t = Option<unsafe extern "C" fn()>;
// pub type cs_vsnprintf_t = Option<unsafe extern "C" fn(str: *mut c_char,
// size: size_t,
// format: *const c_char,
// ap: va_list)
// -> c_int>;
#[repr(C)]
pub struct cs_opt_mem {
pub malloc: cs_malloc_t,
pub calloc: cs_calloc_t,
pub realloc: cs_realloc_t,
pub free: cs_free_t,
pub vsnprintf: cs_vsnprintf_t,
}
impl ::std::default::Default for cs_opt_mem {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
fake_enum! {
/// Runtime option for the disassembly engine
pub enum cs_opt_type {
/// Assembly output syntax
CS_OPT_SYNTAX = 1,
/// Break down instruction structure into details
CS_OPT_DETAIL,
/// Change engine's mode at run-time
CS_OPT_MODE,
/// User-defined dynamic memory related functions
CS_OPT_MEM,
/// Skip data when disassembling; the engine is then in SKIPDATA mode.
CS_OPT_SKIPDATA,
/// Setup user-defined function for SKIPDATA option
CS_OPT_SKIPDATA_SETUP,
}
}
fake_enum! {
/// Runtime option value (associated with option type above)
pub enum cs_opt_value {
/// Turn OFF an option - default option of CS_OPT_DETAIL, CS_OPT_SKIPDATA.
CS_OPT_OFF = 0,
/// Turn ON an option (CS_OPT_DETAIL, CS_OPT_SKIPDATA).
CS_OPT_ON = 3,
/// Default asm syntax (CS_OPT_SYNTAX).
CS_OPT_SYNTAX_DEFAULT = 0,
/// X86 Intel asm syntax - default on X86 (CS_OPT_SYNTAX).
CS_OPT_SYNTAX_INTEL,
/// X86 ATT asm syntax (CS_OPT_SYNTAX).
CS_OPT_SYNTAX_ATT,
/// Print register names with only numbers (CS_OPT_SYNTAX)
CS_OPT_SYNTAX_NOREGNAME,
}
}
fake_enum! {
/// Common instruction operand types - to be consistent across all architectures.
pub enum cs_op_type {
/// Uninitialized/invalid operand.
CS_OP_INVALID = 0,
/// Register operand.
CS_OP_REG = 1,
/// Immediate operand.
CS_OP_IMM = 2,
/// Memory operand.
CS_OP_MEM = 3,
/// Floating-Point operand.
CS_OP_FP = 4,
}
}
fake_enum! {
/// Common instruction groups - to be consistent across all architectures.
pub enum cs_group_type {
/// uninitialized/invalid group.
CS_GRP_INVALID = 0,
/// all jump instructions (conditional+direct+indirect jumps)
CS_GRP_JUMP,
/// all call instructions
CS_GRP_CALL,
/// all return instructions
CS_GRP_RET,
/// all interrupt instructions (int+syscall)
CS_GRP_INT,
/// all interrupt return instructions
CS_GRP_IRET,
}
}
pub type cs_skipdata_cb_t = Option<unsafe extern "C" fn(code: *const u8,
code_size: size_t,
offset: size_t,
user_data: *mut c_void)
-> size_t>;
#[repr(C)]
pub struct cs_opt_skipdata {
pub mnemonic: *const c_char,
pub callback: cs_skipdata_cb_t,
pub user_data: *mut c_void,
}
impl ::std::default::Default for cs_opt_skipdata {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
#[repr(C)]
pub struct cs_detail {
pub regs_read: [u8; 12usize],
pub regs_read_count: u8,
pub regs_write: [u8; 20usize],
pub regs_write_count: u8,
pub groups: [u8; 8usize],
pub groups_count: u8,
data: placeholders::detail_data,
}
impl cs_detail {
pub unsafe fn x86(&self) -> &x86::cs_x86 {
::std::mem::transmute(&self.data)
}
pub unsafe fn arm64(&self) -> &arm64::cs_arm64 {
::std::mem::transmute(&self.data)
}
pub unsafe fn arm(&self) -> &arm::cs_arm {
::std::mem::transmute(&self.data)
}
pub unsafe fn mips(&self) -> &mips::cs_mips {
::std::mem::transmute(&self.data)
}
pub unsafe fn ppc(&self) -> &ppc::cs_ppc {
::std::mem::transmute(&self.data)
}
pub unsafe fn
|
(&self) -> &sparc::cs_sparc {
::std::mem::transmute(&self.data)
}
pub unsafe fn sysz(&self) -> &sysz::cs_sysz {
::std::mem::transmute(&self.data)
}
pub unsafe fn xcore(&self) -> &xcore::cs_xcore {
::std::mem::transmute(&self.data)
}
}
/// Information on a disassembled instruction
#[repr(C)]
pub struct cs_insn {
/// Architecture-dependent instruction identifier, see `<ARCH>_INS_*`
pub id: c_uint,
/// Base address
pub address: u64,
/// Size of the instruction
pub size: u16,
/// Bytes of the instruction
pub bytes: [u8; 16usize],
/// C-string of the mnemonic
pub mnemonic: [c_char; 32usize],
/// C-string of the operands
pub op_str: [c_char; 160usize],
/// More details available if option `CS_OPTION_DETAIL` is on and if option
/// `CS_OPTION_SKIPDATA` is not on
pub detail: *mut cs_detail,
}
fake_enum! {
/// All type of errors encountered by Capstone API.
/// These are values returned by cs_errno()
pub enum cs_err {
/// No error: everything was fine
CS_ERR_OK = 0,
/// Out-Of-Memory error: cs_open(), cs_disasm(), cs_disasm_iter()
CS_ERR_MEM,
/// Unsupported architecture: cs_open()
CS_ERR_ARCH,
/// Invalid handle: cs_op_count(), cs_op_index()
CS_ERR_HANDLE,
/// Invalid csh argument: cs_close(), cs_errno(), cs_option()
CS_ERR_CSH,
/// Invalid/unsupported mode: cs_open()
CS_ERR_MODE,
/// Invalid/unsupported option: cs_option()
CS_ERR_OPTION,
/// Information is unavailable because detail option is OFF
CS_ERR_DETAIL,
/// Dynamic memory management uninitialized (see CS_OPT_MEM)
CS_ERR_MEMSETUP,
/// Unsupported version (bindings)
CS_ERR_VERSION,
/// Access irrelevant data in "diet" engine
CS_ERR_DIET,
/// Access irrelevant data for "data" instruction in SKIPDATA mode
CS_ERR_SKIPDATA,
/// X86 AT&T syntax is unsupported (opt-out at compile time)
CS_ERR_X86_ATT,
/// X86 Intel syntax is unsupported (opt-out at compile time)
CS_ERR_X86_INTEL,
}
}
#[link(name = "capstone", kind = "dylib")]
extern "C" {
/// Return combined API version & major and minor version numbers.
pub fn cs_version(major: *mut c_int, minor: *mut c_int) -> c_uint;
pub fn cs_support(query: c_int) -> u8;
/// Initialize a Capstone `handle` (non-null pointer) for a given architecture type `arch`
/// (`CS_ARCH_*`) and hardware `mode` (`CS_MODE_*`).
///
/// Returns CS_ERR_OK on success, or other value on failure (refer to cs_err enum for detailed
/// error).
pub fn cs_open(arch: cs_arch, mode: cs_mode, handle: *mut csh) -> cs_err;
/// Close a Capstone `handle` (and zero it).
///
/// Release the handle when it is no longer used, but only once Capstone itself is no longer
/// in use, in particular when there is no further access to any `cs_insn` array.
pub fn cs_close(handle: *mut csh) -> cs_err;
/// Set option `typ` to the given `value` for the disassembling engine at runtime.
pub fn cs_option(handle: csh, typ: cs_opt_type, value: size_t) -> cs_err;
/// Report the last error number for the given Capstone `handle` when some API function fails.
/// Like glibc's `errno`, `cs_errno` might not retain its old value once accessed.
pub fn cs_errno(handle: csh) -> cs_err;
/// Return a string describing given error `code`.
pub fn cs_strerror(code: cs_err) -> *const c_char;
/// Disassemble binary code in the context of `handle`, given the `code` buffer of size
/// `code_size`, the base `address` and the desired number (`count`) of instructions to decode.
/// Sets `insn` to point to an array of decoded instructions and returns the number of
/// instructions actually decoded.
///
/// # Safety
///
/// * `code` shall be valid and points to an array of bytes of at least `code_size`.
/// * `insn` shall be valid.
pub fn cs_disasm(handle: csh,
code: *const u8,
code_size: size_t,
address: u64,
count: size_t,
insn: *mut *mut cs_insn)
-> size_t;
/// Free a Capstone allocated array of instruction.
///
/// # Safety
///
/// `insn` shall originate either from a previous call to `cs_malloc`, in which case the count
/// should be 1, or from `cs_disasm`, in which case the count should be the return value of
/// `cs_disasm`.
pub fn cs_free(insn: *mut cs_insn, count: size_t);
/// Allocate a single instruction to be freed with `cs_free(insn, 1)`.
pub fn cs_malloc(handle: csh) -> *mut cs_insn;
/// Fast API to disassemble binary code, given the code buffer, size, address and number of
/// instructions to be decoded.
pub fn cs_disasm_iter(handle: csh,
code: *mut *const u8,
size: *mut size_t,
address: *mut u64,
insn: *mut cs_insn)
-> u8;
pub fn cs_reg_name(handle: csh, reg_id: c_uint) -> *const c_char;
pub fn cs_insn_name(handle: csh, insn_id: c_uint) -> *const c_char;
pub fn cs_group_name(handle: csh, group_id: c_uint) -> *const c_char;
pub fn cs_insn_group(handle: csh, insn: *const cs_insn, group_id: c_uint) -> u8;
pub fn cs_reg_read(handle: csh, insn: *const cs_insn, reg_id: c_uint) -> u8;
pub fn cs_reg_write(handle: csh, insn: *const cs_insn, reg_id: c_uint) -> u8;
pub fn cs_op_count(handle: csh, insn: *const cs_insn, op_type: c_uint) -> c_int;
pub fn cs_op_index(handle: csh,
insn: *const cs_insn,
op_type: c_uint,
position: c_uint)
-> c_int;
}
|
sparc
|
identifier_name
|
seq.rs
|
extern crate libc;
use std::fmt;
use std::default::Default;
use self::libc::{c_double};
/// Sequences are used for indexing Arrays
#[derive(Copy, Clone)]
#[repr(C)]
pub struct Seq {
begin: c_double,
end: c_double,
step: c_double,
}
/// Default `Seq` spans all the elements along a dimension
impl Default for Seq {
fn default() -> Seq
|
}
/// Enables use of `Seq` with `{}` format in print statements
impl fmt::Display for Seq {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[begin: {}, end: {}, step: {}]", self.begin, self.end, self.step)
}
}
impl Seq {
/// Create a `Seq` that goes from `begin` to `end` at a step size of `step`
pub fn new(begin: f64, end: f64, step: f64) -> Seq {
Seq { begin: begin, end: end, step: step, }
}
/// Get begin index of Seq
pub fn begin(&self) -> f64 {
self.begin as f64
}
/// Get end index of Seq
pub fn end(&self) -> f64 {
self.end as f64
}
/// Get step size of Seq
pub fn step(&self) -> f64 {
self.step as f64
}
}
|
{
Seq { begin: 1.0, end: 1.0, step: 0.0, }
}
|
identifier_body
|
seq.rs
|
extern crate libc;
use std::fmt;
use std::default::Default;
use self::libc::{c_double};
/// Sequences are used for indexing Arrays
#[derive(Copy, Clone)]
#[repr(C)]
pub struct
|
{
begin: c_double,
end: c_double,
step: c_double,
}
/// Default `Seq` spans all the elements along a dimension
impl Default for Seq {
fn default() -> Seq {
Seq { begin: 1.0, end: 1.0, step: 0.0, }
}
}
/// Enables use of `Seq` with `{}` format in print statements
impl fmt::Display for Seq {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[begin: {}, end: {}, step: {}]", self.begin, self.end, self.step)
}
}
impl Seq {
/// Create a `Seq` that goes from `begin` to `end` at a step size of `step`
pub fn new(begin: f64, end: f64, step: f64) -> Seq {
Seq { begin: begin, end: end, step: step, }
}
/// Get begin index of Seq
pub fn begin(&self) -> f64 {
self.begin as f64
}
/// Get end index of Seq
pub fn end(&self) -> f64 {
self.end as f64
}
/// Get step size of Seq
pub fn step(&self) -> f64 {
self.step as f64
}
}
|
Seq
|
identifier_name
|
seq.rs
|
extern crate libc;
use std::fmt;
use std::default::Default;
use self::libc::{c_double};
/// Sequences are used for indexing Arrays
#[derive(Copy, Clone)]
#[repr(C)]
pub struct Seq {
begin: c_double,
end: c_double,
|
step: c_double,
}
/// Default `Seq` spans all the elements along a dimension
impl Default for Seq {
fn default() -> Seq {
Seq { begin: 1.0, end: 1.0, step: 0.0, }
}
}
/// Enables use of `Seq` with `{}` format in print statements
impl fmt::Display for Seq {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[begin: {}, end: {}, step: {}]", self.begin, self.end, self.step)
}
}
impl Seq {
/// Create a `Seq` that goes from `begin` to `end` at a step size of `step`
pub fn new(begin: f64, end: f64, step: f64) -> Seq {
Seq { begin: begin, end: end, step: step, }
}
/// Get begin index of Seq
pub fn begin(&self) -> f64 {
self.begin as f64
}
/// Get end index of Seq
pub fn end(&self) -> f64 {
self.end as f64
}
/// Get step size of Seq
pub fn step(&self) -> f64 {
self.step as f64
}
}
|
random_line_split
|
|
main.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
extern crate rocket;
use std::io;
use rocket::request::{Form, FromFormValue};
use rocket::response::NamedFile;
use rocket::http::RawStr;
// TODO: Make deriving `FromForm` for this enum possible.
#[derive(Debug)]
enum FormOption {
A, B, C
}
impl<'v> FromFormValue<'v> for FormOption {
type Error = &'v RawStr;
fn from_form_value(v: &'v RawStr) -> Result<Self, Self::Error> {
let variant = match v.as_str() {
"a" => FormOption::A,
"b" => FormOption::B,
"c" => FormOption::C,
_ => return Err(v)
};
|
}
}
#[derive(Debug, FromForm)]
struct FormInput {
checkbox: bool,
number: usize,
#[form(field = "type")]
radio: FormOption,
password: String,
#[form(field = "textarea")]
text_area: String,
select: FormOption,
}
#[post("/", data = "<sink>")]
fn sink(sink: Result<Form<FormInput>, Option<String>>) -> String {
match sink {
Ok(form) => format!("{:?}", form.get()),
Err(Some(f)) => format!("Invalid form input: {}", f),
Err(None) => format!("Form input was invalid UTF8."),
}
}
#[get("/")]
fn index() -> io::Result<NamedFile> {
NamedFile::open("static/index.html")
}
fn main() {
rocket::ignite()
.mount("/", routes![index, sink])
.launch();
}
|
Ok(variant)
|
random_line_split
|
main.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
extern crate rocket;
use std::io;
use rocket::request::{Form, FromFormValue};
use rocket::response::NamedFile;
use rocket::http::RawStr;
// TODO: Make deriving `FromForm` for this enum possible.
#[derive(Debug)]
enum FormOption {
A, B, C
}
impl<'v> FromFormValue<'v> for FormOption {
type Error = &'v RawStr;
fn from_form_value(v: &'v RawStr) -> Result<Self, Self::Error> {
let variant = match v.as_str() {
"a" => FormOption::A,
"b" => FormOption::B,
"c" => FormOption::C,
_ => return Err(v)
};
Ok(variant)
}
}
#[derive(Debug, FromForm)]
struct FormInput {
checkbox: bool,
number: usize,
#[form(field = "type")]
radio: FormOption,
password: String,
#[form(field = "textarea")]
text_area: String,
select: FormOption,
}
#[post("/", data = "<sink>")]
fn sink(sink: Result<Form<FormInput>, Option<String>>) -> String {
match sink {
Ok(form) => format!("{:?}", form.get()),
Err(Some(f)) => format!("Invalid form input: {}", f),
Err(None) => format!("Form input was invalid UTF8."),
}
}
#[get("/")]
fn index() -> io::Result<NamedFile> {
NamedFile::open("static/index.html")
}
fn
|
() {
rocket::ignite()
.mount("/", routes![index, sink])
.launch();
}
|
main
|
identifier_name
|
main.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
extern crate rocket;
use std::io;
use rocket::request::{Form, FromFormValue};
use rocket::response::NamedFile;
use rocket::http::RawStr;
// TODO: Make deriving `FromForm` for this enum possible.
#[derive(Debug)]
enum FormOption {
A, B, C
}
impl<'v> FromFormValue<'v> for FormOption {
type Error = &'v RawStr;
fn from_form_value(v: &'v RawStr) -> Result<Self, Self::Error> {
let variant = match v.as_str() {
"a" => FormOption::A,
"b" => FormOption::B,
"c" => FormOption::C,
_ => return Err(v)
};
Ok(variant)
}
}
#[derive(Debug, FromForm)]
struct FormInput {
checkbox: bool,
number: usize,
#[form(field = "type")]
radio: FormOption,
password: String,
#[form(field = "textarea")]
text_area: String,
select: FormOption,
}
#[post("/", data = "<sink>")]
fn sink(sink: Result<Form<FormInput>, Option<String>>) -> String
|
#[get("/")]
fn index() -> io::Result<NamedFile> {
NamedFile::open("static/index.html")
}
fn main() {
rocket::ignite()
.mount("/", routes![index, sink])
.launch();
}
|
{
match sink {
Ok(form) => format!("{:?}", form.get()),
Err(Some(f)) => format!("Invalid form input: {}", f),
Err(None) => format!("Form input was invalid UTF8."),
}
}
|
identifier_body
|
mod.rs
|
// Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
/// `FileHelper` provides functions for CRUD on file.
pub mod file_helper;
mod data_map;
mod dir;
mod errors;
mod file;
mod reader;
#[cfg(test)]
mod tests;
mod writer;
pub use self::dir::create_dir;
pub use self::errors::NfsError;
pub use self::file::File;
pub use self::reader::Reader;
pub use self::writer::{Mode, Writer};
use futures::Future;
|
/// Helper type for futures that can result in `NfsError`.
pub type NfsFuture<T> = Future<Item = T, Error = NfsError>;
|
random_line_split
|
|
api_agent.rs
|
use actix::prelude::*;
use actix_web::*;
use bytes::Bytes;
use crate::actors::{ForwardA2AMsg, GetEndpoint};
use crate::app::AppData;
const MAX_PAYLOAD_SIZE: usize = 105_906_176;
pub fn generate_api_agent_configuration(api_prefix: &String) -> Box<dyn FnOnce(&mut web::ServiceConfig)> {
let prefix = api_prefix.clone();
Box::new( move |cfg: &mut web::ServiceConfig| {
cfg.service(
web::scope(&prefix)
.route("", web::get().to(_get_endpoint_details))
.route("/msg", web::post().to(_forward_message)));
})
}
fn
|
(state: web::Data<AppData>) -> Box<dyn Future<Item=HttpResponse, Error=Error>> {
let f = state.forward_agent
.send(GetEndpoint {})
.from_err()
.map(|res| match res {
Ok(endpoint) => HttpResponse::Ok().json(&endpoint),
Err(err) => HttpResponse::InternalServerError().body(format!("{:?}", err)).into(), // FIXME: Better error
});
Box::new(f)
}
/// The entry point of any agent communication. Incoming data are passed to Forward Agent for processing
fn _forward_message(state: web::Data<AppData>, stream: web::Payload) -> Box<dyn Future<Item=HttpResponse, Error=Error>> {
let f = stream.map_err(Error::from)
.fold(web::BytesMut::new(), move |mut body, chunk| {
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_PAYLOAD_SIZE {
Err(error::ErrorBadRequest("Payload exceeded maximum size."))
} else {
body.extend_from_slice(&chunk);
Ok::<_, Error>(body)
}
})
.and_then(move |body| {
state.forward_agent
.send(ForwardA2AMsg(body.to_vec()))
.from_err()
.and_then(|res| match res {
Ok(msg) => Ok(Bytes::from(msg).into()),
Err(err) => Ok(HttpResponse::InternalServerError().body(format!("{:?}", err)).into()), // FIXME: Better error
})
});
Box::new(f)
}
|
_get_endpoint_details
|
identifier_name
|
api_agent.rs
|
use actix::prelude::*;
use actix_web::*;
use bytes::Bytes;
use crate::actors::{ForwardA2AMsg, GetEndpoint};
use crate::app::AppData;
const MAX_PAYLOAD_SIZE: usize = 105_906_176;
pub fn generate_api_agent_configuration(api_prefix: &String) -> Box<dyn FnOnce(&mut web::ServiceConfig)> {
let prefix = api_prefix.clone();
Box::new( move |cfg: &mut web::ServiceConfig| {
cfg.service(
web::scope(&prefix)
.route("", web::get().to(_get_endpoint_details))
.route("/msg", web::post().to(_forward_message)));
})
}
fn _get_endpoint_details(state: web::Data<AppData>) -> Box<dyn Future<Item=HttpResponse, Error=Error>> {
let f = state.forward_agent
.send(GetEndpoint {})
.from_err()
.map(|res| match res {
Ok(endpoint) => HttpResponse::Ok().json(&endpoint),
Err(err) => HttpResponse::InternalServerError().body(format!("{:?}", err)).into(), // FIXME: Better error
});
Box::new(f)
}
/// The entry point of any agent communication. Incoming data are passed to Forward Agent for processing
fn _forward_message(state: web::Data<AppData>, stream: web::Payload) -> Box<dyn Future<Item=HttpResponse, Error=Error>> {
let f = stream.map_err(Error::from)
.fold(web::BytesMut::new(), move |mut body, chunk| {
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_PAYLOAD_SIZE {
Err(error::ErrorBadRequest("Payload exceeded maximum size."))
} else {
body.extend_from_slice(&chunk);
Ok::<_, Error>(body)
}
})
.and_then(move |body| {
state.forward_agent
.send(ForwardA2AMsg(body.to_vec()))
.from_err()
|
.and_then(|res| match res {
Ok(msg) => Ok(Bytes::from(msg).into()),
Err(err) => Ok(HttpResponse::InternalServerError().body(format!("{:?}", err)).into()), // FIXME: Better error
})
});
Box::new(f)
}
|
random_line_split
|
|
api_agent.rs
|
use actix::prelude::*;
use actix_web::*;
use bytes::Bytes;
use crate::actors::{ForwardA2AMsg, GetEndpoint};
use crate::app::AppData;
const MAX_PAYLOAD_SIZE: usize = 105_906_176;
pub fn generate_api_agent_configuration(api_prefix: &String) -> Box<dyn FnOnce(&mut web::ServiceConfig)>
|
fn _get_endpoint_details(state: web::Data<AppData>) -> Box<dyn Future<Item=HttpResponse, Error=Error>> {
let f = state.forward_agent
.send(GetEndpoint {})
.from_err()
.map(|res| match res {
Ok(endpoint) => HttpResponse::Ok().json(&endpoint),
Err(err) => HttpResponse::InternalServerError().body(format!("{:?}", err)).into(), // FIXME: Better error
});
Box::new(f)
}
/// The entry point of any agent communication. Incoming data are passed to Forward Agent for processing
fn _forward_message(state: web::Data<AppData>, stream: web::Payload) -> Box<dyn Future<Item=HttpResponse, Error=Error>> {
let f = stream.map_err(Error::from)
.fold(web::BytesMut::new(), move |mut body, chunk| {
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_PAYLOAD_SIZE {
Err(error::ErrorBadRequest("Payload exceeded maximum size."))
} else {
body.extend_from_slice(&chunk);
Ok::<_, Error>(body)
}
})
.and_then(move |body| {
state.forward_agent
.send(ForwardA2AMsg(body.to_vec()))
.from_err()
.and_then(|res| match res {
Ok(msg) => Ok(Bytes::from(msg).into()),
Err(err) => Ok(HttpResponse::InternalServerError().body(format!("{:?}", err)).into()), // FIXME: Better error
})
});
Box::new(f)
}
|
{
let prefix = api_prefix.clone();
Box::new( move |cfg: &mut web::ServiceConfig| {
cfg.service(
web::scope(&prefix)
.route("", web::get().to(_get_endpoint_details))
.route("/msg", web::post().to(_forward_message)));
})
}
|
identifier_body
|
jstraceable.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use syntax::ext::base::ExtCtxt;
use syntax::codemap::Span;
use syntax::ptr::P;
use syntax::ast::{Item, MetaItem, Expr};
use syntax::ast;
use syntax::attr;
use syntax::ext::build::AstBuilder;
use syntax::ext::deriving::generic::{combine_substructure, EnumMatching, FieldInfo, MethodDef, Struct, Substructure, TraitDef, ty};
use syntax::parse::token::InternedString;
pub fn expand_dom_struct(_: &mut ExtCtxt, _: Span, _: &MetaItem, item: P<Item>) -> P<Item> {
let mut item2 = (*item).clone();
{
let mut add_attr = |s| {
item2.attrs.push(attr::mk_attr_outer(attr::mk_attr_id(), attr::mk_word_item(InternedString::new(s))));
};
add_attr("must_root");
add_attr("privatize");
add_attr("jstraceable");
// The following attributes are only for internal usage
add_attr("_generate_reflector");
// #[dom_struct] gets consumed, so this lets us keep around a residue
// Do NOT register a modifier/decorator on this attribute
add_attr("_dom_struct_marker");
}
P(item2)
}
/// Provides the hook to expand `#[jstraceable]` into an implementation of `JSTraceable`
///
/// The expansion basically calls `trace()` on all of the fields of the struct/enum, erroring if they do not implement the method.
pub fn expand_jstraceable(cx: &mut ExtCtxt, span: Span, mitem: &MetaItem, item: &Item, push: &mut FnMut(P<Item>)) {
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
path: ty::Path::new(vec!("dom","bindings","trace","JSTraceable")),
additional_bounds: Vec::new(),
generics: ty::LifetimeBounds::empty(),
methods: vec![
MethodDef {
name: "trace",
generics: ty::LifetimeBounds::empty(),
explicit_self: ty::borrowed_explicit_self(),
args: vec!(ty::Ptr(box ty::Literal(ty::Path::new(vec!("js","jsapi","JSTracer"))), ty::Raw(ast::MutMutable))),
ret_ty: ty::nil_ty(),
attributes: vec!(attr::mk_attr_outer(attr::mk_attr_id(),
attr::mk_name_value_item_str(InternedString::new("inline"),
InternedString::new("always")))),
combine_substructure: combine_substructure(box jstraceable_substructure)
}
],
associated_types: vec![],
};
trait_def.expand(cx, mitem, item, |a| push(a))
}
|
fn jstraceable_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> P<Expr> {
let state_expr = match substr.nonself_args {
[ref state_expr] => state_expr,
_ => cx.span_bug(trait_span, "incorrect number of arguments in `jstraceable`")
};
let trace_ident = substr.method_ident;
let call_trace = |span, thing_expr| {
let expr = cx.expr_method_call(span, thing_expr, trace_ident, vec!(state_expr.clone()));
cx.stmt_expr(expr)
};
let mut stmts = Vec::new();
let fields = match *substr.fields {
Struct(ref fs) | EnumMatching(_, _, ref fs) => fs,
_ => cx.span_bug(trait_span, "impossible substructure in `jstraceable`")
};
for &FieldInfo { ref self_, span, .. } in fields.iter() {
stmts.push(call_trace(span, self_.clone()));
}
cx.expr_block(cx.block(trait_span, stmts, None))
}
|
// Mostly copied from syntax::ext::deriving::hash
/// Defines how the implementation for `trace()` is to be generated
|
random_line_split
|
jstraceable.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use syntax::ext::base::ExtCtxt;
use syntax::codemap::Span;
use syntax::ptr::P;
use syntax::ast::{Item, MetaItem, Expr};
use syntax::ast;
use syntax::attr;
use syntax::ext::build::AstBuilder;
use syntax::ext::deriving::generic::{combine_substructure, EnumMatching, FieldInfo, MethodDef, Struct, Substructure, TraitDef, ty};
use syntax::parse::token::InternedString;
pub fn expand_dom_struct(_: &mut ExtCtxt, _: Span, _: &MetaItem, item: P<Item>) -> P<Item> {
let mut item2 = (*item).clone();
{
let mut add_attr = |s| {
item2.attrs.push(attr::mk_attr_outer(attr::mk_attr_id(), attr::mk_word_item(InternedString::new(s))));
};
add_attr("must_root");
add_attr("privatize");
add_attr("jstraceable");
// The following attributes are only for internal usage
add_attr("_generate_reflector");
// #[dom_struct] gets consumed, so this lets us keep around a residue
// Do NOT register a modifier/decorator on this attribute
add_attr("_dom_struct_marker");
}
P(item2)
}
/// Provides the hook to expand `#[jstraceable]` into an implementation of `JSTraceable`
///
/// The expansion basically calls `trace()` on all of the fields of the struct/enum, erroring if they do not implement the method.
pub fn expand_jstraceable(cx: &mut ExtCtxt, span: Span, mitem: &MetaItem, item: &Item, push: &mut FnMut(P<Item>))
|
associated_types: vec![],
};
trait_def.expand(cx, mitem, item, |a| push(a))
}
// Mostly copied from syntax::ext::deriving::hash
/// Defines how the implementation for `trace()` is to be generated
fn jstraceable_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> P<Expr> {
let state_expr = match substr.nonself_args {
[ref state_expr] => state_expr,
_ => cx.span_bug(trait_span, "incorrect number of arguments in `jstraceable`")
};
let trace_ident = substr.method_ident;
let call_trace = |span, thing_expr| {
let expr = cx.expr_method_call(span, thing_expr, trace_ident, vec!(state_expr.clone()));
cx.stmt_expr(expr)
};
let mut stmts = Vec::new();
let fields = match *substr.fields {
Struct(ref fs) | EnumMatching(_, _, ref fs) => fs,
_ => cx.span_bug(trait_span, "impossible substructure in `jstraceable`")
};
for &FieldInfo { ref self_, span, .. } in fields.iter() {
stmts.push(call_trace(span, self_.clone()));
}
cx.expr_block(cx.block(trait_span, stmts, None))
}
|
{
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
path: ty::Path::new(vec!("dom","bindings","trace","JSTraceable")),
additional_bounds: Vec::new(),
generics: ty::LifetimeBounds::empty(),
methods: vec![
MethodDef {
name: "trace",
generics: ty::LifetimeBounds::empty(),
explicit_self: ty::borrowed_explicit_self(),
args: vec!(ty::Ptr(box ty::Literal(ty::Path::new(vec!("js","jsapi","JSTracer"))), ty::Raw(ast::MutMutable))),
ret_ty: ty::nil_ty(),
attributes: vec!(attr::mk_attr_outer(attr::mk_attr_id(),
attr::mk_name_value_item_str(InternedString::new("inline"),
InternedString::new("always")))),
combine_substructure: combine_substructure(box jstraceable_substructure)
}
],
|
identifier_body
|
jstraceable.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use syntax::ext::base::ExtCtxt;
use syntax::codemap::Span;
use syntax::ptr::P;
use syntax::ast::{Item, MetaItem, Expr};
use syntax::ast;
use syntax::attr;
use syntax::ext::build::AstBuilder;
use syntax::ext::deriving::generic::{combine_substructure, EnumMatching, FieldInfo, MethodDef, Struct, Substructure, TraitDef, ty};
use syntax::parse::token::InternedString;
pub fn
|
(_: &mut ExtCtxt, _: Span, _: &MetaItem, item: P<Item>) -> P<Item> {
let mut item2 = (*item).clone();
{
let mut add_attr = |s| {
item2.attrs.push(attr::mk_attr_outer(attr::mk_attr_id(), attr::mk_word_item(InternedString::new(s))));
};
add_attr("must_root");
add_attr("privatize");
add_attr("jstraceable");
// The following attributes are only for internal usage
add_attr("_generate_reflector");
// #[dom_struct] gets consumed, so this lets us keep around a residue
// Do NOT register a modifier/decorator on this attribute
add_attr("_dom_struct_marker");
}
P(item2)
}
/// Provides the hook to expand `#[jstraceable]` into an implementation of `JSTraceable`
///
/// The expansion basically calls `trace()` on all of the fields of the struct/enum, erroring if they do not implement the method.
pub fn expand_jstraceable(cx: &mut ExtCtxt, span: Span, mitem: &MetaItem, item: &Item, push: &mut FnMut(P<Item>)) {
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
path: ty::Path::new(vec!("dom","bindings","trace","JSTraceable")),
additional_bounds: Vec::new(),
generics: ty::LifetimeBounds::empty(),
methods: vec![
MethodDef {
name: "trace",
generics: ty::LifetimeBounds::empty(),
explicit_self: ty::borrowed_explicit_self(),
args: vec!(ty::Ptr(box ty::Literal(ty::Path::new(vec!("js","jsapi","JSTracer"))), ty::Raw(ast::MutMutable))),
ret_ty: ty::nil_ty(),
attributes: vec!(attr::mk_attr_outer(attr::mk_attr_id(),
attr::mk_name_value_item_str(InternedString::new("inline"),
InternedString::new("always")))),
combine_substructure: combine_substructure(box jstraceable_substructure)
}
],
associated_types: vec![],
};
trait_def.expand(cx, mitem, item, |a| push(a))
}
// Mostly copied from syntax::ext::deriving::hash
/// Defines how the implementation for `trace()` is to be generated
fn jstraceable_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> P<Expr> {
let state_expr = match substr.nonself_args {
[ref state_expr] => state_expr,
_ => cx.span_bug(trait_span, "incorrect number of arguments in `jstraceable`")
};
let trace_ident = substr.method_ident;
let call_trace = |span, thing_expr| {
let expr = cx.expr_method_call(span, thing_expr, trace_ident, vec!(state_expr.clone()));
cx.stmt_expr(expr)
};
let mut stmts = Vec::new();
let fields = match *substr.fields {
Struct(ref fs) | EnumMatching(_, _, ref fs) => fs,
_ => cx.span_bug(trait_span, "impossible substructure in `jstraceable`")
};
for &FieldInfo { ref self_, span, .. } in fields.iter() {
stmts.push(call_trace(span, self_.clone()));
}
cx.expr_block(cx.block(trait_span, stmts, None))
}
|
expand_dom_struct
|
identifier_name
|
request.rs
|
use std::io::Read;
use std::net::{SocketAddr, TcpStream};
use super::headers::Headers;
use super::query::Query;
#[allow(dead_code)]
pub struct Request {
http_version: (u16, u16),
method: String,
scheme: String,
path: Vec<String>,
path_str: String,
query: Option<Query>,
headers: Headers,
content_length: Option<u64>,
stream: TcpStream,
}
impl Request {
pub fn new(method: &str, scheme: &str, url: &str, query: Option<Query>,
version: (u16, u16), headers: Headers,
content_length: Option<u64>,
stream: &TcpStream) -> Self {
let path = url[1..url.len()].split('/').map(|x| x.to_owned()).collect();
Request {
http_version: version,
method: method.to_owned(),
scheme: scheme.to_owned(),
path: path,
path_str: url.to_owned(),
headers: headers,
query: query,
content_length: content_length,
stream: stream.try_clone().unwrap(),
}
}
pub fn http_version(&self) -> (u16, u16) {
self.http_version
}
pub fn method(&self) -> &str {
&self.method
}
pub fn scheme(&self) -> &str {
&self.scheme
}
pub fn host(&self) -> SocketAddr {
self.stream.local_addr().unwrap()
}
pub fn path(&self) -> &str {
&self.path_str
}
pub fn path_components(&self) -> Vec<&str> {
self.path.iter().map(|i| i.as_ref()).collect()
}
pub fn query(&self) -> &Option<Query> {
&self.query
}
|
}
pub fn content_length(&self) -> Option<u64> {
self.content_length
}
pub fn headers(&self) -> &Headers {
&self.headers
}
pub fn body<'a>(&'a mut self) -> &'a mut Read {
&mut self.stream
}
}
|
pub fn remote_addr(&self) -> SocketAddr {
self.stream.peer_addr().unwrap()
|
random_line_split
|
request.rs
|
use std::io::Read;
use std::net::{SocketAddr, TcpStream};
use super::headers::Headers;
use super::query::Query;
#[allow(dead_code)]
pub struct Request {
http_version: (u16, u16),
method: String,
scheme: String,
path: Vec<String>,
path_str: String,
query: Option<Query>,
headers: Headers,
content_length: Option<u64>,
stream: TcpStream,
}
impl Request {
pub fn new(method: &str, scheme: &str, url: &str, query: Option<Query>,
version: (u16, u16), headers: Headers,
content_length: Option<u64>,
stream: &TcpStream) -> Self {
let path = url[1..url.len()].split('/').map(|x| x.to_owned()).collect();
Request {
http_version: version,
method: method.to_owned(),
scheme: scheme.to_owned(),
path: path,
path_str: url.to_owned(),
headers: headers,
query: query,
content_length: content_length,
stream: stream.try_clone().unwrap(),
}
}
pub fn http_version(&self) -> (u16, u16) {
self.http_version
}
pub fn method(&self) -> &str {
&self.method
}
pub fn scheme(&self) -> &str {
&self.scheme
}
pub fn host(&self) -> SocketAddr {
self.stream.local_addr().unwrap()
}
pub fn path(&self) -> &str {
&self.path_str
}
pub fn path_components(&self) -> Vec<&str> {
self.path.iter().map(|i| i.as_ref()).collect()
}
pub fn query(&self) -> &Option<Query>
|
pub fn remote_addr(&self) -> SocketAddr {
self.stream.peer_addr().unwrap()
}
pub fn content_length(&self) -> Option<u64> {
self.content_length
}
pub fn headers(&self) -> &Headers {
&self.headers
}
pub fn body<'a>(&'a mut self) -> &'a mut Read {
&mut self.stream
}
}
|
{
&self.query
}
|
identifier_body
|
request.rs
|
use std::io::Read;
use std::net::{SocketAddr, TcpStream};
use super::headers::Headers;
use super::query::Query;
#[allow(dead_code)]
pub struct Request {
http_version: (u16, u16),
method: String,
scheme: String,
path: Vec<String>,
path_str: String,
query: Option<Query>,
headers: Headers,
content_length: Option<u64>,
stream: TcpStream,
}
impl Request {
pub fn new(method: &str, scheme: &str, url: &str, query: Option<Query>,
version: (u16, u16), headers: Headers,
content_length: Option<u64>,
stream: &TcpStream) -> Self {
let path = url[1..url.len()].split('/').map(|x| x.to_owned()).collect();
Request {
http_version: version,
method: method.to_owned(),
scheme: scheme.to_owned(),
path: path,
path_str: url.to_owned(),
headers: headers,
query: query,
content_length: content_length,
stream: stream.try_clone().unwrap(),
}
}
pub fn http_version(&self) -> (u16, u16) {
self.http_version
}
pub fn method(&self) -> &str {
&self.method
}
pub fn scheme(&self) -> &str {
&self.scheme
}
pub fn host(&self) -> SocketAddr {
self.stream.local_addr().unwrap()
}
pub fn path(&self) -> &str {
&self.path_str
}
pub fn path_components(&self) -> Vec<&str> {
self.path.iter().map(|i| i.as_ref()).collect()
}
pub fn query(&self) -> &Option<Query> {
&self.query
}
pub fn remote_addr(&self) -> SocketAddr {
self.stream.peer_addr().unwrap()
}
pub fn
|
(&self) -> Option<u64> {
self.content_length
}
pub fn headers(&self) -> &Headers {
&self.headers
}
pub fn body<'a>(&'a mut self) -> &'a mut Read {
&mut self.stream
}
}
|
content_length
|
identifier_name
|
etcd.rs
|
extern crate serde;
extern crate serde_json;
extern crate hyper;
extern crate openssl;
use std::time::Duration;
use std::thread;
use hyper::Error;
use hyper::status::StatusCode;
use serde_json::Value;
pub use common::etcd::SSLOptions;
use common::etcd::etcd_https_client;
/// Polls the etcd health endpoint until it reports we're healthy.
pub fn wait_till_healthy(server_url: String, options: SSLOptions)
-> Result<(), Error>
|
if health == "true" {
return Ok(());
} else {
println!("Health is {}", health);
}
} else {
println!("Etcd not responding. Will try again");
}
}
}
|
{
let client = try!(etcd_https_client(options));
let health_url = server_url + "/health";
loop {
thread::sleep(Duration::from_millis(1000));
println!("Checking etcd status...");
if let Ok(response) = client.get(&health_url).send() {
if response.status != StatusCode::Ok {
println!("Got HTTP {}", response.status);
continue;
}
let value: Value = serde_json::from_reader(response).unwrap();
let health = value
.as_object().unwrap()
.get("health").unwrap()
.as_str().unwrap();
|
identifier_body
|
etcd.rs
|
extern crate serde;
extern crate serde_json;
extern crate hyper;
extern crate openssl;
use std::time::Duration;
use std::thread;
use hyper::Error;
use hyper::status::StatusCode;
use serde_json::Value;
pub use common::etcd::SSLOptions;
use common::etcd::etcd_https_client;
/// Polls the etcd health endpoint until it reports we're healthy.
pub fn wait_till_healthy(server_url: String, options: SSLOptions)
-> Result<(), Error> {
let client = try!(etcd_https_client(options));
let health_url = server_url + "/health";
loop {
thread::sleep(Duration::from_millis(1000));
println!("Checking etcd status...");
if let Ok(response) = client.get(&health_url).send() {
if response.status != StatusCode::Ok {
println!("Got HTTP {}", response.status);
continue;
}
let value: Value = serde_json::from_reader(response).unwrap();
let health = value
.as_object().unwrap()
.get("health").unwrap()
.as_str().unwrap();
if health == "true" {
return Ok(());
} else {
println!("Health is {}", health);
|
}
} else {
println!("Etcd not responding. Will try again");
}
}
}
|
random_line_split
|
|
etcd.rs
|
extern crate serde;
extern crate serde_json;
extern crate hyper;
extern crate openssl;
use std::time::Duration;
use std::thread;
use hyper::Error;
use hyper::status::StatusCode;
use serde_json::Value;
pub use common::etcd::SSLOptions;
use common::etcd::etcd_https_client;
/// Polls the etcd health endpoint until it reports we're healthy.
pub fn
|
(server_url: String, options: SSLOptions)
-> Result<(), Error> {
let client = try!(etcd_https_client(options));
let health_url = server_url + "/health";
loop {
thread::sleep(Duration::from_millis(1000));
println!("Checking etcd status...");
if let Ok(response) = client.get(&health_url).send() {
if response.status != StatusCode::Ok {
println!("Got HTTP {}", response.status);
continue;
}
let value: Value = serde_json::from_reader(response).unwrap();
let health = value
.as_object().unwrap()
.get("health").unwrap()
.as_str().unwrap();
if health == "true" {
return Ok(());
} else {
println!("Health is {}", health);
}
} else {
println!("Etcd not responding. Will try again");
}
}
}
|
wait_till_healthy
|
identifier_name
|
etcd.rs
|
extern crate serde;
extern crate serde_json;
extern crate hyper;
extern crate openssl;
use std::time::Duration;
use std::thread;
use hyper::Error;
use hyper::status::StatusCode;
use serde_json::Value;
pub use common::etcd::SSLOptions;
use common::etcd::etcd_https_client;
/// Polls the etcd health endpoint until it reports we're healthy.
pub fn wait_till_healthy(server_url: String, options: SSLOptions)
-> Result<(), Error> {
let client = try!(etcd_https_client(options));
let health_url = server_url + "/health";
loop {
thread::sleep(Duration::from_millis(1000));
println!("Checking etcd status...");
if let Ok(response) = client.get(&health_url).send() {
if response.status != StatusCode::Ok {
println!("Got HTTP {}", response.status);
continue;
}
let value: Value = serde_json::from_reader(response).unwrap();
let health = value
.as_object().unwrap()
.get("health").unwrap()
.as_str().unwrap();
if health == "true" {
return Ok(());
} else {
println!("Health is {}", health);
}
} else
|
}
}
|
{
println!("Etcd not responding. Will try again");
}
|
conditional_block
|
linearinterpol.rs
|
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use std::io::{self, Read, Write};
use std::ops::Sub;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use common::BinarySerializable;
use common::FixedSize;
use tantivy_bitpacker::BitUnpacker;
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct LinearInterpolFastFieldReader {
bit_unpacker: BitUnpacker,
pub footer: LinearInterpolFooter,
pub slope: f32,
}
#[derive(Clone, Debug)]
pub struct LinearInterpolFooter {
pub relative_max_value: u64,
pub offset: u64,
pub first_val: u64,
pub last_val: u64,
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
}
impl BinarySerializable for LinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.relative_max_value.serialize(write)?;
self.offset.serialize(write)?;
self.first_val.serialize(write)?;
self.last_val.serialize(write)?;
self.num_vals.serialize(write)?;
self.min_value.serialize(write)?;
self.max_value.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
Ok(LinearInterpolFooter {
relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?,
last_val: u64::deserialize(reader)?,
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
})
}
}
impl FixedSize for LinearInterpolFooter {
const SIZE_IN_BYTES: usize = 56;
}
impl FastFieldCodecReader for LinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearInterpolFastFieldReader {
bit_unpacker,
footer,
slope,
})
}
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
(calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
}
#[inline]
fn min_value(&self) -> u64 {
self.footer.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.footer.max_value
}
}
/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearInterpolFastFieldSerializer {}
#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 {
return 0.0;
}
// We calculate the slope with f64 high precision and use the result in lower precision f32
// This is done in order to handle estimations for very large values like i64::MAX
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
first_val + (pos as f32 * slope) as u64
}
impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
const NAME: &'static str = "LinearInterpol";
const ID: u8 = 2;
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// calculate offset to ensure all values are positive
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in data_iter1.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value {
// negative value we need to apply an offset
// we ignore negative values in the max value calculation, because negative values
// will be offset to 0
offset = offset.max(calculated_value - actual_value);
} else {
// positive value, no offset required
rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
}
}
// rel_positive_max will be adjusted by offset
let relative_max_value = rel_positive_max + offset;
let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new();
for (pos, val) in data_iter.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.close(write)?;
let footer = LinearInterpolFooter {
relative_max_value,
offset,
first_val,
last_val,
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 3 {
return false; //disable compressor for this case
}
// On serialisation the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add this to the max value.
// If this doesn't overflow, the algorithm should be fine
let theorethical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theorethical_maximum_offset)
.is_none()
|
return false;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima for the deviation of the calculated value are and
/// the offset to shift all values to >=0 is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// let's sample at 0%, 5%, 10% .. 95%, 100%
let num_vals = stats.num_vals as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|pos| {
let calculated_value = get_calculated_value(first_val, *pos as u64, slope);
let actual_value = fastfield_accessor.get_val(*pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap_or(0);
// the theory would be that we don't have the actual max_distance, but we are close within 50%
// threshold.
// It is multiplied by 2 because in a log case scenario the line would be as much above as
// below. So the offset would = max_distance
//
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
+ LinearInterpolFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[inline]
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<
LinearInterpolFastFieldSerializer,
LinearInterpolFastFieldReader,
>(data, name)
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.01);
assert!(estimate < 0.01);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn linear_interpol_fast_field_test_large_amplitude() {
let data = vec![
i64::MAX as u64 / 2,
i64::MAX as u64 / 3,
i64::MAX as u64 / 2,
];
create_and_validate(&data, "large amplitude");
}
#[test]
fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
create_and_validate(&data, "concave data");
}
#[test]
fn linear_interpol_fast_convex_data() {
let data = vec![0, 40, 60, 70, 75, 77];
create_and_validate(&data, "convex data");
}
#[test]
fn linear_interpol_fast_field_test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn linear_interpol_fast_field_rand() {
for _ in 0..5000 {
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}
}
}
|
{
|
random_line_split
|
linearinterpol.rs
|
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use std::io::{self, Read, Write};
use std::ops::Sub;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use common::BinarySerializable;
use common::FixedSize;
use tantivy_bitpacker::BitUnpacker;
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct LinearInterpolFastFieldReader {
bit_unpacker: BitUnpacker,
pub footer: LinearInterpolFooter,
pub slope: f32,
}
#[derive(Clone, Debug)]
pub struct LinearInterpolFooter {
pub relative_max_value: u64,
pub offset: u64,
pub first_val: u64,
pub last_val: u64,
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
}
impl BinarySerializable for LinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.relative_max_value.serialize(write)?;
self.offset.serialize(write)?;
self.first_val.serialize(write)?;
self.last_val.serialize(write)?;
self.num_vals.serialize(write)?;
self.min_value.serialize(write)?;
self.max_value.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
Ok(LinearInterpolFooter {
relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?,
last_val: u64::deserialize(reader)?,
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
})
}
}
impl FixedSize for LinearInterpolFooter {
const SIZE_IN_BYTES: usize = 56;
}
impl FastFieldCodecReader for LinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearInterpolFastFieldReader {
bit_unpacker,
footer,
slope,
})
}
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
(calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
}
#[inline]
fn min_value(&self) -> u64 {
self.footer.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.footer.max_value
}
}
/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearInterpolFastFieldSerializer {}
#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 {
return 0.0;
}
// We calculate the slope with f64 high precision and use the result in lower precision f32
// This is done in order to handle estimations for very large values like i64::MAX
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
first_val + (pos as f32 * slope) as u64
}
impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
const NAME: &'static str = "LinearInterpol";
const ID: u8 = 2;
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// calculate offset to ensure all values are positive
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in data_iter1.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value {
// negative value we need to apply an offset
// we ignore negative values in the max value calculation, because negative values
// will be offset to 0
offset = offset.max(calculated_value - actual_value);
} else
|
}
// rel_positive_max will be adjusted by offset
let relative_max_value = rel_positive_max + offset;
let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new();
for (pos, val) in data_iter.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.close(write)?;
let footer = LinearInterpolFooter {
relative_max_value,
offset,
first_val,
last_val,
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 3 {
return false; //disable compressor for this case
}
// On serialisation the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add this to the max value.
// If this doesn't overflow, the algorithm should be fine
let theorethical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theorethical_maximum_offset)
.is_none()
{
return false;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima for the deviation of the calculated value are and
/// the offset to shift all values to >=0 is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// let's sample at 0%, 5%, 10% .. 95%, 100%
let num_vals = stats.num_vals as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|pos| {
let calculated_value = get_calculated_value(first_val, *pos as u64, slope);
let actual_value = fastfield_accessor.get_val(*pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap_or(0);
// the theory would be that we don't have the actual max_distance, but we are close within 50%
// threshold.
// It is multiplied by 2 because in a log case scenario the line would be as much above as
// below. So the offset would = max_distance
//
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
+ LinearInterpolFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[inline]
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<
LinearInterpolFastFieldSerializer,
LinearInterpolFastFieldReader,
>(data, name)
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.01);
assert!(estimate < 0.01);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn linear_interpol_fast_field_test_large_amplitude() {
let data = vec![
i64::MAX as u64 / 2,
i64::MAX as u64 / 3,
i64::MAX as u64 / 2,
];
create_and_validate(&data, "large amplitude");
}
#[test]
fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
create_and_validate(&data, "concave data");
}
#[test]
fn linear_interpol_fast_convex_data() {
let data = vec![0, 40, 60, 70, 75, 77];
create_and_validate(&data, "convex data");
}
#[test]
fn linear_interpol_fast_field_test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn linear_interpol_fast_field_rand() {
for _ in 0..5000 {
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}
}
}
|
{
// positive value, no offset required
rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
}
|
conditional_block
|
linearinterpol.rs
|
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use std::io::{self, Read, Write};
use std::ops::Sub;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use common::BinarySerializable;
use common::FixedSize;
use tantivy_bitpacker::BitUnpacker;
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct LinearInterpolFastFieldReader {
bit_unpacker: BitUnpacker,
pub footer: LinearInterpolFooter,
pub slope: f32,
}
#[derive(Clone, Debug)]
pub struct LinearInterpolFooter {
pub relative_max_value: u64,
pub offset: u64,
pub first_val: u64,
pub last_val: u64,
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
}
impl BinarySerializable for LinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.relative_max_value.serialize(write)?;
self.offset.serialize(write)?;
self.first_val.serialize(write)?;
self.last_val.serialize(write)?;
self.num_vals.serialize(write)?;
self.min_value.serialize(write)?;
self.max_value.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
Ok(LinearInterpolFooter {
relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?,
last_val: u64::deserialize(reader)?,
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
})
}
}
impl FixedSize for LinearInterpolFooter {
const SIZE_IN_BYTES: usize = 56;
}
impl FastFieldCodecReader for LinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearInterpolFastFieldReader {
bit_unpacker,
footer,
slope,
})
}
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
(calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
}
#[inline]
fn min_value(&self) -> u64 {
self.footer.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.footer.max_value
}
}
/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearInterpolFastFieldSerializer {}
#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 {
return 0.0;
}
// We calculate the slope with f64 high precision and use the result in lower precision f32
// This is done in order to handle estimations for very large values like i64::MAX
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
first_val + (pos as f32 * slope) as u64
}
impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
const NAME: &'static str = "LinearInterpol";
const ID: u8 = 2;
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// calculate offset to ensure all values are positive
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in data_iter1.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value {
// negative value we need to apply an offset
// we ignore negative values in the max value calculation, because negative values
// will be offset to 0
offset = offset.max(calculated_value - actual_value);
} else {
// positive value, no offset required
rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
}
}
// rel_positive_max will be adjusted by offset
let relative_max_value = rel_positive_max + offset;
let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new();
for (pos, val) in data_iter.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.close(write)?;
let footer = LinearInterpolFooter {
relative_max_value,
offset,
first_val,
last_val,
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 3 {
return false; //disable compressor for this case
}
// On serialisation the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine
let theorethical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theorethical_maximum_offset)
.is_none()
{
return false;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima of the deviation from the calculated value are, and
/// the offset needed to shift all values to >= 0 is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// let's sample at 0%, 5%, 10%.. 95%, 100%
let num_vals = stats.num_vals as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|pos| {
let calculated_value = get_calculated_value(first_val, *pos as u64, slope);
let actual_value = fastfield_accessor.get_val(*pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap_or(0);
// the theory would be that we don't have the actual max_distance, but we are close within 50%
// threshold.
// It is multiplied by 2 because in a log case scenario the line would be as much above as
// below. So the offset would = max_distance
//
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
+ LinearInterpolFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[inline]
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<
LinearInterpolFastFieldSerializer,
LinearInterpolFastFieldReader,
>(data, name)
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.01);
assert!(estimate < 0.01);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn linear_interpol_fast_field_test_large_amplitude() {
let data = vec![
i64::MAX as u64 / 2,
i64::MAX as u64 / 3,
i64::MAX as u64 / 2,
];
create_and_validate(&data, "large amplitude");
}
#[test]
fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
create_and_validate(&data, "concave data");
}
#[test]
fn linear_interpol_fast_convex_data() {
let data = vec![0, 40, 60, 70, 75, 77];
create_and_validate(&data, "convex data");
}
#[test]
fn linear_interpol_fast_field_test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn linear_interpol_fast_field_rand()
|
}
|
{
for _ in 0..5000 {
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}
}
|
identifier_body
|
linearinterpol.rs
|
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use std::io::{self, Read, Write};
use std::ops::Sub;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use common::BinarySerializable;
use common::FixedSize;
use tantivy_bitpacker::BitUnpacker;
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct LinearInterpolFastFieldReader {
bit_unpacker: BitUnpacker,
pub footer: LinearInterpolFooter,
pub slope: f32,
}
#[derive(Clone, Debug)]
pub struct LinearInterpolFooter {
pub relative_max_value: u64,
pub offset: u64,
pub first_val: u64,
pub last_val: u64,
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
}
impl BinarySerializable for LinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.relative_max_value.serialize(write)?;
self.offset.serialize(write)?;
self.first_val.serialize(write)?;
self.last_val.serialize(write)?;
self.num_vals.serialize(write)?;
self.min_value.serialize(write)?;
self.max_value.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
Ok(LinearInterpolFooter {
relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?,
last_val: u64::deserialize(reader)?,
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
})
}
}
impl FixedSize for LinearInterpolFooter {
const SIZE_IN_BYTES: usize = 56;
}
impl FastFieldCodecReader for LinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearInterpolFastFieldReader {
bit_unpacker,
footer,
slope,
})
}
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
(calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
}
#[inline]
fn min_value(&self) -> u64 {
self.footer.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.footer.max_value
}
}
/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearInterpolFastFieldSerializer {}
#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 {
return 0.0;
}
// We calculate the slope with f64 high precision and use the result in lower precision f32
// This is done in order to handle estimations for very large values like i64::MAX
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
first_val + (pos as f32 * slope) as u64
}
impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
const NAME: &'static str = "LinearInterpol";
const ID: u8 = 2;
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// calculate offset to ensure all values are positive
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in data_iter1.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value {
// the predicted value overshoots the actual value, so we need to apply an offset
// we ignore these negative residuals in the max value calculation, because they
// will be offset to 0
offset = offset.max(calculated_value - actual_value);
} else {
// positive value, no offset required
rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
}
}
// rel_positive_max will be adjusted by offset
let relative_max_value = rel_positive_max + offset;
let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new();
for (pos, val) in data_iter.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.close(write)?;
let footer = LinearInterpolFooter {
relative_max_value,
offset,
first_val,
last_val,
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 3 {
return false; //disable compressor for this case
}
// On serialisation the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine
let theorethical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theorethical_maximum_offset)
.is_none()
{
return false;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima of the deviation from the calculated value are, and
/// the offset needed to shift all values to >= 0 is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// let's sample at 0%, 5%, 10%.. 95%, 100%
let num_vals = stats.num_vals as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|pos| {
let calculated_value = get_calculated_value(first_val, *pos as u64, slope);
let actual_value = fastfield_accessor.get_val(*pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap_or(0);
// the theory would be that we don't have the actual max_distance, but we are close within 50%
// threshold.
// It is multiplied by 2 because in a log case scenario the line would be as much above as
// below. So the offset would = max_distance
//
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
+ LinearInterpolFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[inline]
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<
LinearInterpolFastFieldSerializer,
LinearInterpolFastFieldReader,
>(data, name)
}
#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
assert!(actual_compression < 0.01);
assert!(estimate < 0.01);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn linear_interpol_fast_field_test_large_amplitude() {
let data = vec![
i64::MAX as u64 / 2,
i64::MAX as u64 / 3,
i64::MAX as u64 / 2,
];
create_and_validate(&data, "large amplitude");
}
#[test]
fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
create_and_validate(&data, "concave data");
}
#[test]
fn linear_interpol_fast_convex_data() {
let data = vec![0, 40, 60, 70, 75, 77];
create_and_validate(&data, "convex data");
}
#[test]
fn
|
() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn linear_interpol_fast_field_rand() {
for _ in 0..5000 {
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}
}
}
|
linear_interpol_fast_field_test_simple
|
identifier_name
|
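The LinearInterpol codec in the two linearinterpol.rs rows above rests on one idea: predict each value from the straight line through the first and last values, shift all residuals by a common offset so they become non-negative, and bit-pack only those residuals. A minimal sketch of that round trip, using hypothetical names (encode_linear / decode_linear) and a plain Vec<u64> in place of the crate's BitPacker and footer, could look like this:

// Illustrative sketch only; mirrors get_slope/get_calculated_value from the rows above.
fn encode_linear(data: &[u64]) -> (u64, f32, u64, Vec<u64>) {
    let first = data[0];
    let last = *data.last().unwrap();
    let slope = if data.len() <= 1 {
        0.0
    } else {
        ((last as f64 - first as f64) / (data.len() - 1) as f64) as f32
    };
    let predict = |pos: usize| first + (pos as f32 * slope) as u64;
    // offset that lifts the most negative residual up to zero
    let offset = data
        .iter()
        .enumerate()
        .map(|(pos, &v)| predict(pos).saturating_sub(v))
        .max()
        .unwrap_or(0);
    // the real codec's is_applicable check guards `v + offset` against overflow
    let residuals: Vec<u64> = data
        .iter()
        .enumerate()
        .map(|(pos, &v)| (v + offset) - predict(pos))
        .collect();
    (first, slope, offset, residuals)
}

fn decode_linear(first: u64, slope: f32, offset: u64, residuals: &[u64]) -> Vec<u64> {
    residuals
        .iter()
        .enumerate()
        .map(|(pos, &r)| (first + (pos as f32 * slope) as u64 + r) - offset)
        .collect()
}

fn main() {
    let data = vec![10u64, 12, 15, 15, 20];
    let (first, slope, offset, residuals) = encode_linear(&data);
    assert_eq!(decode_linear(first, slope, offset, &residuals), data);
}

Because encoder and decoder use the same prediction, the scheme round-trips even when the f32 slope loses precision: only the stored residuals change, not correctness.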
lib.rs
|
#![no_std]
#![allow(non_upper_case_globals)]
use libtww::{game::Console, system::custom_game_loop};
use gcn_fonts::prelude::*;
pub mod cheat_menu;
pub mod controller;
pub mod flag_menu;
pub mod inventory_menu;
pub mod main_menu;
pub mod memory;
pub mod popups;
pub mod print;
pub mod settings;
pub mod spawn_menu;
pub mod utils;
pub mod warp_menu;
pub static mut visible: bool = false;
struct State {
font: UploadedFont,
settings: settings::Settings,
}
static mut STATE: Option<State> = None;
unsafe fn get_state() -> &'static mut State {
STATE.get_or_insert_with(|| State {
font: gcn_fonts::include_font! { path: "res/Calamity-Bold.ttf", size: 18.0 }.upload(),
settings: settings::Settings { drop_shadow: true },
})
}
#[no_mangle]
pub extern "C" fn game_loop() ->! {
let console = Console::get();
console.line_count = 32;
console.x = 0;
console.y = 16;
console.font_scale_x *= 1.2;
console.font_scale_y *= 1.2;
console.background_color.a = 150;
console.clear();
custom_game_loop(|| {
cheat_menu::apply_cheats();
let d_down = controller::DPAD_DOWN.is_pressed();
let rt_down = controller::R.is_down();
let console = Console::get();
if unsafe { visible } {
console.background_color.a = 150;
utils::render();
} else if d_down && rt_down && unsafe { !popups::visible } {
console.visible = true;
unsafe {
visible = true;
}
} else {
// Only check popups if the Debug Menu is not open
popups::check_global_flags();
}
})
}
#[no_mangle]
pub unsafe extern "C" fn draw()
|
{
print::setup_draw();
memory::render_watches();
}
|
identifier_body
|
|
lib.rs
|
#![no_std]
#![allow(non_upper_case_globals)]
use libtww::{game::Console, system::custom_game_loop};
use gcn_fonts::prelude::*;
pub mod cheat_menu;
pub mod controller;
pub mod flag_menu;
pub mod inventory_menu;
pub mod main_menu;
pub mod memory;
pub mod popups;
pub mod print;
pub mod settings;
pub mod spawn_menu;
pub mod utils;
pub mod warp_menu;
pub static mut visible: bool = false;
struct State {
font: UploadedFont,
settings: settings::Settings,
}
static mut STATE: Option<State> = None;
unsafe fn get_state() -> &'static mut State {
STATE.get_or_insert_with(|| State {
font: gcn_fonts::include_font! { path: "res/Calamity-Bold.ttf", size: 18.0 }.upload(),
settings: settings::Settings { drop_shadow: true },
})
}
#[no_mangle]
pub extern "C" fn game_loop() ->! {
let console = Console::get();
console.line_count = 32;
console.x = 0;
console.y = 16;
console.font_scale_x *= 1.2;
console.font_scale_y *= 1.2;
console.background_color.a = 150;
console.clear();
custom_game_loop(|| {
cheat_menu::apply_cheats();
let d_down = controller::DPAD_DOWN.is_pressed();
let rt_down = controller::R.is_down();
let console = Console::get();
if unsafe { visible } {
console.background_color.a = 150;
utils::render();
} else if d_down && rt_down && unsafe { !popups::visible } {
console.visible = true;
unsafe {
visible = true;
}
} else {
// Only check popups if the Debug Menu is not open
popups::check_global_flags();
}
})
}
#[no_mangle]
pub unsafe extern "C" fn
|
() {
print::setup_draw();
memory::render_watches();
}
|
draw
|
identifier_name
|
lib.rs
|
use gcn_fonts::prelude::*;
pub mod cheat_menu;
pub mod controller;
pub mod flag_menu;
pub mod inventory_menu;
pub mod main_menu;
pub mod memory;
pub mod popups;
pub mod print;
pub mod settings;
pub mod spawn_menu;
pub mod utils;
pub mod warp_menu;
pub static mut visible: bool = false;
struct State {
font: UploadedFont,
settings: settings::Settings,
}
static mut STATE: Option<State> = None;
unsafe fn get_state() -> &'static mut State {
STATE.get_or_insert_with(|| State {
font: gcn_fonts::include_font! { path: "res/Calamity-Bold.ttf", size: 18.0 }.upload(),
settings: settings::Settings { drop_shadow: true },
})
}
#[no_mangle]
pub extern "C" fn game_loop() ->! {
let console = Console::get();
console.line_count = 32;
console.x = 0;
console.y = 16;
console.font_scale_x *= 1.2;
console.font_scale_y *= 1.2;
console.background_color.a = 150;
console.clear();
custom_game_loop(|| {
cheat_menu::apply_cheats();
let d_down = controller::DPAD_DOWN.is_pressed();
let rt_down = controller::R.is_down();
let console = Console::get();
if unsafe { visible } {
console.background_color.a = 150;
utils::render();
} else if d_down && rt_down && unsafe { !popups::visible } {
console.visible = true;
unsafe {
visible = true;
}
} else {
// Only check popups if the Debug Menu is not open
popups::check_global_flags();
}
})
}
#[no_mangle]
pub unsafe extern "C" fn draw() {
print::setup_draw();
memory::render_watches();
}
|
#![no_std]
#![allow(non_upper_case_globals)]
use libtww::{game::Console, system::custom_game_loop};
|
random_line_split
|
|
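The debug-menu rows above gate rendering on a `pub static mut visible: bool` that is read and written under `unsafe`. The same no_std-friendly global flag can be expressed without `unsafe` using core's AtomicBool; the snippet below is a hedged alternative sketch, not the crate's actual code:

use core::sync::atomic::{AtomicBool, Ordering};

// Safe stand-in for `pub static mut visible: bool`; AtomicBool lives in `core`,
// so it is available in the same no_std setting.
pub static VISIBLE: AtomicBool = AtomicBool::new(false);

fn main() {
    assert!(!VISIBLE.load(Ordering::Relaxed));
    VISIBLE.store(true, Ordering::Relaxed);
    assert!(VISIBLE.load(Ordering::Relaxed));
}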
issue-13853.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::marker::MarkerTrait;
trait Node : MarkerTrait {
fn zomg();
}
trait Graph<N: Node> {
fn nodes<'a, I: Iterator<Item=&'a N>>(&'a self) -> I;
}
impl<N: Node> Graph<N> for Vec<N> {
fn nodes<'a, I: Iterator<Item=&'a N>>(&self) -> I {
self.iter() //~ ERROR mismatched types
}
}
struct Stuff;
impl Node for Stuff {
fn zomg()
|
}
fn iterate<N: Node, G: Graph<N>>(graph: &G) {
for node in graph.iter() { //~ ERROR does not implement any method in scope named
node.zomg(); //~ error: the type of this value must be known in this context
}
}
pub fn main() {
let graph = Vec::new();
graph.push(Stuff);
iterate(graph); //~ ERROR mismatched types
}
|
{
println!("zomg");
}
|
identifier_body
|
issue-13853.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
fn zomg();
}
trait Graph<N: Node> {
fn nodes<'a, I: Iterator<Item=&'a N>>(&'a self) -> I;
}
impl<N: Node> Graph<N> for Vec<N> {
fn nodes<'a, I: Iterator<Item=&'a N>>(&self) -> I {
self.iter() //~ ERROR mismatched types
}
}
struct Stuff;
impl Node for Stuff {
fn zomg() {
println!("zomg");
}
}
fn iterate<N: Node, G: Graph<N>>(graph: &G) {
for node in graph.iter() { //~ ERROR does not implement any method in scope named
node.zomg(); //~ error: the type of this value must be known in this context
}
}
pub fn main() {
let graph = Vec::new();
graph.push(Stuff);
iterate(graph); //~ ERROR mismatched types
}
|
use std::marker::MarkerTrait;
trait Node : MarkerTrait {
|
random_line_split
|
issue-13853.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::marker::MarkerTrait;
trait Node : MarkerTrait {
fn zomg();
}
trait Graph<N: Node> {
fn nodes<'a, I: Iterator<Item=&'a N>>(&'a self) -> I;
}
impl<N: Node> Graph<N> for Vec<N> {
fn nodes<'a, I: Iterator<Item=&'a N>>(&self) -> I {
self.iter() //~ ERROR mismatched types
}
}
struct Stuff;
impl Node for Stuff {
fn zomg() {
println!("zomg");
}
}
fn iterate<N: Node, G: Graph<N>>(graph: &G) {
for node in graph.iter() { //~ ERROR does not implement any method in scope named
node.zomg(); //~ error: the type of this value must be known in this context
}
}
pub fn
|
() {
let graph = Vec::new();
graph.push(Stuff);
iterate(graph); //~ ERROR mismatched types
}
|
main
|
identifier_name
|
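The issue-13853.rs rows above are a compile-fail test: `nodes` promises to return whatever iterator type `I` the caller picks, so returning the concrete `self.iter()` cannot type-check, and `graph.iter()` / `iterate(graph)` add further mismatches. One way such an API can be written on modern Rust is shown below, only as a hedged, adapted sketch (the iterator is boxed and `zomg` takes `&self`), not as the test's intended fix:

trait Node {
    fn zomg(&self);
}

trait Graph<N: Node> {
    // The implementor chooses the iterator; callers just consume it.
    fn nodes<'a>(&'a self) -> Box<dyn Iterator<Item = &'a N> + 'a>;
}

struct Stuff;

impl Node for Stuff {
    fn zomg(&self) {
        println!("zomg");
    }
}

impl<N: Node> Graph<N> for Vec<N> {
    fn nodes<'a>(&'a self) -> Box<dyn Iterator<Item = &'a N> + 'a> {
        Box::new(self.iter())
    }
}

fn main() {
    let graph = vec![Stuff, Stuff];
    for node in graph.nodes() {
        node.zomg();
    }
}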
effects.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Generic types for CSS values related to effects.
/// A generic value for a single `box-shadow`.
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToAnimatedZero,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C)]
pub struct GenericBoxShadow<Color, SizeLength, BlurShapeLength, ShapeLength> {
/// The base shadow.
pub base: GenericSimpleShadow<Color, SizeLength, BlurShapeLength>,
/// The spread radius.
pub spread: ShapeLength,
/// Whether this is an inset box shadow.
#[animation(constant)]
#[css(represents_keyword)]
pub inset: bool,
}
pub use self::GenericBoxShadow as BoxShadow;
/// A generic value for a single `filter`.
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[animation(no_bound(U))]
#[derive(
Clone,
ComputeSquaredDistance,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C, u8)]
pub enum GenericFilter<Angle, NonNegativeFactor, ZeroToOneFactor, Length, Shadow, U> {
/// `blur(<length>)`
#[css(function)]
Blur(Length),
/// `brightness(<factor>)`
#[css(function)]
Brightness(NonNegativeFactor),
/// `contrast(<factor>)`
#[css(function)]
Contrast(NonNegativeFactor),
/// `grayscale(<factor>)`
#[css(function)]
Grayscale(ZeroToOneFactor),
/// `hue-rotate(<angle>)`
#[css(function)]
HueRotate(Angle),
/// `invert(<factor>)`
#[css(function)]
Invert(ZeroToOneFactor),
/// `opacity(<factor>)`
#[css(function)]
Opacity(ZeroToOneFactor),
/// `saturate(<factor>)`
#[css(function)]
Saturate(NonNegativeFactor),
/// `sepia(<factor>)`
#[css(function)]
Sepia(ZeroToOneFactor),
/// `drop-shadow(...)`
#[css(function)]
DropShadow(Shadow),
/// `<url>`
#[animation(error)]
Url(U),
}
pub use self::GenericFilter as Filter;
/// A generic value for the `drop-shadow()` filter and the `text-shadow` property.
///
/// Contrary to the canonical order from the spec, the color is serialised
/// first, like in Gecko and Webkit.
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToAnimatedZero,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C)]
pub struct
|
<Color, SizeLength, ShapeLength> {
/// Color.
pub color: Color,
/// Horizontal radius.
pub horizontal: SizeLength,
/// Vertical radius.
pub vertical: SizeLength,
/// Blur radius.
pub blur: ShapeLength,
}
pub use self::GenericSimpleShadow as SimpleShadow;
|
GenericSimpleShadow
|
identifier_name
|
effects.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Generic types for CSS values related to effects.
/// A generic value for a single `box-shadow`.
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToAnimatedZero,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C)]
pub struct GenericBoxShadow<Color, SizeLength, BlurShapeLength, ShapeLength> {
/// The base shadow.
pub base: GenericSimpleShadow<Color, SizeLength, BlurShapeLength>,
/// The spread radius.
pub spread: ShapeLength,
/// Whether this is an inset box shadow.
#[animation(constant)]
#[css(represents_keyword)]
pub inset: bool,
}
pub use self::GenericBoxShadow as BoxShadow;
/// A generic value for a single `filter`.
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[animation(no_bound(U))]
#[derive(
Clone,
ComputeSquaredDistance,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToComputedValue,
ToCss,
ToResolvedValue,
ToShmem,
)]
#[repr(C, u8)]
pub enum GenericFilter<Angle, NonNegativeFactor, ZeroToOneFactor, Length, Shadow, U> {
/// `blur(<length>)`
#[css(function)]
Blur(Length),
/// `brightness(<factor>)`
#[css(function)]
Brightness(NonNegativeFactor),
/// `contrast(<factor>)`
#[css(function)]
Contrast(NonNegativeFactor),
/// `grayscale(<factor>)`
#[css(function)]
Grayscale(ZeroToOneFactor),
/// `hue-rotate(<angle>)`
#[css(function)]
HueRotate(Angle),
/// `invert(<factor>)`
#[css(function)]
Invert(ZeroToOneFactor),
/// `opacity(<factor>)`
#[css(function)]
Opacity(ZeroToOneFactor),
/// `saturate(<factor>)`
#[css(function)]
Saturate(NonNegativeFactor),
/// `sepia(<factor>)`
#[css(function)]
Sepia(ZeroToOneFactor),
/// `drop-shadow(...)`
#[css(function)]
DropShadow(Shadow),
/// `<url>`
#[animation(error)]
Url(U),
}
pub use self::GenericFilter as Filter;
/// A generic value for the `drop-shadow()` filter and the `text-shadow` property.
///
/// Contrary to the canonical order from the spec, the color is serialised
/// first, like in Gecko and Webkit.
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToAnimatedZero,
ToCss,
ToResolvedValue,
ToShmem,
)]
|
pub color: Color,
/// Horizontal radius.
pub horizontal: SizeLength,
/// Vertical radius.
pub vertical: SizeLength,
/// Blur radius.
pub blur: ShapeLength,
}
pub use self::GenericSimpleShadow as SimpleShadow;
|
#[repr(C)]
pub struct GenericSimpleShadow<Color, SizeLength, ShapeLength> {
/// Color.
|
random_line_split
|
lib.rs
|
//! The `check` crate is responsible for ensuring that an AST expression is actually a valid
//! program. This currently consists of three larger parts: typechecking, kindchecking and renaming.
//! If an AST passes the checks in `Typecheck::typecheck_expr` (which runs all of these checks),
//! the expression is expected to compile successfully (if it does not, it should be considered an
//! internal compiler error).
#![doc(html_root_url = "https://docs.rs/gluon_check/0.17.2")] // # GLUON
#[macro_use]
extern crate collect_mac;
#[cfg(test)]
extern crate env_logger;
#[macro_use]
extern crate log;
#[macro_use]
extern crate gluon_base as base;
#[macro_use]
extern crate gluon_codegen;
pub mod kindcheck;
pub mod metadata;
mod recursion_check;
pub mod rename;
pub mod substitution;
mod typ;
pub mod typecheck;
pub mod unify;
pub mod unify_type;
mod implicits;
use crate::base::{
fnv::FnvMap,
kind::Kind,
metadata::MetadataEnv,
symbol::Symbol,
types::{translate_type, ArcType, PrimitiveEnv, SharedInterner, TypeEnv, TypeExt},
};
use crate::{substitution::Substitution, typ::RcType};
/// Checks if `actual` can be assigned to a binding with the type signature `signature`
pub fn check_signature(
env: &dyn TypecheckEnv<Type = ArcType>,
signature: &ArcType,
actual: &ArcType,
) -> bool {
let interner = SharedInterner::default();
let signature = translate_type(&mut &interner, signature);
let actual = translate_type(&mut &interner, actual);
check_signature_(&env, &interner, &signature, &actual)
}
fn check_signature_(
env: &dyn TypeEnv<Type = RcType>,
interner: &SharedInterner<Symbol, RcType>,
signature: &RcType,
actual: &RcType,
) -> bool {
let subs = Substitution::new(Kind::typ(), interner.clone());
let state = unify_type::State::new(env, &subs);
let actual = actual.instantiate_generics(&mut &subs, &mut FnvMap::default());
let result = unify_type::subsumes(&subs, state, signature, &actual);
if let Err((_, ref err)) = result {
warn!("Check signature error: {}", err);
}
result.is_ok()
}
pub trait TypecheckEnv: PrimitiveEnv + MetadataEnv {}
impl<T> TypecheckEnv for T where T: PrimitiveEnv + MetadataEnv {}
#[cfg(test)]
mod tests {
use super::*;
use std::{cell::RefCell, rc::Rc};
use crate::base::{
kind::{ArcKind, KindEnv},
symbol::{Symbol, SymbolModule, SymbolRef, Symbols},
types::{Alias, TypeEnv},
};
pub struct MockEnv;
impl KindEnv for MockEnv {
fn find_kind(&self, _type_name: &SymbolRef) -> Option<ArcKind> {
None
}
}
impl TypeEnv for MockEnv {
type Type = RcType;
fn find_type(&self, _id: &SymbolRef) -> Option<ArcType>
|
fn find_type_info(&self, _id: &SymbolRef) -> Option<Alias<Symbol, RcType>> {
None
}
}
/// Returns a reference to the interner stored in TLD
pub fn get_local_interner() -> Rc<RefCell<Symbols>> {
thread_local!(static INTERNER: Rc<RefCell<Symbols>>
= Rc::new(RefCell::new(Symbols::new())));
INTERNER.with(|interner| interner.clone())
}
pub fn intern(s: &str) -> Symbol {
let interner = get_local_interner();
let mut interner = interner.borrow_mut();
if s.starts_with(char::is_lowercase) {
interner.simple_symbol(s)
} else {
SymbolModule::new("test".into(), &mut interner).scoped_symbol(s)
}
}
}
|
{
None
}
|
identifier_body
|
lib.rs
|
//! The `check` crate is responsible for ensuring that an AST expression is actually a valid
//! program. This currently consists of three larger parts: typechecking, kindchecking and renaming.
//! If an AST passes the checks in `Typecheck::typecheck_expr` (which runs all of these checks),
//! the expression is expected to compile successfully (if it does not, it should be considered an
//! internal compiler error).
#![doc(html_root_url = "https://docs.rs/gluon_check/0.17.2")] // # GLUON
#[macro_use]
extern crate collect_mac;
#[cfg(test)]
extern crate env_logger;
#[macro_use]
extern crate log;
#[macro_use]
extern crate gluon_base as base;
#[macro_use]
extern crate gluon_codegen;
pub mod kindcheck;
pub mod metadata;
mod recursion_check;
pub mod rename;
pub mod substitution;
mod typ;
pub mod typecheck;
pub mod unify;
pub mod unify_type;
mod implicits;
use crate::base::{
fnv::FnvMap,
kind::Kind,
metadata::MetadataEnv,
symbol::Symbol,
types::{translate_type, ArcType, PrimitiveEnv, SharedInterner, TypeEnv, TypeExt},
};
use crate::{substitution::Substitution, typ::RcType};
/// Checks if `actual` can be assigned to a binding with the type signature `signature`
pub fn check_signature(
env: &dyn TypecheckEnv<Type = ArcType>,
signature: &ArcType,
actual: &ArcType,
) -> bool {
let interner = SharedInterner::default();
let signature = translate_type(&mut &interner, signature);
let actual = translate_type(&mut &interner, actual);
check_signature_(&env, &interner, &signature, &actual)
}
fn check_signature_(
env: &dyn TypeEnv<Type = RcType>,
interner: &SharedInterner<Symbol, RcType>,
signature: &RcType,
actual: &RcType,
) -> bool {
let subs = Substitution::new(Kind::typ(), interner.clone());
let state = unify_type::State::new(env, &subs);
let actual = actual.instantiate_generics(&mut &subs, &mut FnvMap::default());
let result = unify_type::subsumes(&subs, state, signature, &actual);
if let Err((_, ref err)) = result {
warn!("Check signature error: {}", err);
}
result.is_ok()
}
pub trait TypecheckEnv: PrimitiveEnv + MetadataEnv {}
impl<T> TypecheckEnv for T where T: PrimitiveEnv + MetadataEnv {}
#[cfg(test)]
mod tests {
use super::*;
use std::{cell::RefCell, rc::Rc};
use crate::base::{
kind::{ArcKind, KindEnv},
symbol::{Symbol, SymbolModule, SymbolRef, Symbols},
types::{Alias, TypeEnv},
};
pub struct MockEnv;
impl KindEnv for MockEnv {
fn find_kind(&self, _type_name: &SymbolRef) -> Option<ArcKind> {
None
}
}
impl TypeEnv for MockEnv {
type Type = RcType;
fn find_type(&self, _id: &SymbolRef) -> Option<ArcType> {
None
}
fn find_type_info(&self, _id: &SymbolRef) -> Option<Alias<Symbol, RcType>> {
None
}
}
/// Returns a reference to the interner stored in TLD
pub fn
|
() -> Rc<RefCell<Symbols>> {
thread_local!(static INTERNER: Rc<RefCell<Symbols>>
= Rc::new(RefCell::new(Symbols::new())));
INTERNER.with(|interner| interner.clone())
}
pub fn intern(s: &str) -> Symbol {
let interner = get_local_interner();
let mut interner = interner.borrow_mut();
if s.starts_with(char::is_lowercase) {
interner.simple_symbol(s)
} else {
SymbolModule::new("test".into(), &mut interner).scoped_symbol(s)
}
}
}
|
get_local_interner
|
identifier_name
|
lib.rs
|
//! The `check` crate is responsible for ensuring that an AST expression is actually a valid
//! program. This currently consists of three larger parts: typechecking, kindchecking and renaming.
//! If an AST passes the checks in `Typecheck::typecheck_expr` (which runs all of these checks),
//! the expression is expected to compile successfully (if it does not, it should be considered an
//! internal compiler error).
#![doc(html_root_url = "https://docs.rs/gluon_check/0.17.2")] // # GLUON
#[macro_use]
extern crate collect_mac;
#[cfg(test)]
extern crate env_logger;
#[macro_use]
extern crate log;
#[macro_use]
extern crate gluon_base as base;
#[macro_use]
extern crate gluon_codegen;
pub mod kindcheck;
pub mod metadata;
mod recursion_check;
pub mod rename;
pub mod substitution;
mod typ;
pub mod typecheck;
pub mod unify;
pub mod unify_type;
mod implicits;
use crate::base::{
fnv::FnvMap,
kind::Kind,
metadata::MetadataEnv,
symbol::Symbol,
types::{translate_type, ArcType, PrimitiveEnv, SharedInterner, TypeEnv, TypeExt},
};
use crate::{substitution::Substitution, typ::RcType};
/// Checks if `actual` can be assigned to a binding with the type signature `signature`
pub fn check_signature(
env: &dyn TypecheckEnv<Type = ArcType>,
signature: &ArcType,
actual: &ArcType,
) -> bool {
let interner = SharedInterner::default();
let signature = translate_type(&mut &interner, signature);
let actual = translate_type(&mut &interner, actual);
check_signature_(&env, &interner, &signature, &actual)
}
fn check_signature_(
env: &dyn TypeEnv<Type = RcType>,
interner: &SharedInterner<Symbol, RcType>,
signature: &RcType,
actual: &RcType,
) -> bool {
let subs = Substitution::new(Kind::typ(), interner.clone());
let state = unify_type::State::new(env, &subs);
let actual = actual.instantiate_generics(&mut &subs, &mut FnvMap::default());
let result = unify_type::subsumes(&subs, state, signature, &actual);
if let Err((_, ref err)) = result
|
result.is_ok()
}
pub trait TypecheckEnv: PrimitiveEnv + MetadataEnv {}
impl<T> TypecheckEnv for T where T: PrimitiveEnv + MetadataEnv {}
#[cfg(test)]
mod tests {
use super::*;
use std::{cell::RefCell, rc::Rc};
use crate::base::{
kind::{ArcKind, KindEnv},
symbol::{Symbol, SymbolModule, SymbolRef, Symbols},
types::{Alias, TypeEnv},
};
pub struct MockEnv;
impl KindEnv for MockEnv {
fn find_kind(&self, _type_name: &SymbolRef) -> Option<ArcKind> {
None
}
}
impl TypeEnv for MockEnv {
type Type = RcType;
fn find_type(&self, _id: &SymbolRef) -> Option<ArcType> {
None
}
fn find_type_info(&self, _id: &SymbolRef) -> Option<Alias<Symbol, RcType>> {
None
}
}
/// Returns a reference to the interner stored in TLD
pub fn get_local_interner() -> Rc<RefCell<Symbols>> {
thread_local!(static INTERNER: Rc<RefCell<Symbols>>
= Rc::new(RefCell::new(Symbols::new())));
INTERNER.with(|interner| interner.clone())
}
pub fn intern(s: &str) -> Symbol {
let interner = get_local_interner();
let mut interner = interner.borrow_mut();
if s.starts_with(char::is_lowercase) {
interner.simple_symbol(s)
} else {
SymbolModule::new("test".into(), &mut interner).scoped_symbol(s)
}
}
}
|
{
warn!("Check signature error: {}", err);
}
|
conditional_block
|
lib.rs
|
//! The `check` crate is responsible for ensuring that an AST expression is actually a valid
//! program. This currently consists of three larger parts: typechecking, kindchecking and renaming.
//! If an AST passes the checks in `Typecheck::typecheck_expr` (which runs all of these checks),
//! the expression is expected to compile successfully (if it does not, it should be considered an
//! internal compiler error).
#![doc(html_root_url = "https://docs.rs/gluon_check/0.17.2")] // # GLUON
#[macro_use]
extern crate collect_mac;
#[cfg(test)]
extern crate env_logger;
#[macro_use]
extern crate log;
#[macro_use]
extern crate gluon_base as base;
#[macro_use]
extern crate gluon_codegen;
pub mod kindcheck;
pub mod metadata;
mod recursion_check;
pub mod rename;
pub mod substitution;
mod typ;
pub mod typecheck;
pub mod unify;
pub mod unify_type;
mod implicits;
use crate::base::{
fnv::FnvMap,
kind::Kind,
metadata::MetadataEnv,
symbol::Symbol,
|
types::{translate_type, ArcType, PrimitiveEnv, SharedInterner, TypeEnv, TypeExt},
};
use crate::{substitution::Substitution, typ::RcType};
/// Checks if `actual` can be assigned to a binding with the type signature `signature`
pub fn check_signature(
env: &dyn TypecheckEnv<Type = ArcType>,
signature: &ArcType,
actual: &ArcType,
) -> bool {
let interner = SharedInterner::default();
let signature = translate_type(&mut &interner, signature);
let actual = translate_type(&mut &interner, actual);
check_signature_(&env, &interner, &signature, &actual)
}
fn check_signature_(
env: &dyn TypeEnv<Type = RcType>,
interner: &SharedInterner<Symbol, RcType>,
signature: &RcType,
actual: &RcType,
) -> bool {
let subs = Substitution::new(Kind::typ(), interner.clone());
let state = unify_type::State::new(env, &subs);
let actual = actual.instantiate_generics(&mut &subs, &mut FnvMap::default());
let result = unify_type::subsumes(&subs, state, signature, &actual);
if let Err((_, ref err)) = result {
warn!("Check signature error: {}", err);
}
result.is_ok()
}
pub trait TypecheckEnv: PrimitiveEnv + MetadataEnv {}
impl<T> TypecheckEnv for T where T: PrimitiveEnv + MetadataEnv {}
#[cfg(test)]
mod tests {
use super::*;
use std::{cell::RefCell, rc::Rc};
use crate::base::{
kind::{ArcKind, KindEnv},
symbol::{Symbol, SymbolModule, SymbolRef, Symbols},
types::{Alias, TypeEnv},
};
pub struct MockEnv;
impl KindEnv for MockEnv {
fn find_kind(&self, _type_name: &SymbolRef) -> Option<ArcKind> {
None
}
}
impl TypeEnv for MockEnv {
type Type = RcType;
fn find_type(&self, _id: &SymbolRef) -> Option<ArcType> {
None
}
fn find_type_info(&self, _id: &SymbolRef) -> Option<Alias<Symbol, RcType>> {
None
}
}
/// Returns a reference to the interner stored in TLD
pub fn get_local_interner() -> Rc<RefCell<Symbols>> {
thread_local!(static INTERNER: Rc<RefCell<Symbols>>
= Rc::new(RefCell::new(Symbols::new())));
INTERNER.with(|interner| interner.clone())
}
pub fn intern(s: &str) -> Symbol {
let interner = get_local_interner();
let mut interner = interner.borrow_mut();
if s.starts_with(char::is_lowercase) {
interner.simple_symbol(s)
} else {
SymbolModule::new("test".into(), &mut interner).scoped_symbol(s)
}
}
}
|
random_line_split
|
|
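The test helper `get_local_interner` in the gluon check rows above relies on a common Rust pattern: lazily create one value per thread with `thread_local!` and hand out cheap `Rc` clones of it, with interior mutability supplied by `RefCell`. A stripped-down sketch of the same pattern, using a hypothetical `Interner` type rather than gluon's `Symbols`/`SymbolModule` API, could look like this:

use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;

// Hypothetical stand-in for a symbol interner; not gluon's `Symbols` type.
#[derive(Default)]
struct Interner {
    map: HashMap<String, usize>,
}

impl Interner {
    fn intern(&mut self, s: &str) -> usize {
        let next = self.map.len();
        *self.map.entry(s.to_string()).or_insert(next)
    }
}

// One interner per thread; every call returns a handle to the same instance.
fn local_interner() -> Rc<RefCell<Interner>> {
    thread_local!(static INTERNER: Rc<RefCell<Interner>> =
        Rc::new(RefCell::new(Interner::default())));
    INTERNER.with(|interner| interner.clone())
}

fn main() {
    let a = local_interner();
    let b = local_interner();
    let id_a = a.borrow_mut().intern("x");
    let id_b = b.borrow_mut().intern("x");
    // Both handles point at the same per-thread interner, so "x" gets one id.
    assert_eq!(id_a, id_b);
}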
window.rs
|
use ::game::*;
use ::ncurses::*;
use std::ascii::AsciiExt;
use std::char;
use std::env;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use std::mem;
use std::iter::repeat;
use ::itertools::Itertools;
use ::consts::*;
pub struct NCursesWindow;
pub fn create() -> Box<Window> {
Box::new(NCursesWindow::new())
}
pub trait Window {
fn render(&self, &GameState);
fn handle_input(&self, &mut GameState) -> Option<InputEvent>;
}
impl NCursesWindow {
fn new() -> NCursesWindow {
// Enable all mouse events for the current terminal.
env::set_var("TERM", "xterm-1003");
setlocale(LcCategory::all, "");
initscr();
raw();
// Extended keyboard and mouse events.
keypad(stdscr(), true);
nodelay(stdscr(), true);
noecho();
let mouse_events = ALL_MOUSE_EVENTS | REPORT_MOUSE_POSITION;
mousemask(mouse_events as u32, None);
mouseinterval(0);
if has_mouse() {
info!("Mouse driver initialized.")
} else {
info!("Error initializing mouse driver.");
}
NCursesWindow
}
}
impl Drop for NCursesWindow {
fn drop(&mut self) {
refresh();
endwin();
}
}
impl Window for NCursesWindow {
fn handle_input(&self, game_state: &mut GameState) -> Option<InputEvent> {
let ch: i32 = getch();
// Allow WASD and HJKL controls
const KEY_W: i32 = 'w' as i32;
const KEY_A: i32 = 'a' as i32;
const KEY_S: i32 = 's' as i32;
const KEY_D: i32 = 'd' as i32;
const KEY_H: i32 = 'h' as i32;
const KEY_J: i32 = 'j' as i32;
const KEY_K: i32 = 'k' as i32;
const KEY_L: i32 = 'l' as i32;
const KEY_ESC: i32 = 27;
const KEY_ENTER: i32 = '\n' as i32;
match ch as i32 {
KEY_LEFT | KEY_A | KEY_H => Some(InputEvent::Left),
KEY_RIGHT | KEY_D | KEY_L => Some(InputEvent::Right),
KEY_UP | KEY_W | KEY_K => Some(InputEvent::Up),
KEY_DOWN | KEY_S | KEY_J => Some(InputEvent::Down),
KEY_MOUSE => {
let mut event: MEVENT = unsafe { mem::uninitialized() };
assert!(getmouse(&mut event) == OK);
game_state.cursor_position = (event.x, event.y);
if event.bstate & (BUTTON1_PRESSED as u32) != 0 {
Some(InputEvent::Action)
} else {
None
}
}
KEY_ENTER => Some(InputEvent::Action),
KEY_ESC => Some(InputEvent::Quit),
_ => None,
}
}
fn render(&self, game_state: &GameState) {
refresh();
erase();
let starting_line = MARGIN + 5;
// If the game is over, render the ending state and return early.
if let Some(ref ending) = game_state.status {
match *ending {
GameEnding::Won => {
curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE);
let reader = BufReader::new(File::open("resources/vault_boy.txt").unwrap());
let mut line_counter = 0;
for line in reader.lines().map(|l| l.unwrap()) {
mvprintw(line_counter as i32,
0,
&format!("{:^1$}", line, WINDOW_WIDTH as usize));
line_counter += 1;
}
mvprintw(line_counter as i32,
0,
&format!("{:^1$}", "ACCESS GRANTED", WINDOW_WIDTH as usize));
}
GameEnding::Lost => {
curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE);
mvprintw((starting_line + ROWS) / 2,
0,
&format!("{:^1$}", "TERMINAL LOCKED", WINDOW_WIDTH as usize));
mvprintw((starting_line + ROWS + 1) / 2,
0,
&format!("{:^1$}",
"PLEASE CONTACT AN ADMINISTRATOR",
WINDOW_WIDTH as usize));
}
}
return;
}
// Print information at top
mvprintw(MARGIN, MARGIN, "ROBCO INDUSTRIES (TM) TERMLINK PROTOCOL");
mvprintw(MARGIN + 1, MARGIN, "ENTER PASSWORD NOW");
mvprintw(LINES() - 1, 0, "Press Esc to exit");
// Print attempts remaining
let visual_attempts = repeat("█")
.take(game_state.attempts as usize)
.join(" ");
mvprintw(MARGIN + 3,
MARGIN,
&format!("{} ATTEMPT(S) LEFT: {}",
game_state.attempts,
visual_attempts));
// Draw random addresses and word columns
let highlight_positions = match game_state.get_entity_at_cursor() {
Some(cursor_entity) => {
if cursor_entity.highlighted() {
let (start, end) = cursor_entity.indices();
let start_x = start % WORD_COLUMN_WIDTH as usize;
let start_y = start / WORD_COLUMN_WIDTH as usize;
let end_x = end % WORD_COLUMN_WIDTH as usize;
let end_y = end / WORD_COLUMN_WIDTH as usize;
Some(((start_x, start_y), (end_x, end_y)))
} else {
None
}
}
None => None,
};
// Draw both columns
for (column_index, column) in game_state.columns.iter().enumerate() {
let word_data: Vec<char> = column.render_word_data().chars().collect::<Vec<char>>();
let word_chunks = word_data.chunks(WORD_COLUMN_WIDTH as usize);
for (line, (address, word_chunk)) in column.addresses
.iter()
.zip(word_chunks.into_iter())
.enumerate() {
let row = starting_line + line as i32;
let col = MARGIN + column_index as i32 * (COLUMN_WIDTH + COLUMN_PADDING);
let hex_address: String =
format!("{:#01$X}", address, ADDRESS_COLUMN_WIDTH as usize);
let word_row: String = word_chunk.iter().map(|&c| c).collect::<String>();
mvprintw(row, col, &(hex_address + " "));
if let Some(((start_x, start_y), (end_x, end_y))) = highlight_positions {
if game_state.get_cursor_column_index().unwrap() != column_index {
// We're not in the correct column, so just write out the line and
// continue.
addstr(&word_row);
continue;
}
// If the highlight ends on the same line, we just iterate over the chunk and
// turn on and off the highlight at the start and the end.
if start_y == line && start_y == end_y {
for (i, c) in word_row.chars().enumerate() {
if i == start_x {
attron(A_STANDOUT());
}
if i == end_x {
attroff(A_STANDOUT());
}
addch(c as u32);
}
} else if start_y == line {
for (i, c) in word_row.chars().enumerate() {
if i == start_x {
attron(A_STANDOUT());
}
addch(c as u32);
}
attroff(A_STANDOUT());
} else if end_y == line {
attron(A_STANDOUT());
for (i, c) in word_row.chars().enumerate() {
if i == end_x {
attroff(A_STANDOUT());
}
addch(c as u32);
}
} else {
addstr(&word_row);
}
} else {
addstr(&word_row);
}
}
}
// Draw the console.
let console_entry = if let Some(entity) = game_state.get_entity_at_cursor() {
match *entity {
CursorEntity::Word { ref word, .. } => word.to_ascii_uppercase(),
CursorEntity::Brackets { ref pair, .. } => pair.0.to_string(),
}
} else {
// If we're in a column, display the character at the cursor. Otherwise, display an empty
// string.
match game_state.get_cursor_column_index() {
Some(..) => {
let (x, y) = game_state.cursor_position;
char::from_u32(mvinch(y, x) as u32).unwrap().to_string()
}
None => "".to_string(),
}
};
mvprintw(starting_line + ROWS - 1,
MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN,
&format!(">{}", console_entry));
// Draw the console entries, starting from the bottom.
let mut entries_row = starting_line + ROWS - 3;
for entry in game_state.entries.iter().rev() {
let col = MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN;
// Only prints the lines if the entry would be within the address columns.
let mvprintw_checked = |row, col, lines: &[&str]| {
for (i, line) in lines.iter().rev().enumerate() {
if row >= starting_line {
mvprintw(row - i as i32, col, line);
}
}
};
match *entry {
Entry::Incorrect { num_correct, ref word } => {
|
Entry::Correct { ref word } => {
mvprintw_checked(entries_row,
col,
&[&format!(">{}", word.to_ascii_uppercase()),
">Exact match!",
">Please wait",
">while system",
">is accessed."]);
}
Entry::DudRemoval => {
mvprintw_checked(entries_row, col, &[">", ">Dud removed."]);
}
Entry::AllowanceReplenish => {
mvprintw_checked(entries_row, col, &[">", ">Allowance", ">replenished."]);
}
}
entries_row -= entry.display_rows() as i32;
}
// Move the cursor to the current position
let (x, y) = game_state.cursor_position;
mv(y, x);
}
}
|
mvprintw_checked(entries_row,
col,
&[&format!(">{}", word.to_ascii_uppercase()),
">Entry denied",
&format!(">{}/{} correct.", num_correct, 7)]);
}
|
conditional_block
|
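The highlight handling in the window.rs rows walks each rendered line and switches A_STANDOUT on at start_x and off at end_x, after converting the flat (start, end) character span into grid coordinates with % and / against the column width. A small sketch of just that index-to-grid mapping, with a hypothetical width constant standing in for consts::WORD_COLUMN_WIDTH, could look like this:

// Hypothetical column width; the real value comes from consts::WORD_COLUMN_WIDTH.
const WORD_COLUMN_WIDTH: usize = 12;

/// Map a flat index into the word data to (x, y) within the column grid.
fn to_grid(index: usize) -> (usize, usize) {
    (index % WORD_COLUMN_WIDTH, index / WORD_COLUMN_WIDTH)
}

/// For a given line, return the half-open range of columns to highlight,
/// or None if the span does not touch that line at all.
fn highlight_on_line(start: usize, end: usize, line: usize) -> Option<(usize, usize)> {
    let (start_x, start_y) = to_grid(start);
    let (end_x, end_y) = to_grid(end);
    if line < start_y || line > end_y {
        return None;
    }
    let from = if line == start_y { start_x } else { 0 };
    let to = if line == end_y { end_x } else { WORD_COLUMN_WIDTH };
    Some((from, to))
}

fn main() {
    // A span starting near the end of line 0 and ending on line 1.
    assert_eq!(highlight_on_line(10, 15, 0), Some((10, 12)));
    assert_eq!(highlight_on_line(10, 15, 1), Some((0, 3)));
    assert_eq!(highlight_on_line(10, 15, 2), None);
}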
window.rs
|
use ::game::*;
use ::ncurses::*;
use std::ascii::AsciiExt;
use std::char;
use std::env;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use std::mem;
use std::iter::repeat;
use ::itertools::Itertools;
use ::consts::*;
pub struct
|
;
pub fn create() -> Box<Window> {
Box::new(NCursesWindow::new())
}
pub trait Window {
fn render(&self, &GameState);
fn handle_input(&self, &mut GameState) -> Option<InputEvent>;
}
impl NCursesWindow {
fn new() -> NCursesWindow {
// Enable all mouse events for the current terminal.
env::set_var("TERM", "xterm-1003");
setlocale(LcCategory::all, "");
initscr();
raw();
// Extended keyboard and mouse events.
keypad(stdscr(), true);
nodelay(stdscr(), true);
noecho();
let mouse_events = ALL_MOUSE_EVENTS | REPORT_MOUSE_POSITION;
mousemask(mouse_events as u32, None);
mouseinterval(0);
if has_mouse() {
info!("Mouse driver initialized.")
} else {
info!("Error initializing mouse driver.");
}
NCursesWindow
}
}
impl Drop for NCursesWindow {
fn drop(&mut self) {
refresh();
endwin();
}
}
impl Window for NCursesWindow {
fn handle_input(&self, game_state: &mut GameState) -> Option<InputEvent> {
let ch: i32 = getch();
// Allow WASD and HJKL controls
const KEY_W: i32 = 'w' as i32;
const KEY_A: i32 = 'a' as i32;
const KEY_S: i32 = 's' as i32;
const KEY_D: i32 = 'd' as i32;
const KEY_H: i32 = 'h' as i32;
const KEY_J: i32 = 'j' as i32;
const KEY_K: i32 = 'k' as i32;
const KEY_L: i32 = 'l' as i32;
const KEY_ESC: i32 = 27;
const KEY_ENTER: i32 = '\n' as i32;
match ch as i32 {
KEY_LEFT | KEY_A | KEY_H => Some(InputEvent::Left),
KEY_RIGHT | KEY_D | KEY_L => Some(InputEvent::Right),
KEY_UP | KEY_W | KEY_K => Some(InputEvent::Up),
KEY_DOWN | KEY_S | KEY_J => Some(InputEvent::Down),
KEY_MOUSE => {
let mut event: MEVENT = unsafe { mem::uninitialized() };
assert!(getmouse(&mut event) == OK);
game_state.cursor_position = (event.x, event.y);
if event.bstate & (BUTTON1_PRESSED as u32) != 0 {
Some(InputEvent::Action)
} else {
None
}
}
KEY_ENTER => Some(InputEvent::Action),
KEY_ESC => Some(InputEvent::Quit),
_ => None,
}
}
fn render(&self, game_state: &GameState) {
refresh();
erase();
let starting_line = MARGIN + 5;
// If the game is over, render the ending state and return early.
if let Some(ref ending) = game_state.status {
match *ending {
GameEnding::Won => {
curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE);
let reader = BufReader::new(File::open("resources/vault_boy.txt").unwrap());
let mut line_counter = 0;
for line in reader.lines().map(|l| l.unwrap()) {
mvprintw(line_counter as i32,
0,
&format!("{:^1$}", line, WINDOW_WIDTH as usize));
line_counter += 1;
}
mvprintw(line_counter as i32,
0,
&format!("{:^1$}", "ACCESS GRANTED", WINDOW_WIDTH as usize));
}
GameEnding::Lost => {
curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE);
mvprintw((starting_line + ROWS) / 2,
0,
&format!("{:^1$}", "TERMINAL LOCKED", WINDOW_WIDTH as usize));
mvprintw((starting_line + ROWS + 1) / 2,
0,
&format!("{:^1$}",
"PLEASE CONTACT AN ADMINISTRATOR",
WINDOW_WIDTH as usize));
}
}
return;
}
// Print information at top
mvprintw(MARGIN, MARGIN, "ROBCO INDUSTRIES (TM) TERMLINK PROTOCOL");
mvprintw(MARGIN + 1, MARGIN, "ENTER PASSWORD NOW");
mvprintw(LINES() - 1, 0, "Press Esc to exit");
// Print attempts remaining
let visual_attempts = repeat("█")
.take(game_state.attempts as usize)
.join(" ");
mvprintw(MARGIN + 3,
MARGIN,
&format!("{} ATTEMPT(S) LEFT: {}",
game_state.attempts,
visual_attempts));
// Draw random addresses and word columns
let highlight_positions = match game_state.get_entity_at_cursor() {
Some(cursor_entity) => {
if cursor_entity.highlighted() {
let (start, end) = cursor_entity.indices();
let start_x = start % WORD_COLUMN_WIDTH as usize;
let start_y = start / WORD_COLUMN_WIDTH as usize;
let end_x = end % WORD_COLUMN_WIDTH as usize;
let end_y = end / WORD_COLUMN_WIDTH as usize;
Some(((start_x, start_y), (end_x, end_y)))
} else {
None
}
}
None => None,
};
// Draw both columns
for (column_index, column) in game_state.columns.iter().enumerate() {
let word_data: Vec<char> = column.render_word_data().chars().collect::<Vec<char>>();
let word_chunks = word_data.chunks(WORD_COLUMN_WIDTH as usize);
for (line, (address, word_chunk)) in column.addresses
.iter()
.zip(word_chunks.into_iter())
.enumerate() {
let row = starting_line + line as i32;
let col = MARGIN + column_index as i32 * (COLUMN_WIDTH + COLUMN_PADDING);
let hex_address: String =
format!("{:#01$X}", address, ADDRESS_COLUMN_WIDTH as usize);
let word_row: String = word_chunk.iter().map(|&c| c).collect::<String>();
mvprintw(row, col, &(hex_address + " "));
if let Some(((start_x, start_y), (end_x, end_y))) = highlight_positions {
if game_state.get_cursor_column_index().unwrap() != column_index {
// We're not in the correct column, so just write out the line and
// continue.
addstr(&word_row);
continue;
}
// If the highlight ends on the same line, we just iterate over the chunk and
// turn on and off the highlight at the start and the end.
if start_y == line && start_y == end_y {
for (i, c) in word_row.chars().enumerate() {
if i == start_x {
attron(A_STANDOUT());
}
if i == end_x {
attroff(A_STANDOUT());
}
addch(c as u32);
}
} else if start_y == line {
for (i, c) in word_row.chars().enumerate() {
if i == start_x {
attron(A_STANDOUT());
}
addch(c as u32);
}
attroff(A_STANDOUT());
} else if end_y == line {
attron(A_STANDOUT());
for (i, c) in word_row.chars().enumerate() {
if i == end_x {
attroff(A_STANDOUT());
}
addch(c as u32);
}
} else {
addstr(&word_row);
}
} else {
addstr(&word_row);
}
}
}
// Draw the console.
let console_entry = if let Some(entity) = game_state.get_entity_at_cursor() {
match *entity {
CursorEntity::Word { ref word, .. } => word.to_ascii_uppercase(),
CursorEntity::Brackets { ref pair, .. } => pair.0.to_string(),
}
} else {
// If we're in a column, display the character at the cursor. Otherwise, display an empty
// string.
match game_state.get_cursor_column_index() {
Some(..) => {
let (x, y) = game_state.cursor_position;
char::from_u32(mvinch(y, x) as u32).unwrap().to_string()
}
None => "".to_string(),
}
};
mvprintw(starting_line + ROWS - 1,
MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN,
&format!(">{}", console_entry));
// Draw the console entries, starting from the bottom.
let mut entries_row = starting_line + ROWS - 3;
for entry in game_state.entries.iter().rev() {
let col = MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN;
// Only prints the lines if the entry would be within the address columns.
let mvprintw_checked = |row, col, lines: &[&str]| {
for (i, line) in lines.iter().rev().enumerate() {
if row >= starting_line {
mvprintw(row - i as i32, col, line);
}
}
};
match *entry {
Entry::Incorrect { num_correct, ref word } => {
mvprintw_checked(entries_row,
col,
&[&format!(">{}", word.to_ascii_uppercase()),
">Entry denied",
&format!(">{}/{} correct.", num_correct, 7)]);
}
Entry::Correct { ref word } => {
mvprintw_checked(entries_row,
col,
&[&format!(">{}", word.to_ascii_uppercase()),
">Exact match!",
">Please wait",
">while system",
">is accessed."]);
}
Entry::DudRemoval => {
mvprintw_checked(entries_row, col, &[">", ">Dud removed."]);
}
Entry::AllowanceReplenish => {
mvprintw_checked(entries_row, col, &[">", ">Allowance", ">replenished."]);
}
}
entries_row -= entry.display_rows() as i32;
}
// Move the cursor to the current position
let (x, y) = game_state.cursor_position;
mv(y, x);
}
}
|
NCursesWindow
|
identifier_name
|
window.rs
|
use ::game::*;
use ::ncurses::*;
use std::ascii::AsciiExt;
use std::char;
use std::env;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use std::mem;
use std::iter::repeat;
use ::itertools::Itertools;
use ::consts::*;
pub struct NCursesWindow;
pub fn create() -> Box<Window> {
Box::new(NCursesWindow::new())
}
pub trait Window {
fn render(&self, &GameState);
fn handle_input(&self, &mut GameState) -> Option<InputEvent>;
}
impl NCursesWindow {
fn new() -> NCursesWindow {
// Enable all mouse events for the current terminal.
env::set_var("TERM", "xterm-1003");
setlocale(LcCategory::all, "");
initscr();
raw();
// Extended keyboard and mouse events.
keypad(stdscr(), true);
nodelay(stdscr(), true);
noecho();
let mouse_events = ALL_MOUSE_EVENTS | REPORT_MOUSE_POSITION;
mousemask(mouse_events as u32, None);
mouseinterval(0);
if has_mouse() {
info!("Mouse driver initialized.")
} else {
info!("Error initializing mouse driver.");
}
NCursesWindow
}
}
impl Drop for NCursesWindow {
fn drop(&mut self) {
refresh();
endwin();
}
}
impl Window for NCursesWindow {
fn handle_input(&self, game_state: &mut GameState) -> Option<InputEvent>
|
KEY_DOWN | KEY_S | KEY_J => Some(InputEvent::Down),
KEY_MOUSE => {
let mut event: MEVENT = unsafe { mem::uninitialized() };
assert!(getmouse(&mut event) == OK);
game_state.cursor_position = (event.x, event.y);
if event.bstate & (BUTTON1_PRESSED as u32) != 0 {
Some(InputEvent::Action)
} else {
None
}
}
KEY_ENTER => Some(InputEvent::Action),
KEY_ESC => Some(InputEvent::Quit),
_ => None,
}
}
fn render(&self, game_state: &GameState) {
refresh();
erase();
let starting_line = MARGIN + 5;
// If the game is over, render the ending state and return early.
if let Some(ref ending) = game_state.status {
match *ending {
GameEnding::Won => {
curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE);
let reader = BufReader::new(File::open("resources/vault_boy.txt").unwrap());
let mut line_counter = 0;
for line in reader.lines().map(|l| l.unwrap()) {
mvprintw(line_counter as i32,
0,
&format!("{:^1$}", line, WINDOW_WIDTH as usize));
line_counter += 1;
}
mvprintw(line_counter as i32,
0,
&format!("{:^1$}", "ACCESS GRANTED", WINDOW_WIDTH as usize));
}
GameEnding::Lost => {
curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE);
mvprintw((starting_line + ROWS) / 2,
0,
&format!("{:^1$}", "TERMINAL LOCKED", WINDOW_WIDTH as usize));
mvprintw((starting_line + ROWS + 1) / 2,
0,
&format!("{:^1$}",
"PLEASE CONTACT AN ADMINISTRATOR",
WINDOW_WIDTH as usize));
}
}
return;
}
// Print information at top
mvprintw(MARGIN, MARGIN, "ROBCO INDUSTRIES (TM) TERMLINK PROTOCOL");
mvprintw(MARGIN + 1, MARGIN, "ENTER PASSWORD NOW");
mvprintw(LINES() - 1, 0, "Press Esc to exit");
// Print attempts remaining
let visual_attempts = repeat("█")
.take(game_state.attempts as usize)
.join(" ");
mvprintw(MARGIN + 3,
MARGIN,
&format!("{} ATTEMPT(S) LEFT: {}",
game_state.attempts,
visual_attempts));
// Draw random addresses and word columns
let highlight_positions = match game_state.get_entity_at_cursor() {
Some(cursor_entity) => {
if cursor_entity.highlighted() {
let (start, end) = cursor_entity.indices();
let start_x = start % WORD_COLUMN_WIDTH as usize;
let start_y = start / WORD_COLUMN_WIDTH as usize;
let end_x = end % WORD_COLUMN_WIDTH as usize;
let end_y = end / WORD_COLUMN_WIDTH as usize;
Some(((start_x, start_y), (end_x, end_y)))
} else {
None
}
}
None => None,
};
// Draw both columns
for (column_index, column) in game_state.columns.iter().enumerate() {
let word_data: Vec<char> = column.render_word_data().chars().collect::<Vec<char>>();
let word_chunks = word_data.chunks(WORD_COLUMN_WIDTH as usize);
for (line, (address, word_chunk)) in column.addresses
.iter()
.zip(word_chunks.into_iter())
.enumerate() {
let row = starting_line + line as i32;
let col = MARGIN + column_index as i32 * (COLUMN_WIDTH + COLUMN_PADDING);
let hex_address: String =
format!("{:#01$X}", address, ADDRESS_COLUMN_WIDTH as usize);
let word_row: String = word_chunk.iter().map(|&c| c).collect::<String>();
mvprintw(row, col, &(hex_address + " "));
if let Some(((start_x, start_y), (end_x, end_y))) = highlight_positions {
if game_state.get_cursor_column_index().unwrap() != column_index {
// We're not in the correct column, so just write out the line and
// continue.
addstr(&word_row);
continue;
}
// If the highlight ends on the same line, we just iterate over the chunk and
// turn on and off the highlight at the start and the end.
if start_y == line && start_y == end_y {
for (i, c) in word_row.chars().enumerate() {
if i == start_x {
attron(A_STANDOUT());
}
if i == end_x {
attroff(A_STANDOUT());
}
addch(c as u32);
}
} else if start_y == line {
for (i, c) in word_row.chars().enumerate() {
if i == start_x {
attron(A_STANDOUT());
}
addch(c as u32);
}
attroff(A_STANDOUT());
} else if end_y == line {
attron(A_STANDOUT());
for (i, c) in word_row.chars().enumerate() {
if i == end_x {
attroff(A_STANDOUT());
}
addch(c as u32);
}
} else {
addstr(&word_row);
}
} else {
addstr(&word_row);
}
}
}
// Draw the console.
let console_entry = if let Some(entity) = game_state.get_entity_at_cursor() {
match *entity {
CursorEntity::Word { ref word, .. } => word.to_ascii_uppercase(),
CursorEntity::Brackets { ref pair, .. } => pair.0.to_string(),
}
} else {
// If we're in a column, display the character at the cursor. Otherwise, display an empty
// string.
match game_state.get_cursor_column_index() {
Some(..) => {
let (x, y) = game_state.cursor_position;
char::from_u32(mvinch(y, x) as u32).unwrap().to_string()
}
None => "".to_string(),
}
};
mvprintw(starting_line + ROWS - 1,
MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN,
&format!(">{}", console_entry));
// Draw the console entries, starting from the bottom.
let mut entries_row = starting_line + ROWS - 3;
for entry in game_state.entries.iter().rev() {
let col = MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN;
// Only print the entry's lines that fall within the address columns.
let mvprintw_checked = |row, col, lines: &[&str]| {
for (i, line) in lines.iter().rev().enumerate() {
if row >= starting_line {
mvprintw(row - i as i32, col, line);
}
}
};
match *entry {
Entry::Incorrect { num_correct, ref word } => {
mvprintw_checked(entries_row,
col,
&[&format!(">{}", word.to_ascii_uppercase()),
">Entry denied",
&format!(">{}/{} correct.", num_correct, 7)]);
}
Entry::Correct { ref word } => {
mvprintw_checked(entries_row,
col,
&[&format!(">{}", word.to_ascii_uppercase()),
">Exact match!",
">Please wait",
">while system",
">is accessed."]);
}
Entry::DudRemoval => {
mvprintw_checked(entries_row, col, &[">", ">Dud removed."]);
}
Entry::AllowanceReplenish => {
mvprintw_checked(entries_row, col, &[">", ">Allowance", ">replenished."]);
}
}
entries_row -= entry.display_rows() as i32;
}
// Move the cursor to the current position
let (x, y) = game_state.cursor_position;
mv(y, x);
}
}
|
{
let ch: i32 = getch();
// Allow WASD and HJKL controls
const KEY_W: i32 = 'w' as i32;
const KEY_A: i32 = 'a' as i32;
const KEY_S: i32 = 's' as i32;
const KEY_D: i32 = 'd' as i32;
const KEY_H: i32 = 'h' as i32;
const KEY_J: i32 = 'j' as i32;
const KEY_K: i32 = 'k' as i32;
const KEY_L: i32 = 'l' as i32;
const KEY_ESC: i32 = 27;
const KEY_ENTER: i32 = '\n' as i32;
match ch as i32 {
KEY_LEFT | KEY_A | KEY_H => Some(InputEvent::Left),
KEY_RIGHT | KEY_D | KEY_L => Some(InputEvent::Right),
KEY_UP | KEY_W | KEY_K => Some(InputEvent::Up),
|
identifier_body
|
window.rs
|
use ::game::*;
use ::ncurses::*;
use std::ascii::AsciiExt;
use std::char;
use std::env;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use std::mem;
use std::iter::repeat;
use ::itertools::Itertools;
use ::consts::*;
pub struct NCursesWindow;
pub fn create() -> Box<Window> {
Box::new(NCursesWindow::new())
}
pub trait Window {
fn render(&self, &GameState);
fn handle_input(&self, &mut GameState) -> Option<InputEvent>;
}
impl NCursesWindow {
fn new() -> NCursesWindow {
// Enable all mouse events for the current terminal.
env::set_var("TERM", "xterm-1003");
setlocale(LcCategory::all, "");
initscr();
raw();
// Extended keyboard and mouse events.
keypad(stdscr(), true);
nodelay(stdscr(), true);
noecho();
let mouse_events = ALL_MOUSE_EVENTS | REPORT_MOUSE_POSITION;
mousemask(mouse_events as u32, None);
mouseinterval(0);
if has_mouse() {
info!("Mouse driver initialized.")
} else {
info!("Error initializing mouse driver.");
}
NCursesWindow
}
}
impl Drop for NCursesWindow {
fn drop(&mut self) {
refresh();
endwin();
}
}
impl Window for NCursesWindow {
fn handle_input(&self, game_state: &mut GameState) -> Option<InputEvent> {
let ch: i32 = getch();
// Allow WASD and HJKL controls
const KEY_W: i32 = 'w' as i32;
const KEY_A: i32 = 'a' as i32;
const KEY_S: i32 = 's' as i32;
const KEY_D: i32 = 'd' as i32;
const KEY_H: i32 = 'h' as i32;
const KEY_J: i32 = 'j' as i32;
const KEY_K: i32 = 'k' as i32;
const KEY_L: i32 = 'l' as i32;
const KEY_ESC: i32 = 27;
const KEY_ENTER: i32 = '\n' as i32;
match ch as i32 {
KEY_LEFT | KEY_A | KEY_H => Some(InputEvent::Left),
KEY_RIGHT | KEY_D | KEY_L => Some(InputEvent::Right),
KEY_UP | KEY_W | KEY_K => Some(InputEvent::Up),
KEY_DOWN | KEY_S | KEY_J => Some(InputEvent::Down),
KEY_MOUSE => {
let mut event: MEVENT = unsafe { mem::uninitialized() };
assert!(getmouse(&mut event) == OK);
game_state.cursor_position = (event.x, event.y);
if event.bstate & (BUTTON1_PRESSED as u32) != 0 {
Some(InputEvent::Action)
} else {
None
}
}
KEY_ENTER => Some(InputEvent::Action),
KEY_ESC => Some(InputEvent::Quit),
_ => None,
}
}
fn render(&self, game_state: &GameState) {
refresh();
erase();
let starting_line = MARGIN + 5;
// If the game is over, render the ending state and return early.
if let Some(ref ending) = game_state.status {
match *ending {
GameEnding::Won => {
curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE);
let reader = BufReader::new(File::open("resources/vault_boy.txt").unwrap());
let mut line_counter = 0;
for line in reader.lines().map(|l| l.unwrap()) {
mvprintw(line_counter as i32,
0,
&format!("{:^1$}", line, WINDOW_WIDTH as usize));
line_counter += 1;
}
mvprintw(line_counter as i32,
0,
&format!("{:^1$}", "ACCESS GRANTED", WINDOW_WIDTH as usize));
|
mvprintw((starting_line + ROWS) / 2,
0,
&format!("{:^1$}", "TERMINAL LOCKED", WINDOW_WIDTH as usize));
mvprintw((starting_line + ROWS + 1) / 2,
0,
&format!("{:^1$}",
"PLEASE CONTACT AN ADMINISTRATOR",
WINDOW_WIDTH as usize));
}
}
return;
}
// Print information at top
mvprintw(MARGIN, MARGIN, "ROBCO INDUSTRIES (TM) TERMLINK PROTOCOL");
mvprintw(MARGIN + 1, MARGIN, "ENTER PASSWORD NOW");
mvprintw(LINES() - 1, 0, "Press Esc to exit");
// Print attempts remaining
let visual_attempts = repeat("█")
.take(game_state.attempts as usize)
.join(" ");
mvprintw(MARGIN + 3,
MARGIN,
&format!("{} ATTEMPT(S) LEFT: {}",
game_state.attempts,
visual_attempts));
// Draw random addresses and word columns
let highlight_positions = match game_state.get_entity_at_cursor() {
Some(cursor_entity) => {
if cursor_entity.highlighted() {
let (start, end) = cursor_entity.indices();
let start_x = start % WORD_COLUMN_WIDTH as usize;
let start_y = start / WORD_COLUMN_WIDTH as usize;
let end_x = end % WORD_COLUMN_WIDTH as usize;
let end_y = end / WORD_COLUMN_WIDTH as usize;
Some(((start_x, start_y), (end_x, end_y)))
} else {
None
}
}
None => None,
};
// Draw both columns
for (column_index, column) in game_state.columns.iter().enumerate() {
let word_data: Vec<char> = column.render_word_data().chars().collect::<Vec<char>>();
let word_chunks = word_data.chunks(WORD_COLUMN_WIDTH as usize);
for (line, (address, word_chunk)) in column.addresses
.iter()
.zip(word_chunks.into_iter())
.enumerate() {
let row = starting_line + line as i32;
let col = MARGIN + column_index as i32 * (COLUMN_WIDTH + COLUMN_PADDING);
let hex_address: String =
format!("{:#01$X}", address, ADDRESS_COLUMN_WIDTH as usize);
let word_row: String = word_chunk.iter().map(|&c| c).collect::<String>();
mvprintw(row, col, &(hex_address + " "));
if let Some(((start_x, start_y), (end_x, end_y))) = highlight_positions {
if game_state.get_cursor_column_index().unwrap() != column_index {
// We're not in the correct column, so just write out the line and
// continue.
addstr(&word_row);
continue;
}
// If the highlight ends on the same line, we just iterate over the chunk and
// turn on and off the highlight at the start and the end.
if start_y == line && start_y == end_y {
for (i, c) in word_row.chars().enumerate() {
if i == start_x {
attron(A_STANDOUT());
}
if i == end_x {
attroff(A_STANDOUT());
}
addch(c as u32);
}
} else if start_y == line {
for (i, c) in word_row.chars().enumerate() {
if i == start_x {
attron(A_STANDOUT());
}
addch(c as u32);
}
attroff(A_STANDOUT());
} else if end_y == line {
attron(A_STANDOUT());
for (i, c) in word_row.chars().enumerate() {
if i == end_x {
attroff(A_STANDOUT());
}
addch(c as u32);
}
} else {
addstr(&word_row);
}
} else {
addstr(&word_row);
}
}
}
// Draw the console.
let console_entry = if let Some(entity) = game_state.get_entity_at_cursor() {
match *entity {
CursorEntity::Word { ref word, .. } => word.to_ascii_uppercase(),
CursorEntity::Brackets { ref pair, .. } => pair.0.to_string(),
}
} else {
// If we're in a column, display the character at the cursor. Otherwise, display an empty
// string.
match game_state.get_cursor_column_index() {
Some(..) => {
let (x, y) = game_state.cursor_position;
char::from_u32(mvinch(y, x) as u32).unwrap().to_string()
}
None => "".to_string(),
}
};
mvprintw(starting_line + ROWS - 1,
MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN,
&format!(">{}", console_entry));
// Draw the console entries, starting from the bottom.
let mut entries_row = starting_line + ROWS - 3;
for entry in game_state.entries.iter().rev() {
let col = MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN;
// Only print the entry's lines that fall within the address columns.
let mvprintw_checked = |row, col, lines: &[&str]| {
for (i, line) in lines.iter().rev().enumerate() {
if row >= starting_line {
mvprintw(row - i as i32, col, line);
}
}
};
match *entry {
Entry::Incorrect { num_correct, ref word } => {
mvprintw_checked(entries_row,
col,
&[&format!(">{}", word.to_ascii_uppercase()),
">Entry denied",
&format!(">{}/{} correct.", num_correct, 7)]);
}
Entry::Correct { ref word } => {
mvprintw_checked(entries_row,
col,
&[&format!(">{}", word.to_ascii_uppercase()),
">Exact match!",
">Please wait",
">while system",
">is accessed."]);
}
Entry::DudRemoval => {
mvprintw_checked(entries_row, col, &[">", ">Dud removed."]);
}
Entry::AllowanceReplenish => {
mvprintw_checked(entries_row, col, &[">", ">Allowance", ">replenished."]);
}
}
entries_row -= entry.display_rows() as i32;
}
// Move the cursor to the current position
let (x, y) = game_state.cursor_position;
mv(y, x);
}
}
|
}
GameEnding::Lost => {
curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE);
|
random_line_split
|
u32x4.rs
|
// Copyright 2015 blake2-rfc Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![cfg_attr(feature = "cargo-clippy", allow(inline_always))]
use simdty::u32x4;
#[cfg(feature = "simd_opt")]
#[inline(always)]
pub fn
|
(vec: u32x4, n: u32) -> u32x4 {
match n {
16 => rotate_right_16(vec),
8 => rotate_right_8(vec),
_ => rotate_right_any(vec, n),
}
}
#[cfg(not(feature = "simd_opt"))]
#[inline(always)]
pub fn rotate_right_const(vec: u32x4, n: u32) -> u32x4 {
rotate_right_any(vec, n)
}
#[inline(always)]
fn rotate_right_any(vec: u32x4, n: u32) -> u32x4 {
let r = n as u32;
let l = 32 - r;
(vec >> u32x4::new(r, r, r, r)) ^ (vec << u32x4::new(l, l, l, l))
}
#[cfg(feature = "simd_opt")]
#[inline(always)]
fn rotate_right_16(vec: u32x4) -> u32x4 {
if cfg!(target_feature = "ssse3") {
// pshufb (SSSE3) / vpshufb (AVX2)
transmute_shuffle!(u8x16, simd_shuffle16, vec,
[ 2, 3, 0, 1,
6, 7, 4, 5,
10, 11, 8, 9,
14, 15, 12, 13])
} else if cfg!(any(target_feature = "sse2", target_feature = "neon")) {
// pshuflw+pshufhw (SSE2) / vrev (NEON)
transmute_shuffle!(u16x8, simd_shuffle8, vec,
[1, 0,
3, 2,
5, 4,
7, 6])
} else {
rotate_right_any(vec, 16)
}
}
#[cfg(feature = "simd_opt")]
#[inline(always)]
fn rotate_right_8(vec: u32x4) -> u32x4 {
if cfg!(target_feature = "ssse3") {
// pshufb (SSSE3) / vpshufb (AVX2)
transmute_shuffle!(u8x16, simd_shuffle16, vec,
[ 1, 2, 3, 0,
5, 6, 7, 4,
9, 10, 11, 8,
13, 14, 15, 12])
} else {
rotate_right_any(vec, 8)
}
}
|
rotate_right_const
|
identifier_name
|
u32x4.rs
|
// Copyright 2015 blake2-rfc Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![cfg_attr(feature = "cargo-clippy", allow(inline_always))]
use simdty::u32x4;
#[cfg(feature = "simd_opt")]
#[inline(always)]
pub fn rotate_right_const(vec: u32x4, n: u32) -> u32x4 {
match n {
16 => rotate_right_16(vec),
8 => rotate_right_8(vec),
_ => rotate_right_any(vec, n),
}
}
#[cfg(not(feature = "simd_opt"))]
#[inline(always)]
pub fn rotate_right_const(vec: u32x4, n: u32) -> u32x4
|
#[inline(always)]
fn rotate_right_any(vec: u32x4, n: u32) -> u32x4 {
let r = n as u32;
let l = 32 - r;
(vec >> u32x4::new(r, r, r, r)) ^ (vec << u32x4::new(l, l, l, l))
}
#[cfg(feature = "simd_opt")]
#[inline(always)]
fn rotate_right_16(vec: u32x4) -> u32x4 {
if cfg!(target_feature = "ssse3") {
// pshufb (SSSE3) / vpshufb (AVX2)
transmute_shuffle!(u8x16, simd_shuffle16, vec,
[ 2, 3, 0, 1,
6, 7, 4, 5,
10, 11, 8, 9,
14, 15, 12, 13])
} else if cfg!(any(target_feature = "sse2", target_feature = "neon")) {
// pshuflw+pshufhw (SSE2) / vrev (NEON)
transmute_shuffle!(u16x8, simd_shuffle8, vec,
[1, 0,
3, 2,
5, 4,
7, 6])
} else {
rotate_right_any(vec, 16)
}
}
#[cfg(feature = "simd_opt")]
#[inline(always)]
fn rotate_right_8(vec: u32x4) -> u32x4 {
if cfg!(target_feature = "ssse3") {
// pshufb (SSSE3) / vpshufb (AVX2)
transmute_shuffle!(u8x16, simd_shuffle16, vec,
[ 1, 2, 3, 0,
5, 6, 7, 4,
9, 10, 11, 8,
13, 14, 15, 12])
} else {
rotate_right_any(vec, 8)
}
}
|
{
rotate_right_any(vec, n)
}
|
identifier_body
|
u32x4.rs
|
// Copyright 2015 blake2-rfc Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![cfg_attr(feature = "cargo-clippy", allow(inline_always))]
use simdty::u32x4;
|
pub fn rotate_right_const(vec: u32x4, n: u32) -> u32x4 {
match n {
16 => rotate_right_16(vec),
8 => rotate_right_8(vec),
_ => rotate_right_any(vec, n),
}
}
#[cfg(not(feature = "simd_opt"))]
#[inline(always)]
pub fn rotate_right_const(vec: u32x4, n: u32) -> u32x4 {
rotate_right_any(vec, n)
}
#[inline(always)]
fn rotate_right_any(vec: u32x4, n: u32) -> u32x4 {
let r = n as u32;
let l = 32 - r;
(vec >> u32x4::new(r, r, r, r)) ^ (vec << u32x4::new(l, l, l, l))
}
#[cfg(feature = "simd_opt")]
#[inline(always)]
fn rotate_right_16(vec: u32x4) -> u32x4 {
if cfg!(target_feature = "ssse3") {
// pshufb (SSSE3) / vpshufb (AVX2)
transmute_shuffle!(u8x16, simd_shuffle16, vec,
[ 2, 3, 0, 1,
6, 7, 4, 5,
10, 11, 8, 9,
14, 15, 12, 13])
} else if cfg!(any(target_feature = "sse2", target_feature = "neon")) {
// pshuflw+pshufhw (SSE2) / vrev (NEON)
transmute_shuffle!(u16x8, simd_shuffle8, vec,
[1, 0,
3, 2,
5, 4,
7, 6])
} else {
rotate_right_any(vec, 16)
}
}
#[cfg(feature = "simd_opt")]
#[inline(always)]
fn rotate_right_8(vec: u32x4) -> u32x4 {
if cfg!(target_feature = "ssse3") {
// pshufb (SSSE3) / vpshufb (AVX2)
transmute_shuffle!(u8x16, simd_shuffle16, vec,
[ 1, 2, 3, 0,
5, 6, 7, 4,
9, 10, 11, 8,
13, 14, 15, 12])
} else {
rotate_right_any(vec, 8)
}
}
|
#[cfg(feature = "simd_opt")]
#[inline(always)]
|
random_line_split
|
u32x4.rs
|
// Copyright 2015 blake2-rfc Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![cfg_attr(feature = "cargo-clippy", allow(inline_always))]
use simdty::u32x4;
#[cfg(feature = "simd_opt")]
#[inline(always)]
pub fn rotate_right_const(vec: u32x4, n: u32) -> u32x4 {
match n {
16 => rotate_right_16(vec),
8 => rotate_right_8(vec),
_ => rotate_right_any(vec, n),
}
}
#[cfg(not(feature = "simd_opt"))]
#[inline(always)]
pub fn rotate_right_const(vec: u32x4, n: u32) -> u32x4 {
rotate_right_any(vec, n)
}
#[inline(always)]
fn rotate_right_any(vec: u32x4, n: u32) -> u32x4 {
let r = n as u32;
let l = 32 - r;
(vec >> u32x4::new(r, r, r, r)) ^ (vec << u32x4::new(l, l, l, l))
}
#[cfg(feature = "simd_opt")]
#[inline(always)]
fn rotate_right_16(vec: u32x4) -> u32x4 {
if cfg!(target_feature = "ssse3") {
// pshufb (SSSE3) / vpshufb (AVX2)
transmute_shuffle!(u8x16, simd_shuffle16, vec,
[ 2, 3, 0, 1,
6, 7, 4, 5,
10, 11, 8, 9,
14, 15, 12, 13])
} else if cfg!(any(target_feature = "sse2", target_feature = "neon"))
|
else {
rotate_right_any(vec, 16)
}
}
#[cfg(feature = "simd_opt")]
#[inline(always)]
fn rotate_right_8(vec: u32x4) -> u32x4 {
if cfg!(target_feature = "ssse3") {
// pshufb (SSSE3) / vpshufb (AVX2)
transmute_shuffle!(u8x16, simd_shuffle16, vec,
[ 1, 2, 3, 0,
5, 6, 7, 4,
9, 10, 11, 8,
13, 14, 15, 12])
} else {
rotate_right_any(vec, 8)
}
}
|
{
// pshuflw+pshufhw (SSE2) / vrev (NEON)
transmute_shuffle!(u16x8, simd_shuffle8, vec,
[1, 0,
3, 2,
5, 4,
7, 6])
}
|
conditional_block
|
source_util.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap;
use codemap::{FileMap, Loc, Pos, ExpandedFrom, span};
use codemap::{CallInfo, NameAndSpan};
use ext::base::*;
use ext::base;
use ext::build::{mk_base_vec_e, mk_uint, mk_u8, mk_base_str};
use parse;
use print::pprust;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/* line!(): expands to the current line number */
pub fn expand_line(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = topmost_expn_info(cx.backtrace().get());
let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_uint(cx, topmost.call_site, loc.line))
}
/* col!(): expands to the current column number */
pub fn expand_col(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "col!");
let topmost = topmost_expn_info(cx.backtrace().get());
let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_uint(cx, topmost.call_site, loc.col.to_uint()))
}
/* file!(): expands to the current filename */
/* The filemap (`loc.file`) contains a bunch more information we could spit
* out if we wanted. */
pub fn expand_file(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = topmost_expn_info(cx.backtrace().get());
let Loc { file: @FileMap { name: filename, _ }, _ } =
cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_base_str(cx, topmost.call_site, filename))
}
pub fn
|
(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let s = pprust::tts_to_str(tts, cx.parse_sess().interner);
base::MRExpr(mk_base_str(cx, sp, s))
}
pub fn expand_mod(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "module_path!");
base::MRExpr(mk_base_str(cx, sp,
str::connect(cx.mod_path().map(
|x| cx.str_of(*x)), ~"::")))
}
// include! : parse the given file as an expr
// This is generally a bad idea because it's going to behave
// unhygienically.
pub fn expand_include(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include!");
let p = parse::new_sub_parser_from_file(
cx.parse_sess(), cx.cfg(),
&res_rel_file(cx, sp, &Path(file)), sp);
base::MRExpr(p.parse_expr())
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include_str!");
let res = io::read_whole_file_str(&res_rel_file(cx, sp, &Path(file)));
match res {
result::Ok(_) => { /* Continue. */ }
result::Err(ref e) => {
cx.parse_sess().span_diagnostic.handler().fatal((*e));
}
}
base::MRExpr(mk_base_str(cx, sp, result::unwrap(res)))
}
pub fn expand_include_bin(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include_bin!");
match io::read_whole_file(&res_rel_file(cx, sp, &Path(file))) {
result::Ok(src) => {
let u8_exprs = vec::map(src, |char| {
mk_u8(cx, sp, *char)
});
base::MRExpr(mk_base_vec_e(cx, sp, u8_exprs))
}
result::Err(ref e) => {
cx.parse_sess().span_diagnostic.handler().fatal((*e))
}
}
}
// recur along an ExpnInfo chain to find the original expression
fn topmost_expn_info(expn_info: @codemap::ExpnInfo) -> @codemap::ExpnInfo {
match *expn_info {
ExpandedFrom(CallInfo { call_site: ref call_site, _ }) => {
match call_site.expn_info {
Some(next_expn_info) => {
match *next_expn_info {
ExpandedFrom(CallInfo {
callee: NameAndSpan { name: ref name, _ },
_
}) => {
// Don't recurse into file using "include!"
if *name == ~"include" {
expn_info
} else {
topmost_expn_info(next_expn_info)
}
}
}
},
None => expn_info
}
}
}
}
// resolve a file-system path to an absolute file-system path (if it
// isn't already)
fn res_rel_file(cx: @ext_ctxt, sp: codemap::span, arg: &Path) -> Path {
// NB: relative paths are resolved relative to the compilation unit
if !arg.is_absolute {
let cu = Path(cx.codemap().span_to_filename(sp));
cu.dir_path().push_many(arg.components)
} else {
copy *arg
}
}
|
expand_stringify
|
identifier_name
|
source_util.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap;
use codemap::{FileMap, Loc, Pos, ExpandedFrom, span};
use codemap::{CallInfo, NameAndSpan};
use ext::base::*;
use ext::base;
use ext::build::{mk_base_vec_e, mk_uint, mk_u8, mk_base_str};
use parse;
use print::pprust;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/* line!(): expands to the current line number */
pub fn expand_line(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = topmost_expn_info(cx.backtrace().get());
let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_uint(cx, topmost.call_site, loc.line))
}
/* col!(): expands to the current column number */
pub fn expand_col(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "col!");
let topmost = topmost_expn_info(cx.backtrace().get());
let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_uint(cx, topmost.call_site, loc.col.to_uint()))
}
/* file!(): expands to the current filename */
/* The filemap (`loc.file`) contains a bunch more information we could spit
* out if we wanted. */
pub fn expand_file(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = topmost_expn_info(cx.backtrace().get());
let Loc { file: @FileMap { name: filename, _ }, _ } =
cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_base_str(cx, topmost.call_site, filename))
}
pub fn expand_stringify(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let s = pprust::tts_to_str(tts, cx.parse_sess().interner);
base::MRExpr(mk_base_str(cx, sp, s))
}
pub fn expand_mod(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "module_path!");
base::MRExpr(mk_base_str(cx, sp,
str::connect(cx.mod_path().map(
|x| cx.str_of(*x)), ~"::")))
}
// include! : parse the given file as an expr
// This is generally a bad idea because it's going to behave
// unhygienically.
pub fn expand_include(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include!");
let p = parse::new_sub_parser_from_file(
cx.parse_sess(), cx.cfg(),
&res_rel_file(cx, sp, &Path(file)), sp);
base::MRExpr(p.parse_expr())
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include_str!");
let res = io::read_whole_file_str(&res_rel_file(cx, sp, &Path(file)));
match res {
result::Ok(_) => { /* Continue. */ }
result::Err(ref e) => {
cx.parse_sess().span_diagnostic.handler().fatal((*e));
}
}
base::MRExpr(mk_base_str(cx, sp, result::unwrap(res)))
}
pub fn expand_include_bin(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include_bin!");
match io::read_whole_file(&res_rel_file(cx, sp, &Path(file))) {
result::Ok(src) => {
let u8_exprs = vec::map(src, |char| {
mk_u8(cx, sp, *char)
});
base::MRExpr(mk_base_vec_e(cx, sp, u8_exprs))
}
result::Err(ref e) => {
cx.parse_sess().span_diagnostic.handler().fatal((*e))
}
}
}
// recur along an ExpnInfo chain to find the original expression
fn topmost_expn_info(expn_info: @codemap::ExpnInfo) -> @codemap::ExpnInfo {
match *expn_info {
ExpandedFrom(CallInfo { call_site: ref call_site, _ }) => {
match call_site.expn_info {
Some(next_expn_info) => {
match *next_expn_info {
ExpandedFrom(CallInfo {
callee: NameAndSpan { name: ref name, _ },
_
}) => {
|
if *name == ~"include" {
expn_info
} else {
topmost_expn_info(next_expn_info)
}
}
}
},
None => expn_info
}
}
}
}
// resolve a file-system path to an absolute file-system path (if it
// isn't already)
fn res_rel_file(cx: @ext_ctxt, sp: codemap::span, arg: &Path) -> Path {
// NB: relative paths are resolved relative to the compilation unit
if !arg.is_absolute {
let cu = Path(cx.codemap().span_to_filename(sp));
cu.dir_path().push_many(arg.components)
} else {
copy *arg
}
}
|
// Don't recurse into file using "include!"
|
random_line_split
|
source_util.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap;
use codemap::{FileMap, Loc, Pos, ExpandedFrom, span};
use codemap::{CallInfo, NameAndSpan};
use ext::base::*;
use ext::base;
use ext::build::{mk_base_vec_e, mk_uint, mk_u8, mk_base_str};
use parse;
use print::pprust;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/* line!(): expands to the current line number */
pub fn expand_line(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = topmost_expn_info(cx.backtrace().get());
let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_uint(cx, topmost.call_site, loc.line))
}
/* col!(): expands to the current column number */
pub fn expand_col(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "col!");
let topmost = topmost_expn_info(cx.backtrace().get());
let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_uint(cx, topmost.call_site, loc.col.to_uint()))
}
/* file!(): expands to the current filename */
/* The filemap (`loc.file`) contains a bunch more information we could spit
* out if we wanted. */
pub fn expand_file(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = topmost_expn_info(cx.backtrace().get());
let Loc { file: @FileMap { name: filename, _ }, _ } =
cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_base_str(cx, topmost.call_site, filename))
}
pub fn expand_stringify(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let s = pprust::tts_to_str(tts, cx.parse_sess().interner);
base::MRExpr(mk_base_str(cx, sp, s))
}
pub fn expand_mod(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "module_path!");
base::MRExpr(mk_base_str(cx, sp,
str::connect(cx.mod_path().map(
|x| cx.str_of(*x)), ~"::")))
}
// include! : parse the given file as an expr
// This is generally a bad idea because it's going to behave
// unhygienically.
pub fn expand_include(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include!");
let p = parse::new_sub_parser_from_file(
cx.parse_sess(), cx.cfg(),
&res_rel_file(cx, sp, &Path(file)), sp);
base::MRExpr(p.parse_expr())
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include_str!");
let res = io::read_whole_file_str(&res_rel_file(cx, sp, &Path(file)));
match res {
result::Ok(_) => { /* Continue. */ }
result::Err(ref e) => {
cx.parse_sess().span_diagnostic.handler().fatal((*e));
}
}
base::MRExpr(mk_base_str(cx, sp, result::unwrap(res)))
}
pub fn expand_include_bin(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include_bin!");
match io::read_whole_file(&res_rel_file(cx, sp, &Path(file))) {
result::Ok(src) => {
let u8_exprs = vec::map(src, |char| {
mk_u8(cx, sp, *char)
});
base::MRExpr(mk_base_vec_e(cx, sp, u8_exprs))
}
result::Err(ref e) => {
cx.parse_sess().span_diagnostic.handler().fatal((*e))
}
}
}
// recur along an ExpnInfo chain to find the original expression
fn topmost_expn_info(expn_info: @codemap::ExpnInfo) -> @codemap::ExpnInfo {
match *expn_info {
ExpandedFrom(CallInfo { call_site: ref call_site, _ }) => {
match call_site.expn_info {
Some(next_expn_info) => {
match *next_expn_info {
ExpandedFrom(CallInfo {
callee: NameAndSpan { name: ref name, _ },
_
}) => {
// Don't recurse into file using "include!"
if *name == ~"include" {
expn_info
} else {
topmost_expn_info(next_expn_info)
}
}
}
},
None => expn_info
}
}
}
}
// resolve a file-system path to an absolute file-system path (if it
// isn't already)
fn res_rel_file(cx: @ext_ctxt, sp: codemap::span, arg: &Path) -> Path
|
{
// NB: relative paths are resolved relative to the compilation unit
if !arg.is_absolute {
let cu = Path(cx.codemap().span_to_filename(sp));
cu.dir_path().push_many(arg.components)
} else {
copy *arg
}
}
|
identifier_body
|
|
source_util.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap;
use codemap::{FileMap, Loc, Pos, ExpandedFrom, span};
use codemap::{CallInfo, NameAndSpan};
use ext::base::*;
use ext::base;
use ext::build::{mk_base_vec_e, mk_uint, mk_u8, mk_base_str};
use parse;
use print::pprust;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/* line!(): expands to the current line number */
pub fn expand_line(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = topmost_expn_info(cx.backtrace().get());
let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_uint(cx, topmost.call_site, loc.line))
}
/* col!(): expands to the current column number */
pub fn expand_col(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "col!");
let topmost = topmost_expn_info(cx.backtrace().get());
let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_uint(cx, topmost.call_site, loc.col.to_uint()))
}
/* file!(): expands to the current filename */
/* The filemap (`loc.file`) contains a bunch more information we could spit
* out if we wanted. */
pub fn expand_file(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = topmost_expn_info(cx.backtrace().get());
let Loc { file: @FileMap { name: filename, _ }, _ } =
cx.codemap().lookup_char_pos(topmost.call_site.lo);
base::MRExpr(mk_base_str(cx, topmost.call_site, filename))
}
pub fn expand_stringify(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let s = pprust::tts_to_str(tts, cx.parse_sess().interner);
base::MRExpr(mk_base_str(cx, sp, s))
}
pub fn expand_mod(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
base::check_zero_tts(cx, sp, tts, "module_path!");
base::MRExpr(mk_base_str(cx, sp,
str::connect(cx.mod_path().map(
|x| cx.str_of(*x)), ~"::")))
}
// include! : parse the given file as an expr
// This is generally a bad idea because it's going to behave
// unhygienically.
pub fn expand_include(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include!");
let p = parse::new_sub_parser_from_file(
cx.parse_sess(), cx.cfg(),
&res_rel_file(cx, sp, &Path(file)), sp);
base::MRExpr(p.parse_expr())
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include_str!");
let res = io::read_whole_file_str(&res_rel_file(cx, sp, &Path(file)));
match res {
result::Ok(_) => { /* Continue. */ }
result::Err(ref e) => {
cx.parse_sess().span_diagnostic.handler().fatal((*e));
}
}
base::MRExpr(mk_base_str(cx, sp, result::unwrap(res)))
}
pub fn expand_include_bin(cx: @ext_ctxt, sp: span, tts: &[ast::token_tree])
-> base::MacResult {
let file = get_single_str_from_tts(cx, sp, tts, "include_bin!");
match io::read_whole_file(&res_rel_file(cx, sp, &Path(file))) {
result::Ok(src) =>
|
result::Err(ref e) => {
cx.parse_sess().span_diagnostic.handler().fatal((*e))
}
}
}
// recur along an ExpnInfo chain to find the original expression
fn topmost_expn_info(expn_info: @codemap::ExpnInfo) -> @codemap::ExpnInfo {
match *expn_info {
ExpandedFrom(CallInfo { call_site: ref call_site, _ }) => {
match call_site.expn_info {
Some(next_expn_info) => {
match *next_expn_info {
ExpandedFrom(CallInfo {
callee: NameAndSpan { name: ref name, _ },
_
}) => {
// Don't recurse into file using "include!"
if *name == ~"include" {
expn_info
} else {
topmost_expn_info(next_expn_info)
}
}
}
},
None => expn_info
}
}
}
}
// resolve a file-system path to an absolute file-system path (if it
// isn't already)
fn res_rel_file(cx: @ext_ctxt, sp: codemap::span, arg: &Path) -> Path {
// NB: relative paths are resolved relative to the compilation unit
if !arg.is_absolute {
let cu = Path(cx.codemap().span_to_filename(sp));
cu.dir_path().push_many(arg.components)
} else {
copy *arg
}
}
|
{
let u8_exprs = vec::map(src, |char| {
mk_u8(cx, sp, *char)
});
base::MRExpr(mk_base_vec_e(cx, sp, u8_exprs))
}
|
conditional_block
|
either.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A type that represents one of two alternatives
use cmp::Eq;
use kinds::Copy;
use result::Result;
use result;
use vec;
/// The either type
#[deriving(Clone, Eq)]
pub enum Either<T, U> {
Left(T),
Right(U)
}
#[inline(always)]
pub fn either<T, U, V>(f_left: &fn(&T) -> V,
f_right: &fn(&U) -> V, value: &Either<T, U>) -> V {
/*!
* Applies a function based on the given either value
*
* If `value` is left(T) then `f_left` is applied to its contents, if
* `value` is right(U) then `f_right` is applied to its contents, and the
* result is returned.
*/
match *value {
Left(ref l) => f_left(l),
Right(ref r) => f_right(r)
}
}
pub fn lefts<T:Copy,U>(eithers: &[Either<T, U>]) -> ~[T] {
//! Extracts from a vector of either all the left values
do vec::build_sized(eithers.len()) |push| {
for vec::each(eithers) |elt| {
match *elt {
Left(ref l) => { push(*l); }
_ => { /* fallthrough */ }
}
}
}
}
pub fn rights<T, U: Copy>(eithers: &[Either<T, U>]) -> ~[U] {
//! Extracts from a vector of either all the right values
do vec::build_sized(eithers.len()) |push| {
for vec::each(eithers) |elt| {
match *elt {
Right(ref r) => { push(*r); }
_ => { /* fallthrough */ }
}
}
}
}
pub fn partition<T, U>(eithers: ~[Either<T, U>])
-> (~[T], ~[U]) {
/*!
* Extracts from a vector of either all the left values and right values
*
* Returns a structure containing a vector of left values and a vector of
* right values.
*/
let mut lefts: ~[T] = ~[];
let mut rights: ~[U] = ~[];
do vec::consume(eithers) |_i, elt| {
match elt {
Left(l) => lefts.push(l),
Right(r) => rights.push(r)
}
}
return (lefts, rights);
}
#[inline(always)]
pub fn flip<T, U>(eith: Either<T, U>) -> Either<U, T> {
//! Flips between left and right of a given either
match eith {
Right(r) => Left(r),
Left(l) => Right(l)
}
}
#[inline(always)]
pub fn to_result<T, U>(eith: Either<T, U>)
-> Result<U, T> {
/*!
* Converts either::t to a result::t
*
* Converts an `either` type to a `result` type, making the "right" choice
* an ok result, and the "left" choice a fail
*/
match eith {
Right(r) => result::Ok(r),
Left(l) => result::Err(l)
}
}
#[inline(always)]
pub fn is_left<T, U>(eith: &Either<T, U>) -> bool {
//! Checks whether the given value is a left
match *eith { Left(_) => true, _ => false }
}
#[inline(always)]
pub fn is_right<T, U>(eith: &Either<T, U>) -> bool {
//! Checks whether the given value is a right
match *eith { Right(_) => true, _ => false }
}
#[inline(always)]
pub fn unwrap_left<T,U>(eith: Either<T,U>) -> T {
//! Retrieves the value in the left branch. Fails if the either is Right.
match eith {
Left(x) => x,
Right(_) => fail!(~"either::unwrap_left Right")
}
}
#[inline(always)]
pub fn unwrap_right<T,U>(eith: Either<T,U>) -> U {
//! Retrieves the value in the right branch. Fails if the either is Left.
match eith {
Right(x) => x,
Left(_) => fail!(~"either::unwrap_right Left")
}
}
pub impl<T, U> Either<T, U> {
#[inline(always)]
fn either<V>(&self, f_left: &fn(&T) -> V, f_right: &fn(&U) -> V) -> V {
either(f_left, f_right, self)
}
#[inline(always)]
fn flip(self) -> Either<U, T> { flip(self) }
#[inline(always)]
fn to_result(self) -> Result<U, T> { to_result(self) }
#[inline(always)]
fn is_left(&self) -> bool { is_left(self) }
#[inline(always)]
fn is_right(&self) -> bool { is_right(self) }
#[inline(always)]
fn unwrap_left(self) -> T { unwrap_left(self) }
#[inline(always)]
fn unwrap_right(self) -> U { unwrap_right(self) }
}
#[test]
fn test_either_left() {
let val = Left(10);
fn f_left(x: &int) -> bool
|
fn f_right(_x: &uint) -> bool { false }
assert!((either(f_left, f_right, &val)));
}
#[test]
fn test_either_right() {
let val = Right(10u);
fn f_left(_x: &int) -> bool { false }
fn f_right(x: &uint) -> bool { *x == 10u }
assert!((either(f_left, f_right, &val)));
}
#[test]
fn test_lefts() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let result = lefts(input);
assert_eq!(result, ~[10, 12, 14]);
}
#[test]
fn test_lefts_none() {
let input: ~[Either<int, int>] = ~[Right(10), Right(10)];
let result = lefts(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_lefts_empty() {
let input: ~[Either<int, int>] = ~[];
let result = lefts(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_rights() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let result = rights(input);
assert_eq!(result, ~[11, 13]);
}
#[test]
fn test_rights_none() {
let input: ~[Either<int, int>] = ~[Left(10), Left(10)];
let result = rights(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_rights_empty() {
let input: ~[Either<int, int>] = ~[];
let result = rights(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_partition() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let (lefts, rights) = partition(input);
assert_eq!(lefts[0], 10);
assert_eq!(lefts[1], 12);
assert_eq!(lefts[2], 14);
assert_eq!(rights[0], 11);
assert_eq!(rights[1], 13);
}
#[test]
fn test_partition_no_lefts() {
let input: ~[Either<int, int>] = ~[Right(10), Right(11)];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 0u);
assert_eq!(vec::len(rights), 2u);
}
#[test]
fn test_partition_no_rights() {
let input: ~[Either<int, int>] = ~[Left(10), Left(11)];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 2u);
assert_eq!(vec::len(rights), 0u);
}
#[test]
fn test_partition_empty() {
let input: ~[Either<int, int>] = ~[];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 0u);
assert_eq!(vec::len(rights), 0u);
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
|
{ *x == 10 }
|
identifier_body
|
either.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A type that represents one of two alternatives
use cmp::Eq;
use kinds::Copy;
use result::Result;
use result;
use vec;
/// The either type
#[deriving(Clone, Eq)]
pub enum Either<T, U> {
Left(T),
Right(U)
}
#[inline(always)]
pub fn either<T, U, V>(f_left: &fn(&T) -> V,
f_right: &fn(&U) -> V, value: &Either<T, U>) -> V {
/*!
* Applies a function based on the given either value
*
* If `value` is left(T) then `f_left` is applied to its contents, if
* `value` is right(U) then `f_right` is applied to its contents, and the
* result is returned.
*/
match *value {
Left(ref l) => f_left(l),
Right(ref r) => f_right(r)
}
}
pub fn
|
<T:Copy,U>(eithers: &[Either<T, U>]) -> ~[T] {
//! Extracts from a vector of either all the left values
do vec::build_sized(eithers.len()) |push| {
for vec::each(eithers) |elt| {
match *elt {
Left(ref l) => { push(*l); }
_ => { /* fallthrough */ }
}
}
}
}
pub fn rights<T, U: Copy>(eithers: &[Either<T, U>]) -> ~[U] {
//! Extracts from a vector of either all the right values
do vec::build_sized(eithers.len()) |push| {
for vec::each(eithers) |elt| {
match *elt {
Right(ref r) => { push(*r); }
_ => { /* fallthrough */ }
}
}
}
}
pub fn partition<T, U>(eithers: ~[Either<T, U>])
-> (~[T], ~[U]) {
/*!
* Extracts from a vector of either all the left values and right values
*
* Returns a structure containing a vector of left values and a vector of
* right values.
*/
let mut lefts: ~[T] = ~[];
let mut rights: ~[U] = ~[];
do vec::consume(eithers) |_i, elt| {
match elt {
Left(l) => lefts.push(l),
Right(r) => rights.push(r)
}
}
return (lefts, rights);
}
#[inline(always)]
pub fn flip<T, U>(eith: Either<T, U>) -> Either<U, T> {
//! Flips between left and right of a given either
match eith {
Right(r) => Left(r),
Left(l) => Right(l)
}
}
#[inline(always)]
pub fn to_result<T, U>(eith: Either<T, U>)
-> Result<U, T> {
/*!
* Converts either::t to a result::t
*
* Converts an `either` type to a `result` type, making the "right" choice
* an ok result, and the "left" choice a fail
*/
match eith {
Right(r) => result::Ok(r),
Left(l) => result::Err(l)
}
}
#[inline(always)]
pub fn is_left<T, U>(eith: &Either<T, U>) -> bool {
//! Checks whether the given value is a left
match *eith { Left(_) => true, _ => false }
}
#[inline(always)]
pub fn is_right<T, U>(eith: &Either<T, U>) -> bool {
//! Checks whether the given value is a right
match *eith { Right(_) => true, _ => false }
}
#[inline(always)]
pub fn unwrap_left<T,U>(eith: Either<T,U>) -> T {
//! Retrieves the value in the left branch. Fails if the either is Right.
match eith {
Left(x) => x,
Right(_) => fail!(~"either::unwrap_left Right")
}
}
#[inline(always)]
pub fn unwrap_right<T,U>(eith: Either<T,U>) -> U {
//! Retrieves the value in the right branch. Fails if the either is Left.
match eith {
Right(x) => x,
Left(_) => fail!(~"either::unwrap_right Left")
}
}
pub impl<T, U> Either<T, U> {
#[inline(always)]
fn either<V>(&self, f_left: &fn(&T) -> V, f_right: &fn(&U) -> V) -> V {
either(f_left, f_right, self)
}
#[inline(always)]
fn flip(self) -> Either<U, T> { flip(self) }
#[inline(always)]
fn to_result(self) -> Result<U, T> { to_result(self) }
#[inline(always)]
fn is_left(&self) -> bool { is_left(self) }
#[inline(always)]
fn is_right(&self) -> bool { is_right(self) }
#[inline(always)]
fn unwrap_left(self) -> T { unwrap_left(self) }
#[inline(always)]
fn unwrap_right(self) -> U { unwrap_right(self) }
}
#[test]
fn test_either_left() {
let val = Left(10);
fn f_left(x: &int) -> bool { *x == 10 }
fn f_right(_x: &uint) -> bool { false }
assert!((either(f_left, f_right, &val)));
}
#[test]
fn test_either_right() {
let val = Right(10u);
fn f_left(_x: &int) -> bool { false }
fn f_right(x: &uint) -> bool { *x == 10u }
assert!((either(f_left, f_right, &val)));
}
#[test]
fn test_lefts() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let result = lefts(input);
assert_eq!(result, ~[10, 12, 14]);
}
#[test]
fn test_lefts_none() {
let input: ~[Either<int, int>] = ~[Right(10), Right(10)];
let result = lefts(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_lefts_empty() {
let input: ~[Either<int, int>] = ~[];
let result = lefts(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_rights() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let result = rights(input);
assert_eq!(result, ~[11, 13]);
}
#[test]
fn test_rights_none() {
let input: ~[Either<int, int>] = ~[Left(10), Left(10)];
let result = rights(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_rights_empty() {
let input: ~[Either<int, int>] = ~[];
let result = rights(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_partition() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let (lefts, rights) = partition(input);
assert_eq!(lefts[0], 10);
assert_eq!(lefts[1], 12);
assert_eq!(lefts[2], 14);
assert_eq!(rights[0], 11);
assert_eq!(rights[1], 13);
}
#[test]
fn test_partition_no_lefts() {
let input: ~[Either<int, int>] = ~[Right(10), Right(11)];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 0u);
assert_eq!(vec::len(rights), 2u);
}
#[test]
fn test_partition_no_rights() {
let input: ~[Either<int, int>] = ~[Left(10), Left(11)];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 2u);
assert_eq!(vec::len(rights), 0u);
}
#[test]
fn test_partition_empty() {
let input: ~[Either<int, int>] = ~[];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 0u);
assert_eq!(vec::len(rights), 0u);
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
|
lefts
|
identifier_name
|
either.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A type that represents one of two alternatives
use cmp::Eq;
use kinds::Copy;
use result::Result;
use result;
use vec;
/// The either type
#[deriving(Clone, Eq)]
pub enum Either<T, U> {
Left(T),
Right(U)
}
#[inline(always)]
pub fn either<T, U, V>(f_left: &fn(&T) -> V,
f_right: &fn(&U) -> V, value: &Either<T, U>) -> V {
/*!
* Applies a function based on the given either value
*
* If `value` is left(T) then `f_left` is applied to its contents, if
* `value` is right(U) then `f_right` is applied to its contents, and the
* result is returned.
*/
match *value {
Left(ref l) => f_left(l),
Right(ref r) => f_right(r)
}
}
pub fn lefts<T:Copy,U>(eithers: &[Either<T, U>]) -> ~[T] {
//! Extracts from a vector of either all the left values
do vec::build_sized(eithers.len()) |push| {
for vec::each(eithers) |elt| {
match *elt {
Left(ref l) => { push(*l); }
_ => { /* fallthrough */ }
}
}
}
}
pub fn rights<T, U: Copy>(eithers: &[Either<T, U>]) -> ~[U] {
//! Extracts from a vector of either all the right values
do vec::build_sized(eithers.len()) |push| {
for vec::each(eithers) |elt| {
match *elt {
Right(ref r) => { push(*r); }
_ => { /* fallthrough */ }
}
}
}
}
pub fn partition<T, U>(eithers: ~[Either<T, U>])
-> (~[T], ~[U]) {
/*!
* Extracts from a vector of either all the left values and right values
*
* Returns a structure containing a vector of left values and a vector of
* right values.
*/
let mut lefts: ~[T] = ~[];
let mut rights: ~[U] = ~[];
do vec::consume(eithers) |_i, elt| {
match elt {
Left(l) => lefts.push(l),
Right(r) => rights.push(r)
}
}
return (lefts, rights);
}
#[inline(always)]
pub fn flip<T, U>(eith: Either<T, U>) -> Either<U, T> {
//! Flips between left and right of a given either
match eith {
Right(r) => Left(r),
Left(l) => Right(l)
}
}
#[inline(always)]
pub fn to_result<T, U>(eith: Either<T, U>)
-> Result<U, T> {
/*!
* Converts either::t to a result::t
*
* Converts an `either` type to a `result` type, making the "right" choice
* an ok result, and the "left" choice a fail
*/
match eith {
Right(r) => result::Ok(r),
Left(l) => result::Err(l)
}
}
#[inline(always)]
pub fn is_left<T, U>(eith: &Either<T, U>) -> bool {
//! Checks whether the given value is a left
match *eith { Left(_) => true, _ => false }
}
#[inline(always)]
pub fn is_right<T, U>(eith: &Either<T, U>) -> bool {
//! Checks whether the given value is a right
match *eith { Right(_) => true, _ => false }
}
#[inline(always)]
pub fn unwrap_left<T,U>(eith: Either<T,U>) -> T {
//! Retrieves the value in the left branch. Fails if the either is Right.
match eith {
Left(x) => x,
Right(_) => fail!(~"either::unwrap_left Right")
}
}
#[inline(always)]
pub fn unwrap_right<T,U>(eith: Either<T,U>) -> U {
//! Retrieves the value in the right branch. Fails if the either is Left.
match eith {
Right(x) => x,
Left(_) => fail!(~"either::unwrap_right Left")
}
}
pub impl<T, U> Either<T, U> {
#[inline(always)]
fn either<V>(&self, f_left: &fn(&T) -> V, f_right: &fn(&U) -> V) -> V {
either(f_left, f_right, self)
}
#[inline(always)]
fn flip(self) -> Either<U, T> { flip(self) }
#[inline(always)]
fn to_result(self) -> Result<U, T> { to_result(self) }
#[inline(always)]
fn is_left(&self) -> bool { is_left(self) }
#[inline(always)]
fn is_right(&self) -> bool { is_right(self) }
#[inline(always)]
fn unwrap_left(self) -> T { unwrap_left(self) }
#[inline(always)]
fn unwrap_right(self) -> U { unwrap_right(self) }
}
#[test]
fn test_either_left() {
let val = Left(10);
fn f_left(x: &int) -> bool { *x == 10 }
fn f_right(_x: &uint) -> bool { false }
assert!((either(f_left, f_right, &val)));
}
|
fn f_left(_x: &int) -> bool { false }
fn f_right(x: &uint) -> bool { *x == 10u }
assert!((either(f_left, f_right, &val)));
}
#[test]
fn test_lefts() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let result = lefts(input);
assert_eq!(result, ~[10, 12, 14]);
}
#[test]
fn test_lefts_none() {
let input: ~[Either<int, int>] = ~[Right(10), Right(10)];
let result = lefts(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_lefts_empty() {
let input: ~[Either<int, int>] = ~[];
let result = lefts(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_rights() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let result = rights(input);
assert_eq!(result, ~[11, 13]);
}
#[test]
fn test_rights_none() {
let input: ~[Either<int, int>] = ~[Left(10), Left(10)];
let result = rights(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_rights_empty() {
let input: ~[Either<int, int>] = ~[];
let result = rights(input);
assert_eq!(vec::len(result), 0u);
}
#[test]
fn test_partition() {
let input = ~[Left(10), Right(11), Left(12), Right(13), Left(14)];
let (lefts, rights) = partition(input);
assert_eq!(lefts[0], 10);
assert_eq!(lefts[1], 12);
assert_eq!(lefts[2], 14);
assert_eq!(rights[0], 11);
assert_eq!(rights[1], 13);
}
#[test]
fn test_partition_no_lefts() {
let input: ~[Either<int, int>] = ~[Right(10), Right(11)];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 0u);
assert_eq!(vec::len(rights), 2u);
}
#[test]
fn test_partition_no_rights() {
let input: ~[Either<int, int>] = ~[Left(10), Left(11)];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 2u);
assert_eq!(vec::len(rights), 0u);
}
#[test]
fn test_partition_empty() {
let input: ~[Either<int, int>] = ~[];
let (lefts, rights) = partition(input);
assert_eq!(vec::len(lefts), 0u);
assert_eq!(vec::len(rights), 0u);
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
|
#[test]
fn test_either_right() {
let val = Right(10u);
|
random_line_split
|
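The `either` helpers shown above predate Rust 1.0. As a point of comparison, here is a minimal std-only sketch of the same helpers in today's syntax; the enum and method names mirror the old module purely for illustration and are not part of std.

// Minimal modern-Rust sketch of the helpers above (assumes only std).
#[derive(Debug, PartialEq)]
enum Either<T, U> {
    Left(T),
    Right(U),
}

impl<T, U> Either<T, U> {
    fn is_left(&self) -> bool {
        matches!(self, Either::Left(_))
    }
    fn is_right(&self) -> bool {
        matches!(self, Either::Right(_))
    }
    // Old `to_result`: Right maps to Ok, Left maps to Err.
    fn to_result(self) -> Result<U, T> {
        match self {
            Either::Right(r) => Ok(r),
            Either::Left(l) => Err(l),
        }
    }
}

fn main() {
    let val: Either<i32, u32> = Either::Left(10);
    assert!(val.is_left());
    assert!(!val.is_right());
    assert_eq!(Either::<i32, u32>::Right(3).to_result(), Ok(3));
}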
dns_2.rs
|
extern crate martin;
use martin::*;
#[test]
fn parse_query() {
let data = include_bytes!("../assets/captures/dns_2_query.bin");
let question = Question::new("google.com.", QType::ByType(Type::AAAA), Class::Internet)
|
let msg = Message::parse(&data[..]).unwrap();
assert!(msg.is_query());
assert_eq!(msg.id(), 3);
assert_eq!(msg.opcode(), Opcode::Query);
assert_eq!(msg.questions, vec![question]);
}
#[test]
fn parse_response() {
let data = include_bytes!("../assets/captures/dns_2_response.bin");
let question = Question::new("google.com.", QType::ByType(Type::AAAA), Class::Internet)
.unwrap();
let rr = ResourceRecord::AAAA {
name: "google.com.".parse().unwrap(),
class: Class::Internet,
ttl: 299,
addr: "2607:f8b0:400a:809::200e".parse().unwrap(),
};
let msg = Message::parse(&data[..]).unwrap();
assert!(msg.is_response());
assert_eq!(msg.id(), 3);
assert_eq!(msg.opcode(), Opcode::Query);
assert_eq!(msg.questions, vec![question]);
assert_eq!(msg.answers, vec![rr]);
}
|
.unwrap();
|
random_line_split
|
dns_2.rs
|
extern crate martin;
use martin::*;
#[test]
fn parse_query()
|
#[test]
fn parse_response() {
let data = include_bytes!("../assets/captures/dns_2_response.bin");
let question = Question::new("google.com.", QType::ByType(Type::AAAA), Class::Internet)
.unwrap();
let rr = ResourceRecord::AAAA {
name: "google.com.".parse().unwrap(),
class: Class::Internet,
ttl: 299,
addr: "2607:f8b0:400a:809::200e".parse().unwrap(),
};
let msg = Message::parse(&data[..]).unwrap();
assert!(msg.is_response());
assert_eq!(msg.id(), 3);
assert_eq!(msg.opcode(), Opcode::Query);
assert_eq!(msg.questions, vec![question]);
assert_eq!(msg.answers, vec![rr]);
}
|
{
let data = include_bytes!("../assets/captures/dns_2_query.bin");
let question = Question::new("google.com.", QType::ByType(Type::AAAA), Class::Internet)
.unwrap();
let msg = Message::parse(&data[..]).unwrap();
assert!(msg.is_query());
assert_eq!(msg.id(), 3);
assert_eq!(msg.opcode(), Opcode::Query);
assert_eq!(msg.questions, vec![question]);
}
|
identifier_body
|
dns_2.rs
|
extern crate martin;
use martin::*;
#[test]
fn
|
() {
let data = include_bytes!("../assets/captures/dns_2_query.bin");
let question = Question::new("google.com.", QType::ByType(Type::AAAA), Class::Internet)
.unwrap();
let msg = Message::parse(&data[..]).unwrap();
assert!(msg.is_query());
assert_eq!(msg.id(), 3);
assert_eq!(msg.opcode(), Opcode::Query);
assert_eq!(msg.questions, vec![question]);
}
#[test]
fn parse_response() {
let data = include_bytes!("../assets/captures/dns_2_response.bin");
let question = Question::new("google.com.", QType::ByType(Type::AAAA), Class::Internet)
.unwrap();
let rr = ResourceRecord::AAAA {
name: "google.com.".parse().unwrap(),
class: Class::Internet,
ttl: 299,
addr: "2607:f8b0:400a:809::200e".parse().unwrap(),
};
let msg = Message::parse(&data[..]).unwrap();
assert!(msg.is_response());
assert_eq!(msg.id(), 3);
assert_eq!(msg.opcode(), Opcode::Query);
assert_eq!(msg.questions, vec![question]);
assert_eq!(msg.answers, vec![rr]);
}
|
parse_query
|
identifier_name
|
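The dns_2.rs tests above build their expected AAAA answer by parsing an IPv6 string literal. As a small std-only illustration of that step, independent of the `martin` crate, the same literal can be parsed with `std::net::Ipv6Addr`:

use std::net::Ipv6Addr;

fn main() {
    // The literal used in the expected AAAA resource record above.
    let addr: Ipv6Addr = "2607:f8b0:400a:809::200e".parse().unwrap();
    assert_eq!(addr.segments()[0], 0x2607);
    assert_eq!(addr.segments()[7], 0x200e);
}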
day02.rs
|
use std::io::Read;
use crate::common::LineIter;
enum Dir {
Up,
Down,
Forward,
}
fn parse_input(input: &mut dyn Read) -> Vec<(Dir, i32)> {
let mut reader = LineIter::new(input);
let mut moves = Vec::new();
while let Some(line) = reader.next() {
let (dir, amount) = line.split_once(' ').unwrap();
let dir = match dir {
"up" => Dir::Up,
"down" => Dir::Down,
"forward" => Dir::Forward,
_ => panic!("Invalid direction '{}'", dir),
};
moves.push((dir, amount.parse().unwrap()));
}
moves
}
pub fn part1(input: &mut dyn Read) -> String {
let moves = parse_input(input);
let mut x = 0;
let mut depth = 0;
for (dir, amount) in moves {
match dir {
Dir::Up => depth -= amount,
Dir::Down => depth += amount,
Dir::Forward => x += amount,
}
}
(x * depth).to_string()
}
pub fn part2(input: &mut dyn Read) -> String {
let moves = parse_input(input);
let mut x = 0;
let mut depth = 0;
let mut aim = 0;
for (dir, amount) in moves {
match dir {
Dir::Up => aim -= amount,
Dir::Down => aim += amount,
Dir::Forward => {
x += amount;
depth += aim * amount;
}
}
}
(x * depth).to_string()
}
#[cfg(test)]
mod tests {
use crate::test_implementation;
use super::*;
const SAMPLE: &[u8] = include_bytes!("samples/02.txt");
#[test]
fn
|
() {
test_implementation(part1, SAMPLE, 150);
}
#[test]
fn sample_part2() {
        test_implementation(part2, SAMPLE, 900);
}
}
|
sample_part1
|
identifier_name
|

day02.rs
|
use std::io::Read;
use crate::common::LineIter;
enum Dir {
Up,
Down,
Forward,
}
fn parse_input(input: &mut dyn Read) -> Vec<(Dir, i32)> {
let mut reader = LineIter::new(input);
let mut moves = Vec::new();
while let Some(line) = reader.next() {
let (dir, amount) = line.split_once(' ').unwrap();
let dir = match dir {
"up" => Dir::Up,
"down" => Dir::Down,
"forward" => Dir::Forward,
_ => panic!("Invalid direction '{}'", dir),
};
moves.push((dir, amount.parse().unwrap()));
}
moves
}
pub fn part1(input: &mut dyn Read) -> String
|
pub fn part2(input: &mut dyn Read) -> String {
let moves = parse_input(input);
let mut x = 0;
let mut depth = 0;
let mut aim = 0;
for (dir, amount) in moves {
match dir {
Dir::Up => aim -= amount,
Dir::Down => aim += amount,
Dir::Forward => {
x += amount;
depth += aim * amount;
}
}
}
(x * depth).to_string()
}
#[cfg(test)]
mod tests {
use crate::test_implementation;
use super::*;
const SAMPLE: &[u8] = include_bytes!("samples/02.txt");
#[test]
fn sample_part1() {
test_implementation(part1, SAMPLE, 150);
}
#[test]
fn sample_part2() {
        test_implementation(part2, SAMPLE, 900);
}
}
|
{
let moves = parse_input(input);
let mut x = 0;
let mut depth = 0;
for (dir, amount) in moves {
match dir {
Dir::Up => depth -= amount,
Dir::Down => depth += amount,
Dir::Forward => x += amount,
}
}
(x * depth).to_string()
}
|
identifier_body
|
day02.rs
|
use std::io::Read;
use crate::common::LineIter;
enum Dir {
Up,
Down,
Forward,
}
fn parse_input(input: &mut dyn Read) -> Vec<(Dir, i32)> {
let mut reader = LineIter::new(input);
let mut moves = Vec::new();
while let Some(line) = reader.next() {
let (dir, amount) = line.split_once(' ').unwrap();
let dir = match dir {
"up" => Dir::Up,
"down" => Dir::Down,
"forward" => Dir::Forward,
_ => panic!("Invalid direction '{}'", dir),
};
moves.push((dir, amount.parse().unwrap()));
}
|
pub fn part1(input: &mut dyn Read) -> String {
let moves = parse_input(input);
let mut x = 0;
let mut depth = 0;
for (dir, amount) in moves {
match dir {
Dir::Up => depth -= amount,
Dir::Down => depth += amount,
Dir::Forward => x += amount,
}
}
(x * depth).to_string()
}
pub fn part2(input: &mut dyn Read) -> String {
let moves = parse_input(input);
let mut x = 0;
let mut depth = 0;
let mut aim = 0;
for (dir, amount) in moves {
match dir {
Dir::Up => aim -= amount,
Dir::Down => aim += amount,
Dir::Forward => {
x += amount;
depth += aim * amount;
}
}
}
(x * depth).to_string()
}
#[cfg(test)]
mod tests {
use crate::test_implementation;
use super::*;
const SAMPLE: &[u8] = include_bytes!("samples/02.txt");
#[test]
fn sample_part1() {
test_implementation(part1, SAMPLE, 150);
}
#[test]
fn sample_part2() {
        test_implementation(part2, SAMPLE, 900);
}
}
|
moves
}
|
random_line_split
|
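The day02.rs solutions above accumulate position with explicit loops. As an alternative sketch, not part of the original file, the same logic can be written with iterator folds, assuming the `Dir` and `parse_input` definitions shown above are in scope:

use std::io::Read;

// Fold-based equivalents of part1/part2; reuses `Dir` and `parse_input` from above.
pub fn part1_fold(input: &mut dyn Read) -> String {
    let (x, depth) = parse_input(input)
        .into_iter()
        .fold((0, 0), |(x, depth), (dir, amount)| match dir {
            Dir::Up => (x, depth - amount),
            Dir::Down => (x, depth + amount),
            Dir::Forward => (x + amount, depth),
        });
    (x * depth).to_string()
}

pub fn part2_fold(input: &mut dyn Read) -> String {
    let (x, depth, _aim) = parse_input(input)
        .into_iter()
        .fold((0, 0, 0), |(x, depth, aim), (dir, amount)| match dir {
            Dir::Up => (x, depth, aim - amount),
            Dir::Down => (x, depth, aim + amount),
            Dir::Forward => (x + amount, depth + aim * amount, aim),
        });
    (x * depth).to_string()
}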
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::characterdata::CharacterData;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::{ChildrenMutation, Node};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
Node::reflect_node(box HTMLTitleElement::new_inherited(local_name, prefix, document),
document,
HTMLTitleElementBinding::Wrap)
}
}
impl HTMLTitleElementMethods for HTMLTitleElement {
// https://html.spec.whatwg.org/multipage/#dom-title-text
fn Text(&self) -> DOMString {
let mut content = String::new();
for child in self.upcast::<Node>().children() {
if let Some(text) = child.downcast::<Text>() {
content.push_str(&text.upcast::<CharacterData>().data());
}
}
DOMString::from(content)
}
// https://html.spec.whatwg.org/multipage/#dom-title-text
fn SetText(&self, value: DOMString) {
self.upcast::<Node>().SetTextContent(Some(value))
}
}
impl VirtualMethods for HTMLTitleElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation)
|
fn bind_to_tree(&self, is_in_doc: bool) {
let node = self.upcast::<Node>();
if is_in_doc {
node.owner_doc().title_changed();
}
}
}
|
{
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = self.upcast::<Node>();
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
|
identifier_body
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::characterdata::CharacterData;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::{ChildrenMutation, Node};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
Node::reflect_node(box HTMLTitleElement::new_inherited(local_name, prefix, document),
document,
HTMLTitleElementBinding::Wrap)
}
}
impl HTMLTitleElementMethods for HTMLTitleElement {
// https://html.spec.whatwg.org/multipage/#dom-title-text
fn Text(&self) -> DOMString {
let mut content = String::new();
for child in self.upcast::<Node>().children() {
if let Some(text) = child.downcast::<Text>()
|
}
DOMString::from(content)
}
// https://html.spec.whatwg.org/multipage/#dom-title-text
fn SetText(&self, value: DOMString) {
self.upcast::<Node>().SetTextContent(Some(value))
}
}
impl VirtualMethods for HTMLTitleElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = self.upcast::<Node>();
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = self.upcast::<Node>();
if is_in_doc {
node.owner_doc().title_changed();
}
}
}
|
{
content.push_str(&text.upcast::<CharacterData>().data());
}
|
conditional_block
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::characterdata::CharacterData;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::{ChildrenMutation, Node};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct
|
{
htmlelement: HTMLElement,
}
impl HTMLTitleElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
Node::reflect_node(box HTMLTitleElement::new_inherited(local_name, prefix, document),
document,
HTMLTitleElementBinding::Wrap)
}
}
impl HTMLTitleElementMethods for HTMLTitleElement {
// https://html.spec.whatwg.org/multipage/#dom-title-text
fn Text(&self) -> DOMString {
let mut content = String::new();
for child in self.upcast::<Node>().children() {
if let Some(text) = child.downcast::<Text>() {
content.push_str(&text.upcast::<CharacterData>().data());
}
}
DOMString::from(content)
}
// https://html.spec.whatwg.org/multipage/#dom-title-text
fn SetText(&self, value: DOMString) {
self.upcast::<Node>().SetTextContent(Some(value))
}
}
impl VirtualMethods for HTMLTitleElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = self.upcast::<Node>();
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = self.upcast::<Node>();
if is_in_doc {
node.owner_doc().title_changed();
}
}
}
|
HTMLTitleElement
|
identifier_name
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
|
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
Node::reflect_node(box HTMLTitleElement::new_inherited(local_name, prefix, document),
document,
HTMLTitleElementBinding::Wrap)
}
}
impl HTMLTitleElementMethods for HTMLTitleElement {
// https://html.spec.whatwg.org/multipage/#dom-title-text
fn Text(&self) -> DOMString {
let mut content = String::new();
for child in self.upcast::<Node>().children() {
if let Some(text) = child.downcast::<Text>() {
content.push_str(&text.upcast::<CharacterData>().data());
}
}
DOMString::from(content)
}
// https://html.spec.whatwg.org/multipage/#dom-title-text
fn SetText(&self, value: DOMString) {
self.upcast::<Node>().SetTextContent(Some(value))
}
}
impl VirtualMethods for HTMLTitleElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = self.upcast::<Node>();
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = self.upcast::<Node>();
if is_in_doc {
node.owner_doc().title_changed();
}
}
}
|
use dom::bindings::str::DOMString;
use dom::characterdata::CharacterData;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::{ChildrenMutation, Node};
|
random_line_split
|
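The `Text()` getter above walks the element's children and concatenates the data of its direct `Text` children. Below is a simplified, std-only sketch of that traversal; `SimpleNode` is a hypothetical stand-in for the DOM types, used purely to illustrate the pattern, and is not part of Servo's API.

// Simplified model of "concatenate direct text children" (hypothetical types).
enum SimpleNode {
    Text(String),
    Element(Vec<SimpleNode>),
}

fn title_text(children: &[SimpleNode]) -> String {
    let mut content = String::new();
    for child in children {
        // Only direct Text children contribute, mirroring the downcast check above.
        if let SimpleNode::Text(data) = child {
            content.push_str(data);
        }
    }
    content
}

fn main() {
    let children = vec![
        SimpleNode::Text("Hello ".to_string()),
        SimpleNode::Element(vec![]),
        SimpleNode::Text("world".to_string()),
    ];
    assert_eq!(title_text(&children), "Hello world");
}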
sync.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Synchronous channels/ports
///
/// This channel implementation differs significantly from the asynchronous
/// implementations found next to it (oneshot/stream/share). This is an
/// implementation of a synchronous, bounded buffer channel.
///
/// Each channel is created with some amount of backing buffer, and sends will
/// *block* until buffer space becomes available. A buffer size of 0 is valid,
/// which means that every successful send is paired with a successful recv.
///
/// This flavor of channels defines a new `send_opt` method for channels which
/// is the method by which a message is sent but the task does not panic if it
/// cannot be delivered.
///
/// Another major difference is that send() will *always* return back the data
/// if it couldn't be sent. This is because it is deterministically known when
/// the data is received and when it is not received.
///
/// Implementation-wise, it can all be summed up with "use a mutex plus some
/// logic". The mutex used here is an OS native mutex, meaning that no user code
/// is run inside of the mutex (to prevent context switching). This
/// implementation shares almost all code for the buffered and unbuffered cases
/// of a synchronous channel. There are a few branches for the unbuffered case,
/// but they're mostly just relevant to blocking senders.
use core::prelude::*;
pub use self::Failure::*;
use self::Blocker::*;
use vec::Vec;
use core::mem;
use core::ptr;
use sync::atomic::{Ordering, AtomicUsize};
use sync::mpsc::blocking::{self, WaitToken, SignalToken};
use sync::mpsc::select::StartResult::{self, Installed, Abort};
use sync::{Mutex, MutexGuard};
pub struct Packet<T> {
/// Only field outside of the mutex. Just done for kicks, but mainly because
/// the other shared channel already had the code implemented
channels: AtomicUsize,
lock: Mutex<State<T>>,
}
unsafe impl<T: Send> Send for Packet<T> { }
unsafe impl<T: Send> Sync for Packet<T> { }
struct State<T> {
disconnected: bool, // Is the channel disconnected yet?
queue: Queue, // queue of senders waiting to send data
blocker: Blocker, // currently blocked task on this channel
buf: Buffer<T>, // storage for buffered messages
cap: usize, // capacity of this channel
/// A curious flag used to indicate whether a sender failed or succeeded in
/// blocking. This is used to transmit information back to the task that it
/// must dequeue its message from the buffer because it was not received.
/// This is only relevant in the 0-buffer case. This obviously cannot be
/// safely constructed, but it's guaranteed to always have a valid pointer
/// value.
canceled: Option<&'static mut bool>,
}
unsafe impl<T: Send> Send for State<T> {}
/// Possible flavors of threads who can be blocked on this channel.
enum Blocker {
BlockedSender(SignalToken),
BlockedReceiver(SignalToken),
NoneBlocked
}
/// Simple queue for threading tasks together. Nodes are stack-allocated, so
/// this structure is not safe at all
struct Queue {
head: *mut Node,
tail: *mut Node,
}
struct Node {
token: Option<SignalToken>,
next: *mut Node,
}
unsafe impl Send for Node {}
/// A simple ring-buffer
struct Buffer<T> {
buf: Vec<Option<T>>,
start: usize,
size: usize,
}
#[derive(Debug)]
pub enum Failure {
Empty,
Disconnected,
}
/// Atomically blocks the current thread, placing it into `slot`, unlocking `lock`
/// in the meantime. This re-locks the mutex upon returning.
fn wait<'a, 'b, T>(lock: &'a Mutex<State<T>>,
mut guard: MutexGuard<'b, State<T>>,
f: fn(SignalToken) -> Blocker)
-> MutexGuard<'a, State<T>>
{
let (wait_token, signal_token) = blocking::tokens();
match mem::replace(&mut guard.blocker, f(signal_token)) {
NoneBlocked => {}
_ => unreachable!(),
}
drop(guard); // unlock
wait_token.wait(); // block
lock.lock().unwrap() // relock
}
/// Wakes up a thread, dropping the lock at the correct time
fn wakeup<T>(token: SignalToken, guard: MutexGuard<State<T>>) {
// We need to be careful to wake up the waiting task *outside* of the mutex
// in case it incurs a context switch.
drop(guard);
token.signal();
}
impl<T> Packet<T> {
pub fn new(cap: usize) -> Packet<T> {
Packet {
channels: AtomicUsize::new(1),
lock: Mutex::new(State {
disconnected: false,
blocker: NoneBlocked,
cap: cap,
canceled: None,
queue: Queue {
head: ptr::null_mut(),
tail: ptr::null_mut(),
},
buf: Buffer {
buf: (0..cap + if cap == 0 {1} else {0}).map(|_| None).collect(),
start: 0,
size: 0,
},
}),
}
}
// wait until a send slot is available, returning locked access to
// the channel state.
fn acquire_send_slot(&self) -> MutexGuard<State<T>> {
let mut node = Node { token: None, next: ptr::null_mut() };
loop {
let mut guard = self.lock.lock().unwrap();
// are we ready to go?
if guard.disconnected || guard.buf.size() < guard.buf.cap() {
return guard;
}
// no room; actually block
let wait_token = guard.queue.enqueue(&mut node);
drop(guard);
wait_token.wait();
}
}
pub fn send(&self, t: T) -> Result<(), T> {
let mut guard = self.acquire_send_slot();
if guard.disconnected { return Err(t) }
guard.buf.enqueue(t);
match mem::replace(&mut guard.blocker, NoneBlocked) {
// if our capacity is 0, then we need to wait for a receiver to be
// available to take our data. After waiting, we check again to make
// sure the port didn't go away in the meantime. If it did, we need
// to hand back our data.
NoneBlocked if guard.cap == 0 => {
let mut canceled = false;
assert!(guard.canceled.is_none());
guard.canceled = Some(unsafe { mem::transmute(&mut canceled) });
let mut guard = wait(&self.lock, guard, BlockedSender);
if canceled {Err(guard.buf.dequeue())} else {Ok(())}
}
// success, we buffered some data
NoneBlocked => Ok(()),
// success, someone's about to receive our buffered data.
BlockedReceiver(token) => { wakeup(token, guard); Ok(()) }
BlockedSender(..) => panic!("lolwut"),
}
}
pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected {
Err(super::TrySendError::Disconnected(t))
} else if guard.buf.size() == guard.buf.cap() {
Err(super::TrySendError::Full(t))
} else if guard.cap == 0 {
// With capacity 0, even though we have buffer space we can't
// transfer the data unless there's a receiver waiting.
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => Err(super::TrySendError::Full(t)),
BlockedSender(..) => unreachable!(),
BlockedReceiver(token) => {
guard.buf.enqueue(t);
wakeup(token, guard);
Ok(())
}
}
} else {
// If the buffer has some space and the capacity isn't 0, then we
// just enqueue the data for later retrieval, ensuring to wake up
// any blocked receiver if there is one.
assert!(guard.buf.size() < guard.buf.cap());
guard.buf.enqueue(t);
match mem::replace(&mut guard.blocker, NoneBlocked) {
BlockedReceiver(token) => wakeup(token, guard),
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
}
Ok(())
}
}
// Receives a message from this channel
//
// When reading this, remember that there can only ever be one receiver at
// time.
pub fn recv(&self) -> Result<T, ()> {
let mut guard = self.lock.lock().unwrap();
// Wait for the buffer to have something in it. No need for a while loop
// because we're the only receiver.
let mut waited = false;
        if !guard.disconnected && guard.buf.size() == 0 {
guard = wait(&self.lock, guard, BlockedReceiver);
waited = true;
}
if guard.disconnected && guard.buf.size() == 0 { return Err(()) }
// Pick up the data, wake up our neighbors, and carry on
assert!(guard.buf.size() > 0);
let ret = guard.buf.dequeue();
self.wakeup_senders(waited, guard);
return Ok(ret);
}
pub fn try_recv(&self) -> Result<T, Failure> {
let mut guard = self.lock.lock().unwrap();
// Easy cases first
if guard.disconnected { return Err(Disconnected) }
if guard.buf.size() == 0 { return Err(Empty) }
// Be sure to wake up neighbors
let ret = Ok(guard.buf.dequeue());
self.wakeup_senders(false, guard);
return ret;
}
// Wake up pending senders after some data has been received
//
// * `waited` - flag if the receiver blocked to receive some data, or if it
// just picked up some data on the way out
// * `guard` - the lock guard that is held over this channel's lock
fn wakeup_senders(&self, waited: bool, mut guard: MutexGuard<State<T>>)
|
// only outside of the lock do we wake up the pending tasks
pending_sender1.map(|t| t.signal());
pending_sender2.map(|t| t.signal());
}
// Prepares this shared packet for a channel clone, essentially just bumping
// a refcount.
pub fn clone_chan(&self) {
self.channels.fetch_add(1, Ordering::SeqCst);
}
pub fn drop_chan(&self) {
// Only flag the channel as disconnected if we're the last channel
match self.channels.fetch_sub(1, Ordering::SeqCst) {
1 => {}
_ => return
}
// Not much to do other than wake up a receiver if one's there
let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
BlockedReceiver(token) => wakeup(token, guard),
}
}
pub fn drop_port(&self) {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
// If the capacity is 0, then the sender may want its data back after
// we're disconnected. Otherwise it's now our responsibility to destroy
// the buffered data. As with many other portions of this code, this
// needs to be careful to destroy the data *outside* of the lock to
// prevent deadlock.
        let _data = if guard.cap != 0 {
mem::replace(&mut guard.buf.buf, Vec::new())
} else {
Vec::new()
};
let mut queue = mem::replace(&mut guard.queue, Queue {
head: ptr::null_mut(),
tail: ptr::null_mut(),
});
let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedSender(token) => {
*guard.canceled.take().unwrap() = true;
Some(token)
}
BlockedReceiver(..) => unreachable!(),
};
mem::drop(guard);
loop {
match queue.dequeue() {
Some(token) => { token.signal(); }
None => break,
}
}
waiter.map(|t| t.signal());
}
////////////////////////////////////////////////////////////////////////////
// select implementation
////////////////////////////////////////////////////////////////////////////
// If Ok, the value is whether this port has data, if Err, then the upgraded
// port needs to be checked instead of this one.
pub fn can_recv(&self) -> bool {
let guard = self.lock.lock().unwrap();
guard.disconnected || guard.buf.size() > 0
}
// Attempts to start selection on this port. This can either succeed or fail
// because there is data waiting.
pub fn start_selection(&self, token: SignalToken) -> StartResult {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected || guard.buf.size() > 0 {
Abort
} else {
match mem::replace(&mut guard.blocker, BlockedReceiver(token)) {
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
BlockedReceiver(..) => unreachable!(),
}
Installed
}
}
// Remove a previous selecting task from this port. This ensures that the
// blocked task will no longer be visible to any other threads.
//
// The return value indicates whether there's data on this port.
pub fn abort_selection(&self) -> bool {
let mut guard = self.lock.lock().unwrap();
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => true,
BlockedSender(token) => {
guard.blocker = BlockedSender(token);
true
}
BlockedReceiver(token) => { drop(token); false }
}
}
}
#[unsafe_destructor]
impl<T> Drop for Packet<T> {
fn drop(&mut self) {
assert_eq!(self.channels.load(Ordering::SeqCst), 0);
let mut guard = self.lock.lock().unwrap();
assert!(guard.queue.dequeue().is_none());
assert!(guard.canceled.is_none());
}
}
////////////////////////////////////////////////////////////////////////////////
// Buffer, a simple ring buffer backed by Vec<T>
////////////////////////////////////////////////////////////////////////////////
impl<T> Buffer<T> {
fn enqueue(&mut self, t: T) {
let pos = (self.start + self.size) % self.buf.len();
self.size += 1;
let prev = mem::replace(&mut self.buf[pos], Some(t));
assert!(prev.is_none());
}
fn dequeue(&mut self) -> T {
let start = self.start;
self.size -= 1;
self.start = (self.start + 1) % self.buf.len();
let result = &mut self.buf[start];
result.take().unwrap()
}
fn size(&self) -> usize { self.size }
fn cap(&self) -> usize { self.buf.len() }
}
////////////////////////////////////////////////////////////////////////////////
// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
////////////////////////////////////////////////////////////////////////////////
impl Queue {
fn enqueue(&mut self, node: &mut Node) -> WaitToken {
let (wait_token, signal_token) = blocking::tokens();
node.token = Some(signal_token);
node.next = ptr::null_mut();
if self.tail.is_null() {
self.head = node as *mut Node;
self.tail = node as *mut Node;
} else {
unsafe {
(*self.tail).next = node as *mut Node;
self.tail = node as *mut Node;
}
}
wait_token
}
fn dequeue(&mut self) -> Option<SignalToken> {
if self.head.is_null() {
return None
}
let node = self.head;
self.head = unsafe { (*node).next };
if self.head.is_null() {
self.tail = ptr::null_mut();
}
unsafe {
(*node).next = ptr::null_mut();
Some((*node).token.take().unwrap())
}
}
}
|
{
let pending_sender1: Option<SignalToken> = guard.queue.dequeue();
// If this is a no-buffer channel (cap == 0), then if we didn't wait we
// need to ACK the sender. If we waited, then the sender waking us up
// was already the ACK.
let pending_sender2 = if guard.cap == 0 && !waited {
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedReceiver(..) => unreachable!(),
BlockedSender(token) => {
guard.canceled.take();
Some(token)
}
}
} else {
None
};
mem::drop(guard);
|
identifier_body
|
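std's `sync_channel` exposes the same bounded/rendezvous semantics that the sync.rs module above implements. This short usage sketch shows the zero-capacity case, where a send only completes once a receiver is ready:

use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    let (tx, rx) = sync_channel::<u32>(0);
    let sender = thread::spawn(move || {
        // Blocks until the main thread is ready to receive.
        tx.send(42).unwrap();
    });
    assert_eq!(rx.recv().unwrap(), 42);
    sender.join().unwrap();
}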
sync.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Synchronous channels/ports
///
/// This channel implementation differs significantly from the asynchronous
/// implementations found next to it (oneshot/stream/share). This is an
/// implementation of a synchronous, bounded buffer channel.
///
/// Each channel is created with some amount of backing buffer, and sends will
/// *block* until buffer space becomes available. A buffer size of 0 is valid,
/// which means that every successful send is paired with a successful recv.
///
/// This flavor of channels defines a new `send_opt` method for channels which
/// is the method by which a message is sent but the task does not panic if it
/// cannot be delivered.
///
/// Another major difference is that send() will *always* return back the data
/// if it couldn't be sent. This is because it is deterministically known when
/// the data is received and when it is not received.
///
/// Implementation-wise, it can all be summed up with "use a mutex plus some
/// logic". The mutex used here is an OS native mutex, meaning that no user code
/// is run inside of the mutex (to prevent context switching). This
/// implementation shares almost all code for the buffered and unbuffered cases
/// of a synchronous channel. There are a few branches for the unbuffered case,
/// but they're mostly just relevant to blocking senders.
use core::prelude::*;
pub use self::Failure::*;
use self::Blocker::*;
use vec::Vec;
use core::mem;
use core::ptr;
use sync::atomic::{Ordering, AtomicUsize};
use sync::mpsc::blocking::{self, WaitToken, SignalToken};
use sync::mpsc::select::StartResult::{self, Installed, Abort};
use sync::{Mutex, MutexGuard};
pub struct
|
<T> {
/// Only field outside of the mutex. Just done for kicks, but mainly because
/// the other shared channel already had the code implemented
channels: AtomicUsize,
lock: Mutex<State<T>>,
}
unsafe impl<T: Send> Send for Packet<T> { }
unsafe impl<T: Send> Sync for Packet<T> { }
struct State<T> {
disconnected: bool, // Is the channel disconnected yet?
queue: Queue, // queue of senders waiting to send data
blocker: Blocker, // currently blocked task on this channel
buf: Buffer<T>, // storage for buffered messages
cap: usize, // capacity of this channel
/// A curious flag used to indicate whether a sender failed or succeeded in
/// blocking. This is used to transmit information back to the task that it
/// must dequeue its message from the buffer because it was not received.
/// This is only relevant in the 0-buffer case. This obviously cannot be
/// safely constructed, but it's guaranteed to always have a valid pointer
/// value.
canceled: Option<&'static mut bool>,
}
unsafe impl<T: Send> Send for State<T> {}
/// Possible flavors of threads who can be blocked on this channel.
enum Blocker {
BlockedSender(SignalToken),
BlockedReceiver(SignalToken),
NoneBlocked
}
/// Simple queue for threading tasks together. Nodes are stack-allocated, so
/// this structure is not safe at all
struct Queue {
head: *mut Node,
tail: *mut Node,
}
struct Node {
token: Option<SignalToken>,
next: *mut Node,
}
unsafe impl Send for Node {}
/// A simple ring-buffer
struct Buffer<T> {
buf: Vec<Option<T>>,
start: usize,
size: usize,
}
#[derive(Debug)]
pub enum Failure {
Empty,
Disconnected,
}
/// Atomically blocks the current thread, placing it into `slot`, unlocking `lock`
/// in the meantime. This re-locks the mutex upon returning.
fn wait<'a, 'b, T>(lock: &'a Mutex<State<T>>,
mut guard: MutexGuard<'b, State<T>>,
f: fn(SignalToken) -> Blocker)
-> MutexGuard<'a, State<T>>
{
let (wait_token, signal_token) = blocking::tokens();
match mem::replace(&mut guard.blocker, f(signal_token)) {
NoneBlocked => {}
_ => unreachable!(),
}
drop(guard); // unlock
wait_token.wait(); // block
lock.lock().unwrap() // relock
}
/// Wakes up a thread, dropping the lock at the correct time
fn wakeup<T>(token: SignalToken, guard: MutexGuard<State<T>>) {
// We need to be careful to wake up the waiting task *outside* of the mutex
// in case it incurs a context switch.
drop(guard);
token.signal();
}
impl<T> Packet<T> {
pub fn new(cap: usize) -> Packet<T> {
Packet {
channels: AtomicUsize::new(1),
lock: Mutex::new(State {
disconnected: false,
blocker: NoneBlocked,
cap: cap,
canceled: None,
queue: Queue {
head: ptr::null_mut(),
tail: ptr::null_mut(),
},
buf: Buffer {
buf: (0..cap + if cap == 0 {1} else {0}).map(|_| None).collect(),
start: 0,
size: 0,
},
}),
}
}
// wait until a send slot is available, returning locked access to
// the channel state.
fn acquire_send_slot(&self) -> MutexGuard<State<T>> {
let mut node = Node { token: None, next: ptr::null_mut() };
loop {
let mut guard = self.lock.lock().unwrap();
// are we ready to go?
if guard.disconnected || guard.buf.size() < guard.buf.cap() {
return guard;
}
// no room; actually block
let wait_token = guard.queue.enqueue(&mut node);
drop(guard);
wait_token.wait();
}
}
pub fn send(&self, t: T) -> Result<(), T> {
let mut guard = self.acquire_send_slot();
if guard.disconnected { return Err(t) }
guard.buf.enqueue(t);
match mem::replace(&mut guard.blocker, NoneBlocked) {
// if our capacity is 0, then we need to wait for a receiver to be
// available to take our data. After waiting, we check again to make
// sure the port didn't go away in the meantime. If it did, we need
// to hand back our data.
NoneBlocked if guard.cap == 0 => {
let mut canceled = false;
assert!(guard.canceled.is_none());
guard.canceled = Some(unsafe { mem::transmute(&mut canceled) });
let mut guard = wait(&self.lock, guard, BlockedSender);
if canceled {Err(guard.buf.dequeue())} else {Ok(())}
}
// success, we buffered some data
NoneBlocked => Ok(()),
// success, someone's about to receive our buffered data.
BlockedReceiver(token) => { wakeup(token, guard); Ok(()) }
BlockedSender(..) => panic!("lolwut"),
}
}
pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected {
Err(super::TrySendError::Disconnected(t))
} else if guard.buf.size() == guard.buf.cap() {
Err(super::TrySendError::Full(t))
} else if guard.cap == 0 {
// With capacity 0, even though we have buffer space we can't
// transfer the data unless there's a receiver waiting.
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => Err(super::TrySendError::Full(t)),
BlockedSender(..) => unreachable!(),
BlockedReceiver(token) => {
guard.buf.enqueue(t);
wakeup(token, guard);
Ok(())
}
}
} else {
// If the buffer has some space and the capacity isn't 0, then we
// just enqueue the data for later retrieval, ensuring to wake up
// any blocked receiver if there is one.
assert!(guard.buf.size() < guard.buf.cap());
guard.buf.enqueue(t);
match mem::replace(&mut guard.blocker, NoneBlocked) {
BlockedReceiver(token) => wakeup(token, guard),
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
}
Ok(())
}
}
// Receives a message from this channel
//
// When reading this, remember that there can only ever be one receiver at
// time.
pub fn recv(&self) -> Result<T, ()> {
let mut guard = self.lock.lock().unwrap();
// Wait for the buffer to have something in it. No need for a while loop
// because we're the only receiver.
let mut waited = false;
        if !guard.disconnected && guard.buf.size() == 0 {
guard = wait(&self.lock, guard, BlockedReceiver);
waited = true;
}
if guard.disconnected && guard.buf.size() == 0 { return Err(()) }
// Pick up the data, wake up our neighbors, and carry on
assert!(guard.buf.size() > 0);
let ret = guard.buf.dequeue();
self.wakeup_senders(waited, guard);
return Ok(ret);
}
pub fn try_recv(&self) -> Result<T, Failure> {
let mut guard = self.lock.lock().unwrap();
// Easy cases first
if guard.disconnected { return Err(Disconnected) }
if guard.buf.size() == 0 { return Err(Empty) }
// Be sure to wake up neighbors
let ret = Ok(guard.buf.dequeue());
self.wakeup_senders(false, guard);
return ret;
}
// Wake up pending senders after some data has been received
//
// * `waited` - flag if the receiver blocked to receive some data, or if it
// just picked up some data on the way out
// * `guard` - the lock guard that is held over this channel's lock
fn wakeup_senders(&self, waited: bool, mut guard: MutexGuard<State<T>>) {
let pending_sender1: Option<SignalToken> = guard.queue.dequeue();
// If this is a no-buffer channel (cap == 0), then if we didn't wait we
// need to ACK the sender. If we waited, then the sender waking us up
// was already the ACK.
        let pending_sender2 = if guard.cap == 0 && !waited {
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedReceiver(..) => unreachable!(),
BlockedSender(token) => {
guard.canceled.take();
Some(token)
}
}
} else {
None
};
mem::drop(guard);
// only outside of the lock do we wake up the pending tasks
pending_sender1.map(|t| t.signal());
pending_sender2.map(|t| t.signal());
}
// Prepares this shared packet for a channel clone, essentially just bumping
// a refcount.
pub fn clone_chan(&self) {
self.channels.fetch_add(1, Ordering::SeqCst);
}
pub fn drop_chan(&self) {
// Only flag the channel as disconnected if we're the last channel
match self.channels.fetch_sub(1, Ordering::SeqCst) {
1 => {}
_ => return
}
// Not much to do other than wake up a receiver if one's there
let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
BlockedReceiver(token) => wakeup(token, guard),
}
}
pub fn drop_port(&self) {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
// If the capacity is 0, then the sender may want its data back after
// we're disconnected. Otherwise it's now our responsibility to destroy
// the buffered data. As with many other portions of this code, this
// needs to be careful to destroy the data *outside* of the lock to
// prevent deadlock.
        let _data = if guard.cap != 0 {
mem::replace(&mut guard.buf.buf, Vec::new())
} else {
Vec::new()
};
let mut queue = mem::replace(&mut guard.queue, Queue {
head: ptr::null_mut(),
tail: ptr::null_mut(),
});
let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedSender(token) => {
*guard.canceled.take().unwrap() = true;
Some(token)
}
BlockedReceiver(..) => unreachable!(),
};
mem::drop(guard);
loop {
match queue.dequeue() {
Some(token) => { token.signal(); }
None => break,
}
}
waiter.map(|t| t.signal());
}
////////////////////////////////////////////////////////////////////////////
// select implementation
////////////////////////////////////////////////////////////////////////////
// If Ok, the value is whether this port has data, if Err, then the upgraded
// port needs to be checked instead of this one.
pub fn can_recv(&self) -> bool {
let guard = self.lock.lock().unwrap();
guard.disconnected || guard.buf.size() > 0
}
// Attempts to start selection on this port. This can either succeed or fail
// because there is data waiting.
pub fn start_selection(&self, token: SignalToken) -> StartResult {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected || guard.buf.size() > 0 {
Abort
} else {
match mem::replace(&mut guard.blocker, BlockedReceiver(token)) {
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
BlockedReceiver(..) => unreachable!(),
}
Installed
}
}
// Remove a previous selecting task from this port. This ensures that the
// blocked task will no longer be visible to any other threads.
//
// The return value indicates whether there's data on this port.
pub fn abort_selection(&self) -> bool {
let mut guard = self.lock.lock().unwrap();
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => true,
BlockedSender(token) => {
guard.blocker = BlockedSender(token);
true
}
BlockedReceiver(token) => { drop(token); false }
}
}
}
#[unsafe_destructor]
impl<T> Drop for Packet<T> {
fn drop(&mut self) {
assert_eq!(self.channels.load(Ordering::SeqCst), 0);
let mut guard = self.lock.lock().unwrap();
assert!(guard.queue.dequeue().is_none());
assert!(guard.canceled.is_none());
}
}
////////////////////////////////////////////////////////////////////////////////
// Buffer, a simple ring buffer backed by Vec<T>
////////////////////////////////////////////////////////////////////////////////
impl<T> Buffer<T> {
fn enqueue(&mut self, t: T) {
let pos = (self.start + self.size) % self.buf.len();
self.size += 1;
let prev = mem::replace(&mut self.buf[pos], Some(t));
assert!(prev.is_none());
}
fn dequeue(&mut self) -> T {
let start = self.start;
self.size -= 1;
self.start = (self.start + 1) % self.buf.len();
let result = &mut self.buf[start];
result.take().unwrap()
}
fn size(&self) -> usize { self.size }
fn cap(&self) -> usize { self.buf.len() }
}
////////////////////////////////////////////////////////////////////////////////
// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
////////////////////////////////////////////////////////////////////////////////
impl Queue {
fn enqueue(&mut self, node: &mut Node) -> WaitToken {
let (wait_token, signal_token) = blocking::tokens();
node.token = Some(signal_token);
node.next = ptr::null_mut();
if self.tail.is_null() {
self.head = node as *mut Node;
self.tail = node as *mut Node;
} else {
unsafe {
(*self.tail).next = node as *mut Node;
self.tail = node as *mut Node;
}
}
wait_token
}
fn dequeue(&mut self) -> Option<SignalToken> {
if self.head.is_null() {
return None
}
let node = self.head;
self.head = unsafe { (*node).next };
if self.head.is_null() {
self.tail = ptr::null_mut();
}
unsafe {
(*node).next = ptr::null_mut();
Some((*node).token.take().unwrap())
}
}
}
|
Packet
|
identifier_name
|
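As a standalone illustration of the ring-buffer arithmetic used by `Buffer<T>` above (`start` marks the oldest slot, `size` the occupied count, indices wrap modulo the backing length), here is a minimal sketch with the same enqueue/dequeue logic plus explicit capacity checks:

// Minimal ring buffer mirroring the Buffer<T> index math above.
struct Ring<T> {
    buf: Vec<Option<T>>,
    start: usize,
    size: usize,
}

impl<T> Ring<T> {
    fn with_capacity(cap: usize) -> Self {
        Ring { buf: (0..cap).map(|_| None).collect(), start: 0, size: 0 }
    }
    fn enqueue(&mut self, t: T) {
        assert!(self.size < self.buf.len(), "ring is full");
        let pos = (self.start + self.size) % self.buf.len();
        self.size += 1;
        self.buf[pos] = Some(t);
    }
    fn dequeue(&mut self) -> T {
        assert!(self.size > 0, "ring is empty");
        let start = self.start;
        self.size -= 1;
        self.start = (self.start + 1) % self.buf.len();
        self.buf[start].take().unwrap()
    }
}

fn main() {
    let mut r = Ring::with_capacity(2);
    r.enqueue(1);
    r.enqueue(2);
    assert_eq!(r.dequeue(), 1);
    r.enqueue(3);
    assert_eq!(r.dequeue(), 2);
    assert_eq!(r.dequeue(), 3);
}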
sync.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Synchronous channels/ports
///
/// This channel implementation differs significantly from the asynchronous
/// implementations found next to it (oneshot/stream/share). This is an
/// implementation of a synchronous, bounded buffer channel.
///
/// Each channel is created with some amount of backing buffer, and sends will
/// *block* until buffer space becomes available. A buffer size of 0 is valid,
/// which means that every successful send is paired with a successful recv.
///
/// This flavor of channels defines a new `send_opt` method for channels which
/// is the method by which a message is sent but the task does not panic if it
/// cannot be delivered.
///
/// Another major difference is that send() will *always* return back the data
/// if it couldn't be sent. This is because it is deterministically known when
/// the data is received and when it is not received.
///
/// Implementation-wise, it can all be summed up with "use a mutex plus some
/// logic". The mutex used here is an OS native mutex, meaning that no user code
/// is run inside of the mutex (to prevent context switching). This
/// implementation shares almost all code for the buffered and unbuffered cases
/// of a synchronous channel. There are a few branches for the unbuffered case,
/// but they're mostly just relevant to blocking senders.
use core::prelude::*;
pub use self::Failure::*;
use self::Blocker::*;
use vec::Vec;
use core::mem;
use core::ptr;
use sync::atomic::{Ordering, AtomicUsize};
use sync::mpsc::blocking::{self, WaitToken, SignalToken};
use sync::mpsc::select::StartResult::{self, Installed, Abort};
use sync::{Mutex, MutexGuard};
pub struct Packet<T> {
/// Only field outside of the mutex. Just done for kicks, but mainly because
/// the other shared channel already had the code implemented
channels: AtomicUsize,
lock: Mutex<State<T>>,
}
unsafe impl<T: Send> Send for Packet<T> { }
unsafe impl<T: Send> Sync for Packet<T> { }
struct State<T> {
disconnected: bool, // Is the channel disconnected yet?
queue: Queue, // queue of senders waiting to send data
blocker: Blocker, // currently blocked task on this channel
buf: Buffer<T>, // storage for buffered messages
cap: usize, // capacity of this channel
/// A curious flag used to indicate whether a sender failed or succeeded in
/// blocking. This is used to transmit information back to the task that it
/// must dequeue its message from the buffer because it was not received.
/// This is only relevant in the 0-buffer case. This obviously cannot be
/// safely constructed, but it's guaranteed to always have a valid pointer
/// value.
canceled: Option<&'static mut bool>,
}
unsafe impl<T: Send> Send for State<T> {}
/// Possible flavors of threads who can be blocked on this channel.
enum Blocker {
BlockedSender(SignalToken),
BlockedReceiver(SignalToken),
NoneBlocked
}
/// Simple queue for threading tasks together. Nodes are stack-allocated, so
/// this structure is not safe at all
struct Queue {
head: *mut Node,
tail: *mut Node,
}
struct Node {
token: Option<SignalToken>,
next: *mut Node,
}
unsafe impl Send for Node {}
/// A simple ring-buffer
struct Buffer<T> {
buf: Vec<Option<T>>,
start: usize,
size: usize,
}
#[derive(Debug)]
pub enum Failure {
Empty,
Disconnected,
}
/// Atomically blocks the current thread, placing it into `slot`, unlocking `lock`
/// in the meantime. This re-locks the mutex upon returning.
fn wait<'a, 'b, T>(lock: &'a Mutex<State<T>>,
mut guard: MutexGuard<'b, State<T>>,
f: fn(SignalToken) -> Blocker)
-> MutexGuard<'a, State<T>>
{
let (wait_token, signal_token) = blocking::tokens();
match mem::replace(&mut guard.blocker, f(signal_token)) {
NoneBlocked => {}
_ => unreachable!(),
}
drop(guard); // unlock
wait_token.wait(); // block
lock.lock().unwrap() // relock
}
/// Wakes up a thread, dropping the lock at the correct time
fn wakeup<T>(token: SignalToken, guard: MutexGuard<State<T>>) {
// We need to be careful to wake up the waiting task *outside* of the mutex
// in case it incurs a context switch.
|
pub fn new(cap: usize) -> Packet<T> {
Packet {
channels: AtomicUsize::new(1),
lock: Mutex::new(State {
disconnected: false,
blocker: NoneBlocked,
cap: cap,
canceled: None,
queue: Queue {
head: ptr::null_mut(),
tail: ptr::null_mut(),
},
buf: Buffer {
buf: (0..cap + if cap == 0 {1} else {0}).map(|_| None).collect(),
start: 0,
size: 0,
},
}),
}
}
// wait until a send slot is available, returning locked access to
// the channel state.
fn acquire_send_slot(&self) -> MutexGuard<State<T>> {
let mut node = Node { token: None, next: ptr::null_mut() };
loop {
let mut guard = self.lock.lock().unwrap();
// are we ready to go?
if guard.disconnected || guard.buf.size() < guard.buf.cap() {
return guard;
}
// no room; actually block
let wait_token = guard.queue.enqueue(&mut node);
drop(guard);
wait_token.wait();
}
}
pub fn send(&self, t: T) -> Result<(), T> {
let mut guard = self.acquire_send_slot();
if guard.disconnected { return Err(t) }
guard.buf.enqueue(t);
match mem::replace(&mut guard.blocker, NoneBlocked) {
// if our capacity is 0, then we need to wait for a receiver to be
// available to take our data. After waiting, we check again to make
// sure the port didn't go away in the meantime. If it did, we need
// to hand back our data.
NoneBlocked if guard.cap == 0 => {
let mut canceled = false;
assert!(guard.canceled.is_none());
guard.canceled = Some(unsafe { mem::transmute(&mut canceled) });
let mut guard = wait(&self.lock, guard, BlockedSender);
if canceled {Err(guard.buf.dequeue())} else {Ok(())}
}
// success, we buffered some data
NoneBlocked => Ok(()),
// success, someone's about to receive our buffered data.
BlockedReceiver(token) => { wakeup(token, guard); Ok(()) }
BlockedSender(..) => panic!("lolwut"),
}
}
pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected {
Err(super::TrySendError::Disconnected(t))
} else if guard.buf.size() == guard.buf.cap() {
Err(super::TrySendError::Full(t))
} else if guard.cap == 0 {
// With capacity 0, even though we have buffer space we can't
// transfer the data unless there's a receiver waiting.
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => Err(super::TrySendError::Full(t)),
BlockedSender(..) => unreachable!(),
BlockedReceiver(token) => {
guard.buf.enqueue(t);
wakeup(token, guard);
Ok(())
}
}
} else {
// If the buffer has some space and the capacity isn't 0, then we
// just enqueue the data for later retrieval, ensuring to wake up
// any blocked receiver if there is one.
assert!(guard.buf.size() < guard.buf.cap());
guard.buf.enqueue(t);
match mem::replace(&mut guard.blocker, NoneBlocked) {
BlockedReceiver(token) => wakeup(token, guard),
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
}
Ok(())
}
}
// Receives a message from this channel
//
// When reading this, remember that there can only ever be one receiver at
// time.
pub fn recv(&self) -> Result<T, ()> {
let mut guard = self.lock.lock().unwrap();
// Wait for the buffer to have something in it. No need for a while loop
// because we're the only receiver.
let mut waited = false;
        if !guard.disconnected && guard.buf.size() == 0 {
guard = wait(&self.lock, guard, BlockedReceiver);
waited = true;
}
if guard.disconnected && guard.buf.size() == 0 { return Err(()) }
// Pick up the data, wake up our neighbors, and carry on
assert!(guard.buf.size() > 0);
let ret = guard.buf.dequeue();
self.wakeup_senders(waited, guard);
return Ok(ret);
}
pub fn try_recv(&self) -> Result<T, Failure> {
let mut guard = self.lock.lock().unwrap();
// Easy cases first
if guard.disconnected { return Err(Disconnected) }
if guard.buf.size() == 0 { return Err(Empty) }
// Be sure to wake up neighbors
let ret = Ok(guard.buf.dequeue());
self.wakeup_senders(false, guard);
return ret;
}
// Wake up pending senders after some data has been received
//
// * `waited` - flag if the receiver blocked to receive some data, or if it
// just picked up some data on the way out
// * `guard` - the lock guard that is held over this channel's lock
fn wakeup_senders(&self, waited: bool, mut guard: MutexGuard<State<T>>) {
let pending_sender1: Option<SignalToken> = guard.queue.dequeue();
// If this is a no-buffer channel (cap == 0), then if we didn't wait we
// need to ACK the sender. If we waited, then the sender waking us up
// was already the ACK.
        let pending_sender2 = if guard.cap == 0 && !waited {
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedReceiver(..) => unreachable!(),
BlockedSender(token) => {
guard.canceled.take();
Some(token)
}
}
} else {
None
};
mem::drop(guard);
// only outside of the lock do we wake up the pending tasks
pending_sender1.map(|t| t.signal());
pending_sender2.map(|t| t.signal());
}
// Prepares this shared packet for a channel clone, essentially just bumping
// a refcount.
pub fn clone_chan(&self) {
self.channels.fetch_add(1, Ordering::SeqCst);
}
pub fn drop_chan(&self) {
// Only flag the channel as disconnected if we're the last channel
match self.channels.fetch_sub(1, Ordering::SeqCst) {
1 => {}
_ => return
}
// Not much to do other than wake up a receiver if one's there
let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
BlockedReceiver(token) => wakeup(token, guard),
}
}
pub fn drop_port(&self) {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
// If the capacity is 0, then the sender may want its data back after
// we're disconnected. Otherwise it's now our responsibility to destroy
// the buffered data. As with many other portions of this code, this
// needs to be careful to destroy the data *outside* of the lock to
// prevent deadlock.
        let _data = if guard.cap != 0 {
mem::replace(&mut guard.buf.buf, Vec::new())
} else {
Vec::new()
};
let mut queue = mem::replace(&mut guard.queue, Queue {
head: ptr::null_mut(),
tail: ptr::null_mut(),
});
let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedSender(token) => {
*guard.canceled.take().unwrap() = true;
Some(token)
}
BlockedReceiver(..) => unreachable!(),
};
mem::drop(guard);
loop {
match queue.dequeue() {
Some(token) => { token.signal(); }
None => break,
}
}
waiter.map(|t| t.signal());
}
////////////////////////////////////////////////////////////////////////////
// select implementation
////////////////////////////////////////////////////////////////////////////
// If Ok, the value is whether this port has data, if Err, then the upgraded
// port needs to be checked instead of this one.
pub fn can_recv(&self) -> bool {
let guard = self.lock.lock().unwrap();
guard.disconnected || guard.buf.size() > 0
}
// Attempts to start selection on this port. This can either succeed or fail
// because there is data waiting.
pub fn start_selection(&self, token: SignalToken) -> StartResult {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected || guard.buf.size() > 0 {
Abort
} else {
match mem::replace(&mut guard.blocker, BlockedReceiver(token)) {
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
BlockedReceiver(..) => unreachable!(),
}
Installed
}
}
// Remove a previous selecting task from this port. This ensures that the
// blocked task will no longer be visible to any other threads.
//
// The return value indicates whether there's data on this port.
pub fn abort_selection(&self) -> bool {
let mut guard = self.lock.lock().unwrap();
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => true,
BlockedSender(token) => {
guard.blocker = BlockedSender(token);
true
}
BlockedReceiver(token) => { drop(token); false }
}
}
}
#[unsafe_destructor]
impl<T> Drop for Packet<T> {
fn drop(&mut self) {
assert_eq!(self.channels.load(Ordering::SeqCst), 0);
let mut guard = self.lock.lock().unwrap();
assert!(guard.queue.dequeue().is_none());
assert!(guard.canceled.is_none());
}
}
////////////////////////////////////////////////////////////////////////////////
// Buffer, a simple ring buffer backed by Vec<T>
////////////////////////////////////////////////////////////////////////////////
impl<T> Buffer<T> {
fn enqueue(&mut self, t: T) {
let pos = (self.start + self.size) % self.buf.len();
self.size += 1;
let prev = mem::replace(&mut self.buf[pos], Some(t));
assert!(prev.is_none());
}
fn dequeue(&mut self) -> T {
let start = self.start;
self.size -= 1;
self.start = (self.start + 1) % self.buf.len();
let result = &mut self.buf[start];
result.take().unwrap()
}
fn size(&self) -> usize { self.size }
fn cap(&self) -> usize { self.buf.len() }
}
////////////////////////////////////////////////////////////////////////////////
// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
////////////////////////////////////////////////////////////////////////////////
impl Queue {
fn enqueue(&mut self, node: &mut Node) -> WaitToken {
let (wait_token, signal_token) = blocking::tokens();
node.token = Some(signal_token);
node.next = ptr::null_mut();
if self.tail.is_null() {
self.head = node as *mut Node;
self.tail = node as *mut Node;
} else {
unsafe {
(*self.tail).next = node as *mut Node;
self.tail = node as *mut Node;
}
}
wait_token
}
fn dequeue(&mut self) -> Option<SignalToken> {
if self.head.is_null() {
return None
}
let node = self.head;
self.head = unsafe { (*node).next };
if self.head.is_null() {
self.tail = ptr::null_mut();
}
unsafe {
(*node).next = ptr::null_mut();
Some((*node).token.take().unwrap())
}
}
}
|
drop(guard);
token.signal();
}
impl<T> Packet<T> {
|
random_line_split
|
sync.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Synchronous channels/ports
///
/// This channel implementation differs significantly from the asynchronous
/// implementations found next to it (oneshot/stream/share). This is an
/// implementation of a synchronous, bounded buffer channel.
///
/// Each channel is created with some amount of backing buffer, and sends will
/// *block* until buffer space becomes available. A buffer size of 0 is valid,
/// which means that every successful send is paired with a successful recv.
///
/// This flavor of channels defines a new `send_opt` method, by which a
/// message is sent without the task panicking if the message cannot be
/// delivered.
///
/// Another major difference is that send() will *always* return the data
/// if it couldn't be sent. This is because it is deterministically known when
/// the data is received and when it is not received.
///
/// Implementation-wise, it can all be summed up with "use a mutex plus some
/// logic". The mutex used here is an OS native mutex, meaning that no user code
/// is run inside of the mutex (to prevent context switching). This
/// implementation shares almost all code for the buffered and unbuffered cases
/// of a synchronous channel. There are a few branches for the unbuffered case,
/// but they're mostly just relevant to blocking senders.
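// A minimal usage sketch (illustrative only; it exercises the public
// `std::sync::mpsc::sync_channel` wrapper built on top of this packet, not
// this module's private types). With a capacity of 0 every successful send is
// paired with a recv, as described above:
//
//     use std::sync::mpsc::sync_channel;
//     use std::thread;
//
//     fn main() {
//         let (tx, rx) = sync_channel::<u32>(0);
//         let sender = thread::spawn(move || {
//             tx.send(42).unwrap(); // blocks until the recv below runs
//         });
//         assert_eq!(rx.recv().unwrap(), 42);
//         sender.join().unwrap();
//     }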
use core::prelude::*;
pub use self::Failure::*;
use self::Blocker::*;
use vec::Vec;
use core::mem;
use core::ptr;
use sync::atomic::{Ordering, AtomicUsize};
use sync::mpsc::blocking::{self, WaitToken, SignalToken};
use sync::mpsc::select::StartResult::{self, Installed, Abort};
use sync::{Mutex, MutexGuard};
pub struct Packet<T> {
/// Only field outside of the mutex. Just done for kicks, but mainly because
/// the other shared channel already had the code implemented
channels: AtomicUsize,
lock: Mutex<State<T>>,
}
unsafe impl<T: Send> Send for Packet<T> { }
unsafe impl<T: Send> Sync for Packet<T> { }
struct State<T> {
disconnected: bool, // Is the channel disconnected yet?
queue: Queue, // queue of senders waiting to send data
blocker: Blocker, // currently blocked task on this channel
buf: Buffer<T>, // storage for buffered messages
cap: usize, // capacity of this channel
/// A curious flag used to indicate whether a sender failed or succeeded in
/// blocking. This is used to transmit information back to the task that it
/// must dequeue its message from the buffer because it was not received.
/// This is only relevant in the 0-buffer case. This obviously cannot be
/// safely constructed, but it's guaranteed to always have a valid pointer
/// value.
canceled: Option<&'static mut bool>,
}
unsafe impl<T: Send> Send for State<T> {}
/// Possible flavors of threads who can be blocked on this channel.
enum Blocker {
BlockedSender(SignalToken),
BlockedReceiver(SignalToken),
NoneBlocked
}
/// Simple queue for threading tasks together. Nodes are stack-allocated, so
/// this structure is not safe at all
struct Queue {
head: *mut Node,
tail: *mut Node,
}
struct Node {
token: Option<SignalToken>,
next: *mut Node,
}
unsafe impl Send for Node {}
/// A simple ring-buffer
struct Buffer<T> {
buf: Vec<Option<T>>,
start: usize,
size: usize,
}
#[derive(Debug)]
pub enum Failure {
Empty,
Disconnected,
}
/// Atomically blocks the current thread, placing it into `slot`, unlocking `lock`
/// in the meantime. This re-locks the mutex upon returning.
fn wait<'a, 'b, T>(lock: &'a Mutex<State<T>>,
mut guard: MutexGuard<'b, State<T>>,
f: fn(SignalToken) -> Blocker)
-> MutexGuard<'a, State<T>>
{
let (wait_token, signal_token) = blocking::tokens();
match mem::replace(&mut guard.blocker, f(signal_token)) {
NoneBlocked =>
|
_ => unreachable!(),
}
drop(guard); // unlock
wait_token.wait(); // block
lock.lock().unwrap() // relock
}
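// Illustrative aside: `blocking::tokens()` is private to the standard library,
// but the wait/signal pair behaves much like `thread::park` and
// `Thread::unpark`, which can sketch the same block-then-signal pattern (an
// assumption for illustration, not this module's actual implementation):
//
//     use std::thread;
//
//     fn main() {
//         let waiter = thread::spawn(|| thread::park()); // ~ wait_token.wait()
//         waiter.thread().unpark();                      // ~ signal_token.signal()
//         waiter.join().unwrap();
//     }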
/// Wakes up a thread, dropping the lock at the correct time
fn wakeup<T>(token: SignalToken, guard: MutexGuard<State<T>>) {
// We need to be careful to wake up the waiting task *outside* of the mutex
// in case it incurs a context switch.
drop(guard);
token.signal();
}
impl<T> Packet<T> {
pub fn new(cap: usize) -> Packet<T> {
Packet {
channels: AtomicUsize::new(1),
lock: Mutex::new(State {
disconnected: false,
blocker: NoneBlocked,
cap: cap,
canceled: None,
queue: Queue {
head: ptr::null_mut(),
tail: ptr::null_mut(),
},
buf: Buffer {
buf: (0..cap + if cap == 0 {1} else {0}).map(|_| None).collect(),
start: 0,
size: 0,
},
}),
}
}
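// Sizing sketch (illustrative): a zero-capacity channel still allocates one
// buffer slot so a blocked sender has somewhere to park its value during the
// rendezvous, while any positive capacity allocates exactly `cap` slots.
//
//     fn slots(cap: usize) -> usize {
//         cap + if cap == 0 { 1 } else { 0 }
//     }
//
//     fn main() {
//         assert_eq!(slots(0), 1); // rendezvous slot
//         assert_eq!(slots(4), 4);
//     }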
// wait until a send slot is available, returning locked access to
// the channel state.
fn acquire_send_slot(&self) -> MutexGuard<State<T>> {
let mut node = Node { token: None, next: ptr::null_mut() };
loop {
let mut guard = self.lock.lock().unwrap();
// are we ready to go?
if guard.disconnected || guard.buf.size() < guard.buf.cap() {
return guard;
}
// no room; actually block
let wait_token = guard.queue.enqueue(&mut node);
drop(guard);
wait_token.wait();
}
}
pub fn send(&self, t: T) -> Result<(), T> {
let mut guard = self.acquire_send_slot();
if guard.disconnected { return Err(t) }
guard.buf.enqueue(t);
match mem::replace(&mut guard.blocker, NoneBlocked) {
// if our capacity is 0, then we need to wait for a receiver to be
// available to take our data. After waiting, we check again to make
// sure the port didn't go away in the meantime. If it did, we need
// to hand back our data.
NoneBlocked if guard.cap == 0 => {
let mut canceled = false;
assert!(guard.canceled.is_none());
guard.canceled = Some(unsafe { mem::transmute(&mut canceled) });
let mut guard = wait(&self.lock, guard, BlockedSender);
if canceled {Err(guard.buf.dequeue())} else {Ok(())}
}
// success, we buffered some data
NoneBlocked => Ok(()),
// success, someone's about to receive our buffered data.
BlockedReceiver(token) => { wakeup(token, guard); Ok(()) }
BlockedSender(..) => panic!("lolwut"),
}
}
pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected {
Err(super::TrySendError::Disconnected(t))
} else if guard.buf.size() == guard.buf.cap() {
Err(super::TrySendError::Full(t))
} else if guard.cap == 0 {
// With capacity 0, even though we have buffer space we can't
// transfer the data unless there's a receiver waiting.
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => Err(super::TrySendError::Full(t)),
BlockedSender(..) => unreachable!(),
BlockedReceiver(token) => {
guard.buf.enqueue(t);
wakeup(token, guard);
Ok(())
}
}
} else {
// If the buffer has some space and the capacity isn't 0, then we
// just enqueue the data for later retrieval, ensuring to wake up
// any blocked receiver if there is one.
assert!(guard.buf.size() < guard.buf.cap());
guard.buf.enqueue(t);
match mem::replace(&mut guard.blocker, NoneBlocked) {
BlockedReceiver(token) => wakeup(token, guard),
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
}
Ok(())
}
}
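// try_send sketch (illustrative; uses the public `SyncSender` wrapper rather
// than this private packet type): a full buffer hands the value back inside
// `TrySendError::Full` instead of blocking.
//
//     use std::sync::mpsc::{sync_channel, TrySendError};
//
//     fn main() {
//         let (tx, _rx) = sync_channel::<i32>(1);
//         tx.try_send(1).unwrap(); // fills the single buffered slot
//         match tx.try_send(2) {
//             Err(TrySendError::Full(v)) => assert_eq!(v, 2), // data returned
//             other => panic!("unexpected: {:?}", other),
//         }
//     }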
// Receives a message from this channel
//
// When reading this, remember that there can only ever be one receiver at
// time.
pub fn recv(&self) -> Result<T, ()> {
let mut guard = self.lock.lock().unwrap();
// Wait for the buffer to have something in it. No need for a while loop
// because we're the only receiver.
let mut waited = false;
if !guard.disconnected && guard.buf.size() == 0 {
guard = wait(&self.lock, guard, BlockedReceiver);
waited = true;
}
if guard.disconnected && guard.buf.size() == 0 { return Err(()) }
// Pick up the data, wake up our neighbors, and carry on
assert!(guard.buf.size() > 0);
let ret = guard.buf.dequeue();
self.wakeup_senders(waited, guard);
return Ok(ret);
}
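// Disconnection sketch (illustrative, via the public wrapper): once every
// sender has been dropped and the buffer is empty, recv reports an error
// rather than blocking forever.
//
//     use std::sync::mpsc::sync_channel;
//
//     fn main() {
//         let (tx, rx) = sync_channel::<u8>(1);
//         drop(tx);
//         assert!(rx.recv().is_err());
//     }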
pub fn try_recv(&self) -> Result<T, Failure> {
let mut guard = self.lock.lock().unwrap();
// Easy cases first
if guard.disconnected { return Err(Disconnected) }
if guard.buf.size() == 0 { return Err(Empty) }
// Be sure to wake up neighbors
let ret = Ok(guard.buf.dequeue());
self.wakeup_senders(false, guard);
return ret;
}
// Wake up pending senders after some data has been received
//
// * `waited` - flag if the receiver blocked to receive some data, or if it
// just picked up some data on the way out
// * `guard` - the lock guard that is held over this channel's lock
fn wakeup_senders(&self, waited: bool, mut guard: MutexGuard<State<T>>) {
let pending_sender1: Option<SignalToken> = guard.queue.dequeue();
// If this is a no-buffer channel (cap == 0), then if we didn't wait we
// need to ACK the sender. If we waited, then the sender waking us up
// was already the ACK.
let pending_sender2 = if guard.cap == 0 && !waited {
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedReceiver(..) => unreachable!(),
BlockedSender(token) => {
guard.canceled.take();
Some(token)
}
}
} else {
None
};
mem::drop(guard);
// only outside of the lock do we wake up the pending tasks
pending_sender1.map(|t| t.signal());
pending_sender2.map(|t| t.signal());
}
// Prepares this shared packet for a channel clone, essentially just bumping
// a refcount.
pub fn clone_chan(&self) {
self.channels.fetch_add(1, Ordering::SeqCst);
}
pub fn drop_chan(&self) {
// Only flag the channel as disconnected if we're the last channel
match self.channels.fetch_sub(1, Ordering::SeqCst) {
1 => {}
_ => return
}
// Not much to do other than wake up a receiver if one's there
let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
BlockedReceiver(token) => wakeup(token, guard),
}
}
pub fn drop_port(&self) {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
// If the capacity is 0, then the sender may want its data back after
// we're disconnected. Otherwise it's now our responsibility to destroy
// the buffered data. As with many other portions of this code, this
// needs to be careful to destroy the data *outside* of the lock to
// prevent deadlock.
let _data = if guard.cap != 0 {
mem::replace(&mut guard.buf.buf, Vec::new())
} else {
Vec::new()
};
let mut queue = mem::replace(&mut guard.queue, Queue {
head: ptr::null_mut(),
tail: ptr::null_mut(),
});
let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedSender(token) => {
*guard.canceled.take().unwrap() = true;
Some(token)
}
BlockedReceiver(..) => unreachable!(),
};
mem::drop(guard);
loop {
match queue.dequeue() {
Some(token) => { token.signal(); }
None => break,
}
}
waiter.map(|t| t.signal());
}
////////////////////////////////////////////////////////////////////////////
// select implementation
////////////////////////////////////////////////////////////////////////////
// Reports whether a call to recv() would complete without blocking, i.e.
// whether there is buffered data or the channel has been disconnected.
pub fn can_recv(&self) -> bool {
let guard = self.lock.lock().unwrap();
guard.disconnected || guard.buf.size() > 0
}
// Attempts to start selection on this port. This can either succeed or fail
// because there is data waiting.
pub fn start_selection(&self, token: SignalToken) -> StartResult {
let mut guard = self.lock.lock().unwrap();
if guard.disconnected || guard.buf.size() > 0 {
Abort
} else {
match mem::replace(&mut guard.blocker, BlockedReceiver(token)) {
NoneBlocked => {}
BlockedSender(..) => unreachable!(),
BlockedReceiver(..) => unreachable!(),
}
Installed
}
}
// Remove a previous selecting task from this port. This ensures that the
// blocked task will no longer be visible to any other threads.
//
// The return value indicates whether there's data on this port.
pub fn abort_selection(&self) -> bool {
let mut guard = self.lock.lock().unwrap();
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => true,
BlockedSender(token) => {
guard.blocker = BlockedSender(token);
true
}
BlockedReceiver(token) => { drop(token); false }
}
}
}
#[unsafe_destructor]
impl<T> Drop for Packet<T> {
fn drop(&mut self) {
assert_eq!(self.channels.load(Ordering::SeqCst), 0);
let mut guard = self.lock.lock().unwrap();
assert!(guard.queue.dequeue().is_none());
assert!(guard.canceled.is_none());
}
}
////////////////////////////////////////////////////////////////////////////////
// Buffer, a simple ring buffer backed by Vec<T>
////////////////////////////////////////////////////////////////////////////////
impl<T> Buffer<T> {
fn enqueue(&mut self, t: T) {
let pos = (self.start + self.size) % self.buf.len();
self.size += 1;
let prev = mem::replace(&mut self.buf[pos], Some(t));
assert!(prev.is_none());
}
fn dequeue(&mut self) -> T {
let start = self.start;
self.size -= 1;
self.start = (self.start + 1) % self.buf.len();
let result = &mut self.buf[start];
result.take().unwrap()
}
fn size(&self) -> usize { self.size }
fn cap(&self) -> usize { self.buf.len() }
}
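// Index-arithmetic sketch (illustrative): with a backing Vec of length 3,
// start == 2 and size == 1, the next enqueue wraps around to slot 0, and the
// matching dequeue advances `start` to 0 as well.
//
//     fn main() {
//         let (len, start, size) = (3usize, 2usize, 1usize);
//         assert_eq!((start + size) % len, 0); // enqueue position wraps
//         assert_eq!((start + 1) % len, 0);    // dequeue's new start
//     }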
////////////////////////////////////////////////////////////////////////////////
// Queue, a simple queue to enqueue tasks with (stack-allocated nodes)
////////////////////////////////////////////////////////////////////////////////
impl Queue {
fn enqueue(&mut self, node: &mut Node) -> WaitToken {
let (wait_token, signal_token) = blocking::tokens();
node.token = Some(signal_token);
node.next = ptr::null_mut();
if self.tail.is_null() {
self.head = node as *mut Node;
self.tail = node as *mut Node;
} else {
unsafe {
(*self.tail).next = node as *mut Node;
self.tail = node as *mut Node;
}
}
wait_token
}
fn dequeue(&mut self) -> Option<SignalToken> {
if self.head.is_null() {
return None
}
let node = self.head;
self.head = unsafe { (*node).next };
if self.head.is_null() {
self.tail = ptr::null_mut();
}
unsafe {
(*node).next = ptr::null_mut();
Some((*node).token.take().unwrap())
}
}
}
|
{}
|
conditional_block
|
crt-static.rs
|
// Test proc-macro crate can be built without additional RUSTFLAGS
// on musl target
// override -Ctarget-feature=-crt-static from compiletest
// compile-flags: --crate-type proc-macro -Ctarget-feature=
// ignore-wasm32
// ignore-sgx no support for proc-macro crate type
// build-pass
#![crate_type = "proc-macro"]
// FIXME: This doesn't work when the crate type is specified by the attribute
// `#![crate_type = "proc-macro"]` rather than by the `--crate-type=proc-macro`
// command line flag. This is because the list of `cfg` symbols is generated
// before attributes are parsed. See rustc_interface::util::add_configuration
#[cfg(target_feature = "crt-static")]
compile_error!("crt-static is enabled");
extern crate proc_macro;
use proc_macro::TokenStream;
#[proc_macro_derive(Foo)]
pub fn
|
(input: TokenStream) -> TokenStream {
input
}
|
derive_foo
|
identifier_name
|
crt-static.rs
|
// Test proc-macro crate can be built without additional RUSTFLAGS
// on musl target
// override -Ctarget-feature=-crt-static from compiletest
// compile-flags: --crate-type proc-macro -Ctarget-feature=
// ignore-wasm32
// ignore-sgx no support for proc-macro crate type
// build-pass
#![crate_type = "proc-macro"]
// FIXME: This doesn't work when the crate type is specified by the attribute
// `#![crate_type = "proc-macro"]` rather than by the `--crate-type=proc-macro`
// command line flag. This is because the list of `cfg` symbols is generated
// before attributes are parsed. See rustc_interface::util::add_configuration
#[cfg(target_feature = "crt-static")]
compile_error!("crt-static is enabled");
extern crate proc_macro;
use proc_macro::TokenStream;
#[proc_macro_derive(Foo)]
pub fn derive_foo(input: TokenStream) -> TokenStream
|
{
input
}
|
identifier_body
|