| file_name (string, 4–69 chars) | prefix (string, 0–26.7k chars) | suffix (string, 0–24.8k chars) | middle (string, 0–2.12k chars) | fim_type (4 classes) |
|---|---|---|---|---|
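Each row below splits one Rust source file into a prefix, a suffix, and the middle span that was cut out, tagged with one of four `fim_type` classes (the rows shown here use `random_line_split`, `conditional_block`, `identifier_body`, and `identifier_name`). A consumer typically stitches the three pieces back together around sentinel tokens for fill-in-the-middle training. The sketch below shows one such assembly; the `<fim_*>` sentinel names and the `FimRow` struct are illustrative assumptions, not part of this dataset:

```rust
// Hypothetical PSM-style (prefix-suffix-middle) sample assembly.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String, // e.g. "random_line_split" or "identifier_body"
}

fn to_training_text(row: &FimRow) -> String {
    // The model sees the prefix and suffix first, then predicts the middle.
    format!(
        "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
        row.prefix, row.suffix, row.middle
    )
}
```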
elgamal.rs:
// NOTE: this fragment begins mid-file. The missing code above draws the
// random exponent k in a rejection loop (`let k = loop { ... break k; ... }`)
// whose closing brace is the line below.
};
// γ = α^k mod p
let gamma = ELGAMAL_G.modpow(&k, &ELGAMAL_P);
(k, gamma)
}
/// Generates ElGamal keypairs.
pub struct KeyPairGenerator;
impl KeyPairGenerator {
/// ElGamal key generation, following algorithm 8.17.
pub fn generate() -> (PrivateKey, PublicKey) {
// Select a random integer a, 1 <= a <= p - 2
// Public key is α^a mod p
let (a, alpha_a) = gen_gamma_k();
let priv_key = {
let buf = rectify(&a, 256);
let mut x = [0u8; 256];
x.copy_from_slice(&buf[..]);
PrivateKey(x)
};
let pub_key = {
let buf = rectify(&alpha_a, 256);
let mut x = [0u8; 256];
x.copy_from_slice(&buf[..]);
PublicKey(x)
};
(priv_key, pub_key)
}
}
pub struct Encryptor(BigUint);
impl<'a> From<&'a PublicKey> for Encryptor {
fn from(pub_key: &PublicKey) -> Self {
Encryptor(BigUint::from_bytes_be(&pub_key.0[..]))
}
}
impl Encryptor {
/// Basic ElGamal encryption, following algorithm 8.18 1) of the Handbook of Applied Cryptography.
fn encrypt_basic(&self, msg: &[u8]) -> Result<(BigUint, BigUint), Error> {
// Represent the message as an integer m in the range {0, 1, ..., p - 1}
let m = BigUint::from_bytes_be(msg);
if m > *ELGAMAL_PM1 {
return Err(Error::InvalidMessage);
}
// Select a random integer k, 1 <= k <= p - 2
// γ = α^k mod p
let (k, gamma) = gen_gamma_k();
// δ = m * (α^a)^k mod p
let s = self.0.modpow(&k, &ELGAMAL_P);
let delta = m.mul(s).rem(&(*ELGAMAL_P));
Ok((gamma, delta))
}
/// ElGamal encryption using I2P's message and ciphertext encoding schemes.
pub fn encrypt(&self, msg: &[u8], include_zeroes: bool) -> Result<Vec<u8>, Error> {
// Message must be no more than 222 bytes
if msg.len() > 222 {
return Err(Error::InvalidMessage);
}
let mut rng = OsRng;
let hash = Sha256::digest(msg);
// ElGamal plaintext:
// 0 1 33
// | nonzero byte | SHA256(msg) | msg |
let mut data = Vec::with_capacity(33 + msg.len());
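// Rejection-sample the RNG until it yields a nonzero lead byte.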
data.push(loop {
let val = rng.gen();
if val != 0 {
break val;
}
});
data.extend_from_slice(hash.as_slice());
data.extend_from_slice(msg);
self.encrypt_basic(&data).map(|(gamma, delta)| {
if include_zeroes {
// ElGamal ciphertext:
// 0 1 257 258 514
// | 0 | padding zeroes | gamma | 0 | padding zeroes | delta |
let gamma = rectify(&gamma, 256);
let delta = rectify(&delta, 256);
let mut ct = vec![0; 514];
ct[1..257].copy_from_slice(&gamma);
ct[258..514].copy_from_slice(&delta);
ct
} else {
// ElGamal ciphertext:
// 0 256 512
// | padding zeroes | gamma | padding zeroes | delta |
let gamma = rectify(&gamma, 256);
let delta = rectify(&delta, 256);
let mut ct = vec![0; 512];
ct[0..256].copy_from_slice(&gamma);
ct[256..512].copy_from_slice(&delta);
ct
}
})
}
}
#[derive(Clone)]
pub struct Decryptor(BigUint);
impl<'a> From<&'a PrivateKey> for Decryptor {
fn from(priv_key: &PrivateKey) -> Self {
Decryptor(BigUint::from_bytes_be(&priv_key.0[..]))
}
}
impl Decryptor {
/// Basic ElGamal decryption, following algorithm 8.18 2) of the Handbook of Applied Cryptography.
fn decrypt_basic(&self, (gamma, delta): (BigUint, BigUint)) -> Vec<u8> {
// γ^{-a} = γ^{p-1-a}
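// (Fermat's little theorem: γ^{p-1} ≡ 1 (mod p), so the modular inverse
// is obtained by exponentiating with p-1-a instead of computing -a.)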
let gamma_neg_a = gamma.modpow(&(&(*ELGAMAL_PM1)).sub(&self.0), &ELGAMAL_P);
// m = (γ^{-a}) * δ mod p
let m = gamma_neg_a.mul(delta).rem(&(*ELGAMAL_P));
m.to_bytes_be()
}
/// ElGamal decryption using I2P's message and ciphertext encoding schemes.
pub fn decrypt(&self, ct: &[u8], has_zeroes: bool) -> Result<Vec<u8>, Error> {
let (gamma, delta) = if has_zeroes {
// Ciphertext must be 514 bytes
if ct.len() != 514 {
return Err(Error::InvalidCiphertext);
}
// ElGamal ciphertext:
// 0 1 257 258 514
// | 0 | padding zeroes | gamma | 0 | padding zeroes | delta |
let gamma = BigUint::from_bytes_be(&ct[..257]);
let delta = BigUint::from_bytes_be(&ct[257..]);
(gamma, delta)
} else {
// Ciphertext must be 512 bytes
if ct.len() != 512 {
return Err(Error::InvalidCiphertext);
}
// ElGamal ciphertext:
// 0 256 512
// | padding zeroes | gamma | padding zeroes | delta |
let gamma = BigUint::from_bytes_be(&ct[..256]);
let delta = BigUint::from_bytes_be(&ct[256..]);
(gamma, delta)
};
let data = self.decrypt_basic((gamma, delta));
if data.len() < 33 {
// Decrypted data is too small
return Err(Error::InvalidCiphertext);
}
// ElGamal plaintext:
// 0 1 33
// | nonzero byte | SHA256(msg) | msg |
let msg = data[33..].to_vec();
let hash = Sha256::digest(&msg);
if hash.as_slice() == &data[1..33] {
Ok(msg)
} else {
Err(Error::InvalidCiphertext)
}
}
}
#[cfg(test)]
mod tests {
use super::{Decryptor, Encryptor, KeyPairGenerator};
use crate::constants::I2P_BASE64;
use crate::crypto::{PrivateKey, PublicKey};
#[test]
fn round_trip_basic() {
let (priv_key, pub_key) = KeyPairGenerator::generate();
let enc = Encryptor::from(&pub_key);
let dec = Decryptor::from(&priv_key);
// All-zeroes message is returned as a single byte
let msg = [0u8; 256];
let ct = enc.encrypt_basic(&msg[..]).unwrap();
let pt = dec.decrypt_basic(ct);
assert_eq!(&pt, &[0]);
// All-ones message is returned as-is
let msg = [1u8; 256];
let ct = enc.encrypt_basic(&msg[..]).unwrap();
let pt = dec.decrypt_basic(ct);
assert_eq!(&pt[..], &msg[..]);
}
#[test]
fn round_trip() {
let (priv_key, pub_key) = KeyPairGenerator::generate();
let enc = Encryptor::from(&pub_key);
let dec = Decryptor::from(&priv_key);
// Message too long
assert!(enc.encrypt(&[0u8; 223], true).is_err());
// Full-width all-zeroes message
let msg = [0u8; 222];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Short all-zeroes message
let msg = [0u8; 8];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Full-width all-ones message
let msg = [1u8; 222];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
// Short all-ones message
let msg = [1u8; 8];
let ct = enc.encrypt(&msg[..], true).unwrap();
let pt = dec.decrypt(&ct, true).unwrap();
assert_eq!(&pt[..], &msg[..]);
}
/// From `core/java/test/junit/net/i2p/crypto/ElGamalTest.java` in Java I2P.
#[test]
fn test_vectors() {
let pub_key = "pOvBUMrSUUeN5awynzbPbCAwe3MqWprhSpp3OR7pvdfm9PhWaNbPoKRLeEmDoUwyNDoHE0\
E6mcZSG8qPQ8XUZFlczpilOl0MJBvsI9u9SMyi~bEqzSgzh9FNfS-NcGji3q2wI~Ux~q5B\
KOjGlyMLgd1nxl5R5wIYL4uHKZNaYuArsRYmtV~MgMQPGvDtIbdGTV6aL6UbOYryzQSUMY\
OuO3S~YoBjA6Nmi0SeJM3tyTxlI6U1EYjR6oQcI4SOFUW4L~8pfYWijcncCODAqpXVN6ZI\
AJ3a6vjxGu56IDp4xCcKlOEHgdXvqmEC67dR5qf2btH6dtWoB3-Z6QPsS6tPTQ==";
let priv_key = "gMlIhURVXU8uPube20Xr8E1K11g-3qZxOj1riThHqt-rBx72MPq5ivT1rr28cE9mzOmsXi\
bbsuBuQKYDvF7hGICRB3ROSPePYhcupV3j7XiXUIYjWNw9hvylHXK~nTT7jkpIBazBJZfr\
LJPcDZTDB0YnCOHOL-KFn4N1R5B22g0iYRABN~O10AUjQmf1epklAXPqYlzmOYeJSfTPBI\
E44nEccWJp0M0KynhKVbDI0v9VYm6sPFK7WrzRyWwHL~r735wiRkwywuMmKJtA7-PuJjcW\
NLkJwx6WScH2msMzhzYPi8JSZJBl~PosX934l-L0T-KNV4jg1Ih6yoCnm1748A==";
struct TestVector<'a> {
msg: &'a str,
ct: &'a str,
};
let test_vectors = vec![
TestVector {
msg: "",
ct: "AMfISa8KvTpaC7KXZzSvC2axyiSk0xPexBAf29yU~IKq21DzaU19wQcGJg-ktpG4hjGSg7\
u-mJ07b61yo-EGmVGZsv3nYuQYW-GjvsZQa9nm98VljlMtWrxu7TsRXw~SQlWQxMvthqJB\
1A7Y7Qa~C7-UlRytkD-cpVdgUfM-esuMWmjGs6Vc33N5U-tce5Fywa-9y7PSn3ukBO8KGR\
wm7T12~H2gvhgxrVeK2roOzsV7f5dGkvBQRZJ309Vg3j0kjaxWutgI3vli0pzDbSK9d5NR\
-GUDtdOb6IIfLiOckBegcv6I-wlSXjYJe8mIoaK45Ok3rEpHwWKVKS2MeuI7AmsAWgkQmW\
f8irmZaKc9X910VWSO5GYu6006hSc~r2TL3O7vwtW-Z9Oq~sAam9av1PPVJzAx8A4g~m~1\
avtNnncwlChsGo6mZHXqz-QMdMJXXP57f4bx36ZomkvpM-ZLlFAn-a~42KQJAApo4LfEyk\
7DPY2aTXL9ArOCNQIQB4f8QLyjvAvu6M3jzCoGo0wVX6oePfdiokGflriYOcD8rL4NbnCP\
~MSnVzC8LKyRzQVN1tDYj8~njuFqekls6En8KFJ-qgtL4PiYxbnBQDUPoW6y61m-S9r9e9\
y8qWd6~YtdAHAxVlw287~HEp9r7kqI-cjdo1337b7~5dm83KK45g5Nfw==",
},
TestVector {
msg: "hello world",
ct: "AIrd65mG1FJ~9J-DDSyhryVejJBSIjYOqV3GYmHDWgwLchTwq-bJS7dub3ENk9MZ-C6FIN\
gjUFRaLBtfwJnySmNf8pIf1srmgdfqGV2h77ufG5Gs0jggKPmPV~7Z1kTcgsqpL8MyrfXr\
Gi86X5ey-T0SZSFc0X1EhaE-47WlyWaGf-~xth6VOR~KG7clOxaOBpks-7WKZNQf7mpQRE\
4IsPJyj5p1Rf-MeDbVKbK~52IfXSuUZQ8uZr34KMoy4chjn6e-jBhM4XuaQWhsM~a3Q-zE\
pV-ea6t0bQTYfsbG9ch7pJuDPHM64o5mF9FS5-JGr7MOtfP7KDNHiYM2~-uC6BIAbiqBN8\
WSLX1mrHVuhiM-hiJ7U4oq~HYB6N~U980sCIW0dgFBbhalzzQhJQSrC1DFDqGfL5-L25mj\
ArP8dtvN0JY3LSnbcsm-pT9ttFHCPGomLfaAuP7ohknBoXK0j9e6~splg5sUA9TfLeBfqc\
Lr0Sf8b3l~PvmrVkbVcaE8yUqSS6JFdt3pavjyyAQSmSlb2jVNKGPlrov5QLzlbH7G~AUv\
IehsbGQX5ptRROtSojN~iYx3WQTOa-JLEC-AL7RbRu6B62p9I0pD0JgbUfCc4C4l9E9W~s\
MuaJLAXxh0b2miF7C5bzZHxbt~MtZ7Ho5qpZMitXyoE3icb43B6Y1sbA==",
},
TestVector {
msg: "1234567890123456789012345678901234567890123456789012345678901234567890\
1234567890123456789012345678901234567890123456789012345678901234567890\
1234567890123456789012345678901234567890123456789012345678901234567890\
123456789012",
ct: "ACjb0FkTIQbnEzCZlYXGxekznfJad5uW~F5Mbu~0wtsI1O2veqdr7Mb0N754xdIz7929Ti\
1Kz-CxVEAkb3RBbVNcYHLfjy23oQ4BCioDKQaJcdkJqXa~Orm7Ta2tbkhM1Mx05MDrQaVF\
gCVXtwTsPSLVK8VwScjPIFLXgQqqZ5osq~WhaMcYe2I2RCQLOx2VzaKbT21MMbtF70a-nK\
WovkRUNfJEPeJosFwF2duAD0BHHrPiryK9BPDhyOiyN82ahOi2uim1Nt5yhlP3xo7cLV2p\
6kTlR1BNC5pYjtsvetZf6wk-solNUrJWIzcuc18uRDNH5K90GTL6FXPMSulM~E4ATRQfhZ\
fkW9xCrBIaIQM49ms2wONsp7fvI07b1r0rt7ZwCFOFit1HSAKl8UpsAYu-EsIO1qAK7vvO\
UV~0OuBXkMZEyJT-uIVfbE~xrwPE0zPYE~parSVQgi~yNQBxukUM1smAM5xXVvJu8GjmE-\
kJZw1cxaYLGsJjDHDk4HfEsyQVVPZ0V3bQvhB1tg5cCsTH~VNjts4taDTPWfDZmjtVaxxr\
PRII4NEDKqEzg3JBevM~yft-RDfMc8RVlm-gCGANrRQORFii7uD3o9~y~4P2tLnO7Fy3m5\
rdjRsOsWnCQZzw37mcBoT9rEZPrVpD8pjebJ1~HNc764xIpXDWVt8CbA==",
},
TestVector {
msg: "\0x00",
ct: "AHDZBKiWeaIYQS9R1l70IlRnoplwKTkLP2dLlXmVh1gB33kx65uX8OMb3hdZEO0Bbzxkkx\
quqlNn5w166nJO4nPbpEzVfgtY4ClUuv~W4H4CXBr0FcZM1COAkd6rtp6~lUp7cZ8FAkpH\
spl95IxlFM-F1HwiPcbmTjRO1AwCal4sH8S5WmJCvBU6jH6pBPo~9B9vAtP7vX1EwsG2Jf\
CQXkVkfvbWpSicbsWn77aECedS3HkIMrXrxojp7gAiPgQhX4NR387rcUPFsMHGeUraTUPZ\
D7ctk5tpUuYYwRQc5cRKHa4zOq~AQyljx5w5~FByLda--6yCe7qDcILyTygudJ4AHRs1pJ\
RU3uuRTHZx0XJQo~cPsoQ2piAOohITX9~yMCimCgv2EIhY3Z-mAgo8qQ4iMbItoE1cl93I\
u2YV2n4wMq9laBx0shuKOJqO3rjRnszzCbqMuFAXfc3KgGDEaCpI7049s3i2yIcv4vT9uU\
AlrM-dsrdw0JgJiFYl0JXh~TO0IyrcVcLpgZYgRhEvTAdkDNwTs-2GK4tzdPEd34os4a2c\
DPL8joh3jhp~eGoRzrpcdRekxENdzheL4w3wD1fJ9W2-leil1FH6EPc3FSL6e~nqbw69gN\
bsuXAMQ6CobukJdJEy37uKmEw4v6WPyfYMUUacchv1JoNfkHLpnAWifQ==",
},
TestVector {
msg: "\0x00\0x00\0x00",
ct: "AGwvKAMJcPAliP-n7F0Rrj0JMRaFGjww~zvBjyzc~SPJrBF831cMqZFRmMHotgA7S5BrH2\
6CL8okI2N-7as0F2l7OPx50dFEwSVSjqBjVV6SGRFC8oS-ii1FURMz2SCHSaj6kazAYq4s\
DwyqR7vnUrOtPnZujHSU~a02jinyn-QOaHkxRiUp-Oo0jlZiU5xomXgLdkhtuz6725WUDj\
3uVlMtIYfeKQsTdasujHe1oQhUmp58jfg5vgZ8g87cY8rn4p9DRwDBBuo6vi5on7T13sGx\
tY9wz6HTpwzDhEqpNrj~h4JibElfi0Jo8ZllmNTO1ZCNpUQgASoTtyFLD5rk6cIAMK0R7A\
7hjB0aelKM-V7AHkj-Fhrcm8xIgWhKaLn2wKbVNpAkllkiLALyfWJ9dhJ804RWQTMPE-GD\
kBMIFOOJ9MhpEN533OBQDwUKcoxMjl0zOMNCLx8IdCE6cLtUDKJXLB0atnDpLkBer6FwXP\
81EvKDYhtp1GsbiKvZDt8LSPJQnm2EdA3Pr9fpAisJ5Ocaxlfa6~uQCuqGA9nJ9n6w03u-\
ZpSMhSh4zm2s1MqijmaJRc-QNKmN~u1hh3R2hwWNi7FoStMA87sutEBXMdFI8un7StHNSE\
iCYwmmW2Nu3djkM-X8gGjSsdrphTU7uOXbwazmguobFGxI0JujYruM5Q==",
},
TestVector {
msg: "\0x00\0x01\0x02\0x00",
ct: "ALFYtPSwEEW3eTO4hLw6PZNlBKoSIseQNBi034gq6FwYEZsJOAo-1VXcvMviKw2MCP9ZkH\
lTNBfzc79ms2TU8kXxc7zwUc-l2HJLWh6dj2tIQLR8bbWM7U0iUx4XB1B-FEvdhbjz7dsu\
6SBXVhxo2ulrk7Q7vX3kPrePhZZldcNZcS0t65DHYYwL~E~ROjQwOO4Cb~8FgiIUjb8CCN\
w5zxJpBaEt7UvZffkVwj-EWTzFy3DIjWIRizxnsI~mUI-VspPE~xlmFX~TwPS9UbwJDpm8\
-WzINFcehSzF3y9rzSMX-KbU8m4YZj07itZOiIbWgLeulTUB-UgwEkfJBG0xiSUAspZf2~\
t~NthBlpcdrBLADXTJ7Jmkk4MIfysV~JpDB7IVg0v4WcUUwF3sYMmBCdPCwyYf0hTrl2Yb\
L6kmm4u97WgQqf0TyzXtVZYwjct4LzZlyH591y6O6AQ4Fydqos9ABInzu-SbXq6S1Hi6vr\
aNWU3mcy2myie32EEXtkX7P8eXWY35GCv9ThPEYHG5g1qKOk95ZCTYYwlpgeyaMKsnN3C~\
x9TJA8K8T44v7vE6--Nw4Z4zjepwkIOht9iQsA6D6wRUQpeYX8bjIyYDPC7GUHq0WhXR6E\
6Ojc9k8V5uh0SZ-rCQX6sccdk3JbyRhjGP4rSKr6MmvxVVsqBjcbpxsg==",
},
];
let enc = {
let mut data = [0u8; 256];
data.copy_from_slice(&I2P_BASE64.decode(pub_key.as_bytes()).unwrap());
Encryptor::from(&PublicKey(data))
};
let dec = {
let mut data = [0u8; 256];
data.copy_from_slice(&I2P_BASE64.decode(priv_key.as_bytes()).unwrap());
Decryptor::from(&PrivateKey(data))
};
for tv in test_vectors {
let msg = tv.msg.as_bytes();
let ct = I2P_BASE64.decode(tv.ct.as_bytes()).unwrap();
// Check round-trip
assert_eq!(
dec.decrypt(&enc.encrypt(msg, true).unwrap(), true).unwrap(),
msg
);
assert_eq!(
dec.decrypt(&enc.encrypt(msg, false).unwrap(), false)
.unwrap(),
msg
);
// Check test vector
assert_eq!(dec.decrypt(&ct, true).unwrap(), msg);
}
}
}
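The round-trip tests above show the intended API flow. The sketch below isolates the happy path; the module path and the `Error` plumbing are assumptions based on the imports visible in the tests, not confirmed by this fragment:

```rust
// Hypothetical usage sketch (module path assumed).
use crate::crypto::elgamal::{Decryptor, Encryptor, KeyPairGenerator};

fn round_trip_demo() -> Result<(), Error> {
    let (priv_key, pub_key) = KeyPairGenerator::generate();
    let enc = Encryptor::from(&pub_key);
    let dec = Decryptor::from(&priv_key);
    // I2P's encoding caps plaintexts at 222 bytes; `true` selects the
    // 514-byte ciphertext layout with explicit leading zero bytes.
    let ct = enc.encrypt(b"attack at dawn", true)?;
    assert_eq!(dec.decrypt(&ct, true)?, b"attack at dawn");
    Ok(())
}
```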
nelder_mead.rs:
//! Adaptive Nelder-Mead simplex algorithm.
//!
//! # References
//!
//! - [Implementing the Nelder-Mead simplex algorithm with adaptive parameters][ANMS]
//! - [Nelder-Mead algorithm](http://var.scholarpedia.org/article/Nelder-Mead_algorithm)
//! - [Nelder-Mead Method (Wikipedia)](https://en.wikipedia.org/wiki/Nelder–Mead_method)
//!
//! [ANMS]: https://link.springer.com/article/10.1007/s10589-010-9329-3
use crate::domains::ContinuousDomain;
use crate::{ErrorKind, IdGen, Obs, ObsId, Optimizer, Result};
use rand::distributions::Distribution;
use rand::Rng;
use std::f64::EPSILON;
/// An optimizer based on [Adaptive Nelder-Mead Simplex (ANMS)][ANMS] algorithm.
///
/// [ANMS]: https://link.springer.com/article/10.1007/s10589-010-9329-3
#[derive(Debug)]
pub struct NelderMeadOptimizer<V> {
params_domain: Vec<ContinuousDomain>,
simplex: Vec<Obs<Vec<f64>, V>>,
alpha: f64,
beta: f64,
gamma: f64,
delta: f64,
initial: Vec<Vec<f64>>,
centroid: Vec<f64>,
evaluating: Option<ObsId>,
state: State<V>,
}
impl<V> NelderMeadOptimizer<V>
where
V: Ord,
{
/// Makes a new `NelderMeadOptimizer`.
pub fn new<R: Rng>(params_domain: Vec<ContinuousDomain>, mut rng: R) -> Result<Self> {
let point = params_domain
.iter()
.map(|p| p.sample(&mut rng))
.collect::<Vec<_>>();
track!(Self::with_initial_point(params_domain, &point))
}
/// Makes a new `NelderMeadOptimizer` which has the given search point.
pub fn with_initial_point(params_domain: Vec<ContinuousDomain>, point: &[f64]) -> Result<Self> {
let mut initial_simplex = vec![point.to_vec()];
for i in 0..params_domain.len() {
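// Build vertex i+1 by nudging the i-th coordinate: an absolute +0.05, or
// +0.00025 when that coordinate is exactly zero (fminsearch-style constants).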
let tau = if point[i] == 0.0 { 0.00025 } else { 0.05 };
let x = point
.iter()
.enumerate()
.map(|(j, &x0)| if i == j { x0 + tau } else { x0 })
.collect();
initial_simplex.push(x);
}
track!(Self::with_initial_simplex(params_domain, initial_simplex))
}
/// Makes a new `NelderMeadOptimizer` with the given simplex.
pub fn with_initial_simplex(
params_domain: Vec<ContinuousDomain>,
initial_simplex: Vec<Vec<f64>>,
) -> Result<Self> {
track_assert!(
params_domain.len() >= 2,
ErrorKind::InvalidInput,
"Too few dimensions: {}",
params_domain.len()
);
track_assert_eq!(
params_domain.len() + 1,
initial_simplex.len(),
ErrorKind::InvalidInput
);
let dim = params_domain.len() as f64;
Ok(Self {
params_domain,
simplex: Vec::with_capacity(initial_simplex.len()),
alpha: 1.0,
beta: 1.0 + 2.0 / dim,
gamma: 0.75 - 1.0 / (2.0 * dim),
delta: 1.0 - 1.0 / dim,
initial: initial_simplex,
centroid: Vec::new(),
evaluating: None,
state: State::Initialize,
})
}
fn dim(&self) -> usize {
self.params_domain.len()
}
fn adjust(&self, x: Vec<f64>) -> Vec<f64> {
self.params_domain
.iter()
.zip(x.into_iter())
.map(|(p, v)| {
let v = p.low().max(v);
let mut v = (p.high() - std::f64::EPSILON).min(v);
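// If the subtraction above rounded away (v still within EPSILON of the
// upper bound), keep subtracting growing multiples of EPSILON until v
// sits strictly below p.high().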
for i in 2.. {
if (v - p.high()).abs() > EPSILON {
break;
}
v -= EPSILON * f64::from(i);
}
v
})
.collect()
}
fn initial_ask(&mut self) -> Vec<f64> {
self.initial.pop().unwrap_or_else(|| unreachable!())
}
fn initial_tell(&mut self, obs: Obs<Vec<f64>, V>) {
self.simplex.push(obs);
if self.simplex.len() == self.dim() + 1 {
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.update_centroid();
self.state = State::Reflect;
}
}
fn reflect_ask(&mut self) -> Vec<f64> {
self.centroid
.iter()
.zip(self.highest().param.iter())
.map(|(&x0, &xh)| x0 + self.alpha * (x0 - xh))
.collect()
}
fn reflect_tell(&mut self, obs: Obs<Vec<f64>, V>) {
if obs.value < self.lowest().value {
self.state = State::Expand(obs);
} else if obs.value < self.second_highest().value {
self.accept(obs);
} else if obs.value < self.highest().value {
self.state = State::ContractOutside(obs);
} else {
self.state = State::ContractInside(obs);
}
}
fn expand_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.beta * (x - c))
.collect()
}
fn expand_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if prev.value < curr.value {
self.accept(prev);
} else {
self.accept(curr);
}
}
fn contract_outside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.gamma * (x - c))
.collect()
}
fn contract_outside_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value <= prev.value {
self.accept(curr);
} else {
self.shrink();
}
}
fn contract_inside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c - self.gamma * (x - c))
.collect()
}
fn contract_inside_tell(&mut self, _prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value < self.highest().value {
self.accept(curr);
} else {
self.shrink();
}
}
fn shrink_ask(&mut self, index: usize) -> Vec<f64> {
self.lowest()
.param
.iter()
.zip(self.simplex[index].param.iter())
.map(|(&xl, &xi)| xl + self.delta * (xi - xl))
.collect()
}
fn shrink_tell(&mut self, obs: Obs<Vec<f64>, V>, index: usize) {
self.simplex[index] = obs;
if index < self.simplex.len() - 1 {
self.state = State::Shrink { index: index + 1 };
} else {
self.update_centroid();
self.state = State::Reflect;
}
}
fn accept(&mut self, obs: Obs<Vec<f64>, V>) {
// FIXME: optimize
self.simplex.push(obs);
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.simplex.pop();
self.update_centroid();
self.state = State::Reflect;
}
fn shrink(&mut self) {
self.state = State::Shrink { index: 1 };
}
fn lowest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[0]
}
fn second_highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 2]
}
fn highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 1]
}
fn update_centroid(&mut self) {
assert_eq!(self.simplex.len(), self.dim() + 1);
// NOTE: We assume that `self.simplex` has been sorted by its values.
let n = self.dim();
let mut c = vec![f64::default(); n];
for t in self.simplex.iter().take(n) {
for (i, c) in c.iter_mut().enumerate() {
*c += t.param[i];
}
}
let n = n as f64;
for c in &mut c {
*c /= n;
}
self.centroid = c
}
}
impl<V> Optimizer for NelderMeadOptimizer<V>
where
V: Ord,
{
type Param = Vec<f64>;
type Value = V;
fn ask<R: Rng, G: IdGen>(&mut self, _rng: R, idg: G) -> Result<Obs<Self::Param>> {
track_assert!(self.evaluating.is_none(), ErrorKind::Other);
let x = match &self.state {
State::Initialize => self.initial_ask(),
State::Reflect => self.reflect_ask(),
State::Expand(prev) => {
let prev = prev.param.clone();
self.expand_ask(prev)
}
State::ContractOutside(prev) => {
let prev = prev.param.clone();
self.contract_outside_ask(prev)
}
State::ContractInside(prev) => {
let prev = prev.param.clone();
self.contract_inside_ask(prev)
}
State::Shrink { index } => {
let index = *index;
self.shrink_ask(index)
}
};
let x = self.adjust(x);
let obs = track!(Obs::new(idg, x))?;
self.evaluating = Some(obs.id);
Ok(obs)
}
fn tell(&mut self, obs: Obs<Self::Param, Self::Value>) -> Result<()> {
track_assert_eq!(self.evaluating, Some(obs.id), ErrorKind::UnknownObservation);
self.evaluating = None;
match std::mem::replace(&mut self.state, State::Initialize) {
State::Initialize => {
self.initial_tell(obs);
}
State::Reflect => {
self.reflect_tell(obs);
}
State::Expand(prev) => {
self.expand_tell(prev, obs);
}
State::ContractOutside(prev) => {
self.contract_outside_tell(prev, obs);
}
State::ContractInside(prev) => {
self.contract_inside_tell(prev, obs);
}
State::Shrink { index } => {
self.shrink_tell(obs, index);
}
}
Ok(())
}
}
#[derive(Debug, Clone)]
enum State<V> {
Initialize,
Reflect,
Expand(Obs<Vec<f64>, V>),
ContractOutside(Obs<Vec<f64>, V>),
ContractInside(Obs<Vec<f64>, V>),
Shrink { index: usize },
}
#[cfg(test)]
mod tests {
use super::*;
use crate::domains::ContinuousDomain;
use crate::generators::SerialIdGenerator;
use ordered_float::NotNan;
use rand;
use trackable::result::TopLevelResult;
fn objective(param: &[f64]) -> f64 {
param[0].powi(2) - param[1]
}
#[test]
fn nelder_mead_optimizer_works() -> TopLevelResult {
let params_domain = vec![
ContinuousDomain::new(0.0, 100.0)?,
ContinuousDomain::new(0.0, 100.0)?,
];
let mut optimizer = NelderMeadOptimizer::with_initial_point(params_domain, &[10.0, 20.0])?;
let mut rng = rand::thread_rng();
let mut idg = SerialIdGenerator::new();
for i in 0..100 {
let obs = optimizer.ask(&mut rng, &mut idg)?;
let value = objective(&obs.param);
println!("[{}] param={:?}, value={}", i, obs.param, value);
optimizer
.tell(obs.map_value(|_| NotNan::new(value).unwrap_or_else(|e| panic!("{}", e))))?;
}
Ok(())
}
}
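For reference, the simplex steps implemented by `reflect_ask`, `expand_ask`, the two `contract_*_ask` methods, and `shrink_ask` are, in the ANMS paper's notation (with $\bar{x}$ the centroid of the $n$ best vertices, $x_1$ the best vertex and $x_{n+1}$ the worst):

$$
\begin{aligned}
x_r &= \bar{x} + \alpha\,(\bar{x} - x_{n+1}) && \text{(reflection)} \\
x_e &= \bar{x} + \beta\,(x_r - \bar{x}) && \text{(expansion)} \\
x_{oc} &= \bar{x} + \gamma\,(x_r - \bar{x}), \quad x_{ic} = \bar{x} - \gamma\,(x_r - \bar{x}) && \text{(contraction)} \\
x_i &\leftarrow x_1 + \delta\,(x_i - x_1), \quad i = 2, \dots, n+1 && \text{(shrink)}
\end{aligned}
$$

with the adaptive coefficients $\alpha = 1$, $\beta = 1 + 2/n$, $\gamma = 3/4 - 1/(2n)$ and $\delta = 1 - 1/n$ that `with_initial_simplex` computes from the dimension $n$.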
nelder_mead.rs | //! Adaptive nelder-mead simplex algorithm.
//!
//! # References
//!
//! - [Implementing the Nelder-Mead simplex algorithm with adaptive parameters][ANMS]
//! - [Nelder-Mead algorithm](http://var.scholarpedia.org/article/Nelder-Mead_algorithm)
//! - [Nelder-Mead Method (Wikipedia)](https://en.wikipedia.org/wiki/Nelder–Mead_method)
//!
//! [ANMS]: https://link.springer.com/article/10.1007/s10589-010-9329-3
use crate::domains::ContinuousDomain;
use crate::{ErrorKind, IdGen, Obs, ObsId, Optimizer, Result};
use rand::distributions::Distribution;
use rand::Rng;
use std::f64::EPSILON;
/// An optimizer based on [Adaptive Nelder-Mead Simplex (ANMS)][ANMS] algorithm.
///
/// [ANMS]: https://link.springer.com/article/10.1007/s10589-010-9329-3
#[derive(Debug)]
pub struct NelderMeadOptimizer<V> {
params_domain: Vec<ContinuousDomain>,
simplex: Vec<Obs<Vec<f64>, V>>,
alpha: f64,
beta: f64,
gamma: f64,
delta: f64,
initial: Vec<Vec<f64>>,
centroid: Vec<f64>,
evaluating: Option<ObsId>,
state: State<V>,
}
impl<V> NelderMeadOptimizer<V>
where
V: Ord,
{
/// Makes a new `NelderMeadOptimizer`.
pub fn new<R: Rng>(params_domain: Vec<ContinuousDomain>, mut rng: R) -> Result<Self> {
let point = params_domain
.iter()
.map(|p| p.sample(&mut rng))
.collect::<Vec<_>>();
track!(Self::with_initial_point(params_domain, &point))
}
/// Makes a new `NelderMeadOptimizer` which has the given search point.
pub fn with_initial_point(params_domain: Vec<ContinuousDomain>, point: &[f64]) -> Result<Self> {
let mut initial_simplex = vec![point.to_vec()];
for i in 0..params_domain.len() {
let tau = if point[i] == 0.0 { 0.00025 } else { 0.05 };
let x = point
.iter()
.enumerate()
.map(|(j, &x0)| if i == j { x0 + tau } else { | .collect();
initial_simplex.push(x);
}
track!(Self::with_initial_simplex(params_domain, initial_simplex))
}
/// Makes a new `NelderMeadOptimizer` with the given simplex.
pub fn with_initial_simplex(
params_domain: Vec<ContinuousDomain>,
initial_simplex: Vec<Vec<f64>>,
) -> Result<Self> {
track_assert!(
params_domain.len() >= 2,
ErrorKind::InvalidInput,
"Too few dimensions: {}",
params_domain.len()
);
track_assert_eq!(
params_domain.len() + 1,
initial_simplex.len(),
ErrorKind::InvalidInput
);
let dim = params_domain.len() as f64;
Ok(Self {
params_domain,
simplex: Vec::with_capacity(initial_simplex.len()),
alpha: 1.0,
beta: 1.0 + 2.0 / dim,
gamma: 0.75 - 1.0 / (2.0 * dim),
delta: 1.0 - 1.0 / dim,
initial: initial_simplex,
centroid: Vec::new(),
evaluating: None,
state: State::Initialize,
})
}
fn dim(&self) -> usize {
self.params_domain.len()
}
fn adjust(&self, x: Vec<f64>) -> Vec<f64> {
self.params_domain
.iter()
.zip(x.into_iter())
.map(|(p, v)| {
let v = p.low().max(v);
let mut v = (p.high() - std::f64::EPSILON).min(v);
for i in 2.. {
if (v - p.high()).abs() > EPSILON {
break;
}
v -= EPSILON * f64::from(i);
}
v
})
.collect()
}
fn initial_ask(&mut self) -> Vec<f64> {
self.initial.pop().unwrap_or_else(|| unreachable!())
}
fn initial_tell(&mut self, obs: Obs<Vec<f64>, V>) {
self.simplex.push(obs);
if self.simplex.len() == self.dim() + 1 {
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.update_centroid();
self.state = State::Reflect;
}
}
fn reflect_ask(&mut self) -> Vec<f64> {
self.centroid
.iter()
.zip(self.highest().param.iter())
.map(|(&x0, &xh)| x0 + self.alpha * (x0 - xh))
.collect()
}
fn reflect_tell(&mut self, obs: Obs<Vec<f64>, V>) {
if obs.value < self.lowest().value {
self.state = State::Expand(obs);
} else if obs.value < self.second_highest().value {
self.accept(obs);
} else if obs.value < self.highest().value {
self.state = State::ContractOutside(obs);
} else {
self.state = State::ContractInside(obs);
}
}
fn expand_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.beta * (x - c))
.collect()
}
fn expand_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if prev.value < curr.value {
self.accept(prev);
} else {
self.accept(curr);
}
}
fn contract_outside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.gamma * (x - c))
.collect()
}
fn contract_outside_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value <= prev.value {
self.accept(curr);
} else {
self.shrink();
}
}
fn contract_inside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c - self.gamma * (x - c))
.collect()
}
fn contract_inside_tell(&mut self, _prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value < self.highest().value {
self.accept(curr);
} else {
self.shrink();
}
}
fn shrink_ask(&mut self, index: usize) -> Vec<f64> {
self.lowest()
.param
.iter()
.zip(self.simplex[index].param.iter())
.map(|(&xl, &xi)| xl + self.delta * (xi - xl))
.collect()
}
fn shrink_tell(&mut self, obs: Obs<Vec<f64>, V>, index: usize) {
self.simplex[index] = obs;
if index < self.simplex.len() - 1 {
self.state = State::Shrink { index: index + 1 };
} else {
self.update_centroid();
self.state = State::Reflect;
}
}
fn accept(&mut self, obs: Obs<Vec<f64>, V>) {
// FIXME: optimize
self.simplex.push(obs);
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.simplex.pop();
self.update_centroid();
self.state = State::Reflect;
}
fn shrink(&mut self) {
self.state = State::Shrink { index: 1 };
}
fn lowest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[0]
}
fn second_highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 2]
}
fn highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 1]
}
fn update_centroid(&mut self) {
assert!(self.simplex.len() == self.dim() + 1);
// NOTE: We assume that `self.simplex` have been sorted by its values.
let n = self.dim();
let mut c = vec![f64::default(); n];
for t in self.simplex.iter().take(n) {
for (i, c) in c.iter_mut().enumerate() {
*c += t.param[i];
}
}
let n = n as f64;
for c in &mut c {
*c /= n;
}
self.centroid = c
}
}
impl<V> Optimizer for NelderMeadOptimizer<V>
where
V: Ord,
{
type Param = Vec<f64>;
type Value = V;
fn ask<R: Rng, G: IdGen>(&mut self, _rng: R, idg: G) -> Result<Obs<Self::Param>> {
track_assert!(self.evaluating.is_none(), ErrorKind::Other);
let x = match &self.state {
State::Initialize => self.initial_ask(),
State::Reflect => self.reflect_ask(),
State::Expand(prev) => {
let prev = prev.param.clone();
self.expand_ask(prev)
}
State::ContractOutside(prev) => {
let prev = prev.param.clone();
self.contract_outside_ask(prev)
}
State::ContractInside(prev) => {
let prev = prev.param.clone();
self.contract_inside_ask(prev)
}
State::Shrink { index } => {
let index = *index;
self.shrink_ask(index)
}
};
let x = self.adjust(x);
let obs = track!(Obs::new(idg, x))?;
self.evaluating = Some(obs.id);
Ok(obs)
}
fn tell(&mut self, obs: Obs<Self::Param, Self::Value>) -> Result<()> {
track_assert_eq!(self.evaluating, Some(obs.id), ErrorKind::UnknownObservation);
self.evaluating = None;
match std::mem::replace(&mut self.state, State::Initialize) {
State::Initialize => {
self.initial_tell(obs);
}
State::Reflect => {
self.reflect_tell(obs);
}
State::Expand(prev) => {
self.expand_tell(prev, obs);
}
State::ContractOutside(prev) => {
self.contract_outside_tell(prev, obs);
}
State::ContractInside(prev) => {
self.contract_inside_tell(prev, obs);
}
State::Shrink { index } => {
self.shrink_tell(obs, index);
}
}
Ok(())
}
}
#[derive(Debug, Clone)]
enum State<V> {
Initialize,
Reflect,
Expand(Obs<Vec<f64>, V>),
ContractOutside(Obs<Vec<f64>, V>),
ContractInside(Obs<Vec<f64>, V>),
Shrink { index: usize },
}
#[cfg(test)]
mod tests {
use super::*;
use crate::domains::ContinuousDomain;
use crate::generators::SerialIdGenerator;
use ordered_float::NotNan;
use rand;
use trackable::result::TopLevelResult;
fn objective(param: &[f64]) -> f64 {
param[0].powi(2) - param[1]
}
#[test]
fn nelder_mead_optimizer_works() -> TopLevelResult {
let params_domain = vec![
ContinuousDomain::new(0.0, 100.0)?,
ContinuousDomain::new(0.0, 100.0)?,
];
let mut optimizer = NelderMeadOptimizer::with_initial_point(params_domain, &[10.0, 20.0])?;
let mut rng = rand::thread_rng();
let mut idg = SerialIdGenerator::new();
for i in 0..100 {
let obs = optimizer.ask(&mut rng, &mut idg)?;
let value = objective(&obs.param);
println!("[{}] param={:?}, value={}", i, obs.param, value);
optimizer
.tell(obs.map_value(|_| NotNan::new(value).unwrap_or_else(|e| panic!("{}", e))))?;
}
Ok(())
}
}
| x0 })
| conditional_block |
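// --- Editor's sketch (illustration only, not part of the dataset row above) --
// The expand/contract asks above all share one shape: move from the centroid
// `c` toward (or away from) the previous trial point `x` by a coefficient k,
// i.e. x' = c + k * (x - c); contract-inside flips the sign of k.
fn step_from_centroid(centroid: &[f64], prev: &[f64], k: f64) -> Vec<f64> {
    centroid
        .iter()
        .zip(prev.iter())
        .map(|(&c, &x)| c + k * (x - c))
        .collect()
}
// With centroid (0, 0) and trial point (2, 2): k = 2.0 expands to (4, 4),
// k = 0.5 contracts outside to (1, 1), and k = -0.5 contracts inside to
// (-1, -1) -- matching `expand_ask`, `contract_outside_ask` and
// `contract_inside_ask` above.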
nelder_mead.rs | //! Adaptive Nelder-Mead simplex algorithm.
//!
//! # References
//!
//! - [Implementing the Nelder-Mead simplex algorithm with adaptive parameters][ANMS]
//! - [Nelder-Mead algorithm](http://var.scholarpedia.org/article/Nelder-Mead_algorithm)
//! - [Nelder-Mead Method (Wikipedia)](https://en.wikipedia.org/wiki/Nelder–Mead_method)
//!
//! [ANMS]: https://link.springer.com/article/10.1007/s10589-010-9329-3
use crate::domains::ContinuousDomain;
use crate::{ErrorKind, IdGen, Obs, ObsId, Optimizer, Result};
use rand::distributions::Distribution;
use rand::Rng;
use std::f64::EPSILON;
/// An optimizer based on [Adaptive Nelder-Mead Simplex (ANMS)][ANMS] algorithm.
///
/// [ANMS]: https://link.springer.com/article/10.1007/s10589-010-9329-3
#[derive(Debug)]
pub struct Ne | > {
params_domain: Vec<ContinuousDomain>,
simplex: Vec<Obs<Vec<f64>, V>>,
alpha: f64,
beta: f64,
gamma: f64,
delta: f64,
initial: Vec<Vec<f64>>,
centroid: Vec<f64>,
evaluating: Option<ObsId>,
state: State<V>,
}
impl<V> NelderMeadOptimizer<V>
where
V: Ord,
{
/// Makes a new `NelderMeadOptimizer`.
pub fn new<R: Rng>(params_domain: Vec<ContinuousDomain>, mut rng: R) -> Result<Self> {
let point = params_domain
.iter()
.map(|p| p.sample(&mut rng))
.collect::<Vec<_>>();
track!(Self::with_initial_point(params_domain, &point))
}
/// Makes a new `NelderMeadOptimizer` which has the given search point.
pub fn with_initial_point(params_domain: Vec<ContinuousDomain>, point: &[f64]) -> Result<Self> {
let mut initial_simplex = vec![point.to_vec()];
for i in 0..params_domain.len() {
let tau = if point[i] == 0.0 { 0.00025 } else { 0.05 };
let x = point
.iter()
.enumerate()
.map(|(j, &x0)| if i == j { x0 + tau } else { x0 })
.collect();
initial_simplex.push(x);
}
track!(Self::with_initial_simplex(params_domain, initial_simplex))
}
/// Makes a new `NelderMeadOptimizer` with the given simplex.
pub fn with_initial_simplex(
params_domain: Vec<ContinuousDomain>,
initial_simplex: Vec<Vec<f64>>,
) -> Result<Self> {
track_assert!(
params_domain.len() >= 2,
ErrorKind::InvalidInput,
"Too few dimensions: {}",
params_domain.len()
);
track_assert_eq!(
params_domain.len() + 1,
initial_simplex.len(),
ErrorKind::InvalidInput
);
let dim = params_domain.len() as f64;
Ok(Self {
params_domain,
simplex: Vec::with_capacity(initial_simplex.len()),
alpha: 1.0,
beta: 1.0 + 2.0 / dim,
gamma: 0.75 - 1.0 / (2.0 * dim),
delta: 1.0 - 1.0 / dim,
initial: initial_simplex,
centroid: Vec::new(),
evaluating: None,
state: State::Initialize,
})
}
fn dim(&self) -> usize {
self.params_domain.len()
}
fn adjust(&self, x: Vec<f64>) -> Vec<f64> {
self.params_domain
.iter()
.zip(x.into_iter())
.map(|(p, v)| {
let v = p.low().max(v);
let mut v = (p.high() - std::f64::EPSILON).min(v);
for i in 2.. {
if (v - p.high()).abs() > EPSILON {
break;
}
v -= EPSILON * f64::from(i);
}
v
})
.collect()
}
fn initial_ask(&mut self) -> Vec<f64> {
self.initial.pop().unwrap_or_else(|| unreachable!())
}
fn initial_tell(&mut self, obs: Obs<Vec<f64>, V>) {
self.simplex.push(obs);
if self.simplex.len() == self.dim() + 1 {
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.update_centroid();
self.state = State::Reflect;
}
}
fn reflect_ask(&mut self) -> Vec<f64> {
self.centroid
.iter()
.zip(self.highest().param.iter())
.map(|(&x0, &xh)| x0 + self.alpha * (x0 - xh))
.collect()
}
fn reflect_tell(&mut self, obs: Obs<Vec<f64>, V>) {
if obs.value < self.lowest().value {
self.state = State::Expand(obs);
} else if obs.value < self.second_highest().value {
self.accept(obs);
} else if obs.value < self.highest().value {
self.state = State::ContractOutside(obs);
} else {
self.state = State::ContractInside(obs);
}
}
fn expand_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.beta * (x - c))
.collect()
}
fn expand_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if prev.value < curr.value {
self.accept(prev);
} else {
self.accept(curr);
}
}
fn contract_outside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.gamma * (x - c))
.collect()
}
fn contract_outside_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value <= prev.value {
self.accept(curr);
} else {
self.shrink();
}
}
fn contract_inside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c - self.gamma * (x - c))
.collect()
}
fn contract_inside_tell(&mut self, _prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value < self.highest().value {
self.accept(curr);
} else {
self.shrink();
}
}
fn shrink_ask(&mut self, index: usize) -> Vec<f64> {
self.lowest()
.param
.iter()
.zip(self.simplex[index].param.iter())
.map(|(&xl, &xi)| xl + self.delta * (xi - xl))
.collect()
}
fn shrink_tell(&mut self, obs: Obs<Vec<f64>, V>, index: usize) {
self.simplex[index] = obs;
if index < self.simplex.len() - 1 {
self.state = State::Shrink { index: index + 1 };
} else {
self.update_centroid();
self.state = State::Reflect;
}
}
fn accept(&mut self, obs: Obs<Vec<f64>, V>) {
// FIXME: optimize
self.simplex.push(obs);
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.simplex.pop();
self.update_centroid();
self.state = State::Reflect;
}
fn shrink(&mut self) {
self.state = State::Shrink { index: 1 };
}
fn lowest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[0]
}
fn second_highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 2]
}
fn highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 1]
}
fn update_centroid(&mut self) {
assert!(self.simplex.len() == self.dim() + 1);
// NOTE: We assume that `self.simplex` has been sorted by its values.
let n = self.dim();
let mut c = vec![f64::default(); n];
for t in self.simplex.iter().take(n) {
for (i, c) in c.iter_mut().enumerate() {
*c += t.param[i];
}
}
let n = n as f64;
for c in &mut c {
*c /= n;
}
self.centroid = c
}
}
impl<V> Optimizer for NelderMeadOptimizer<V>
where
V: Ord,
{
type Param = Vec<f64>;
type Value = V;
fn ask<R: Rng, G: IdGen>(&mut self, _rng: R, idg: G) -> Result<Obs<Self::Param>> {
track_assert!(self.evaluating.is_none(), ErrorKind::Other);
let x = match &self.state {
State::Initialize => self.initial_ask(),
State::Reflect => self.reflect_ask(),
State::Expand(prev) => {
let prev = prev.param.clone();
self.expand_ask(prev)
}
State::ContractOutside(prev) => {
let prev = prev.param.clone();
self.contract_outside_ask(prev)
}
State::ContractInside(prev) => {
let prev = prev.param.clone();
self.contract_inside_ask(prev)
}
State::Shrink { index } => {
let index = *index;
self.shrink_ask(index)
}
};
let x = self.adjust(x);
let obs = track!(Obs::new(idg, x))?;
self.evaluating = Some(obs.id);
Ok(obs)
}
fn tell(&mut self, obs: Obs<Self::Param, Self::Value>) -> Result<()> {
track_assert_eq!(self.evaluating, Some(obs.id), ErrorKind::UnknownObservation);
self.evaluating = None;
match std::mem::replace(&mut self.state, State::Initialize) {
State::Initialize => {
self.initial_tell(obs);
}
State::Reflect => {
self.reflect_tell(obs);
}
State::Expand(prev) => {
self.expand_tell(prev, obs);
}
State::ContractOutside(prev) => {
self.contract_outside_tell(prev, obs);
}
State::ContractInside(prev) => {
self.contract_inside_tell(prev, obs);
}
State::Shrink { index } => {
self.shrink_tell(obs, index);
}
}
Ok(())
}
}
#[derive(Debug, Clone)]
enum State<V> {
Initialize,
Reflect,
Expand(Obs<Vec<f64>, V>),
ContractOutside(Obs<Vec<f64>, V>),
ContractInside(Obs<Vec<f64>, V>),
Shrink { index: usize },
}
#[cfg(test)]
mod tests {
use super::*;
use crate::domains::ContinuousDomain;
use crate::generators::SerialIdGenerator;
use ordered_float::NotNan;
use rand;
use trackable::result::TopLevelResult;
fn objective(param: &[f64]) -> f64 {
param[0].powi(2) - param[1]
}
#[test]
fn nelder_mead_optimizer_works() -> TopLevelResult {
let params_domain = vec![
ContinuousDomain::new(0.0, 100.0)?,
ContinuousDomain::new(0.0, 100.0)?,
];
let mut optimizer = NelderMeadOptimizer::with_initial_point(params_domain, &[10.0, 20.0])?;
let mut rng = rand::thread_rng();
let mut idg = SerialIdGenerator::new();
for i in 0..100 {
let obs = optimizer.ask(&mut rng, &mut idg)?;
let value = objective(&obs.param);
println!("[{}] param={:?}, value={}", i, obs.param, value);
optimizer
.tell(obs.map_value(|_| NotNan::new(value).unwrap_or_else(|e| panic!("{}", e))))?;
}
Ok(())
}
}
| lderMeadOptimizer<V | identifier_name |
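// --- Editor's sketch (illustration only, not part of the dataset row above) --
// The constructor above instantiates the adaptive coefficients from the ANMS
// paper: alpha = 1, beta = 1 + 2/n, gamma = 3/4 - 1/(2n), delta = 1 - 1/n.
// For n = 2 they reduce to the classic Nelder-Mead values, as this check
// (an assumption-free arithmetic illustration) shows; all values are exact
// in binary floating point, so `assert_eq!` is safe here.
#[test]
fn anms_coefficients_reduce_to_classic_values_for_dim_2() {
    let dim = 2.0_f64;
    assert_eq!(1.0 + 2.0 / dim, 2.0); // beta: expansion
    assert_eq!(0.75 - 1.0 / (2.0 * dim), 0.5); // gamma: contraction
    assert_eq!(1.0 - 1.0 / dim, 0.5); // delta: shrink
}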
nelder_mead.rs | //! Adaptive Nelder-Mead simplex algorithm.
//!
//! # References
//!
//! - [Implementing the Nelder-Mead simplex algorithm with adaptive parameters][ANMS]
//! - [Nelder-Mead algorithm](http://var.scholarpedia.org/article/Nelder-Mead_algorithm)
//! - [Nelder-Mead Method (Wikipedia)](https://en.wikipedia.org/wiki/Nelder–Mead_method)
//!
//! [ANMS]: https://link.springer.com/article/10.1007/s10589-010-9329-3
use crate::domains::ContinuousDomain;
use crate::{ErrorKind, IdGen, Obs, ObsId, Optimizer, Result};
use rand::distributions::Distribution;
use rand::Rng;
use std::f64::EPSILON;
/// An optimizer based on [Adaptive Nelder-Mead Simplex (ANMS)][ANMS] algorithm.
///
/// [ANMS]: https://link.springer.com/article/10.1007/s10589-010-9329-3
#[derive(Debug)]
pub struct NelderMeadOptimizer<V> {
params_domain: Vec<ContinuousDomain>,
simplex: Vec<Obs<Vec<f64>, V>>,
alpha: f64,
beta: f64,
gamma: f64,
delta: f64,
initial: Vec<Vec<f64>>,
centroid: Vec<f64>,
evaluating: Option<ObsId>,
state: State<V>,
}
impl<V> NelderMeadOptimizer<V>
where
V: Ord,
{
/// Makes a new `NelderMeadOptimizer`.
pub fn new<R: Rng>(params_domain: Vec<ContinuousDomain>, mut rng: R) -> Result<Self> {
let point = params_domain
.iter()
.map(|p| p.sample(&mut rng))
.collect::<Vec<_>>();
track!(Self::with_initial_point(params_domain, &point))
}
/// Makes a new `NelderMeadOptimizer` which has the given search point.
pub fn with_initial_point(params_domain: Vec<ContinuousDomain>, point: &[f64]) -> Result<Self> {
let mut initial_simplex = vec![point.to_vec()];
for i in 0..params_domain.len() {
let tau = if point[i] == 0.0 { 0.00025 } else { 0.05 };
let x = point
.iter()
.enumerate()
.map(|(j, &x0)| if i == j { x0 + tau } else { x0 })
.collect();
initial_simplex.push(x);
}
track!(Self::with_initial_simplex(params_domain, initial_simplex))
}
/// Makes a new `NelderMeadOptimizer` with the given simplex.
pub fn with_initial_simplex(
params_domain: Vec<ContinuousDomain>,
initial_simplex: Vec<Vec<f64>>,
) -> Result<Self> {
track_assert!(
params_domain.len() >= 2,
ErrorKind::InvalidInput,
"Too few dimensions: {}",
params_domain.len()
);
track_assert_eq!(
params_domain.len() + 1,
initial_simplex.len(),
ErrorKind::InvalidInput
);
let dim = params_domain.len() as f64;
Ok(Self {
params_domain,
simplex: Vec::with_capacity(initial_simplex.len()),
alpha: 1.0,
beta: 1.0 + 2.0 / dim,
gamma: 0.75 - 1.0 / (2.0 * dim),
delta: 1.0 - 1.0 / dim,
initial: initial_simplex,
centroid: Vec::new(),
evaluating: None,
state: State::Initialize,
})
}
fn dim(&self) -> usize {
self.params_domain.len()
}
fn adjust(&self, x: Vec<f64>) -> Vec<f64> {
self.params_domain
.iter()
.zip(x.into_iter())
.map(|(p, v)| {
let v = p.low().max(v);
let mut v = (p.high() - std::f64::EPSILON).min(v);
for i in 2.. {
if (v - p.high()).abs() > EPSILON {
break;
}
v -= EPSILON * f64::from(i);
}
v
})
.collect()
}
fn initial_ask(&mut self) -> Vec<f64> {
self.initial.pop().unwrap_or_else(|| unreachable!())
}
fn initial_tell(&mut self, obs: Obs<Vec<f64>, V>) {
self.simplex.push(obs);
if self.simplex.len() == self.dim() + 1 {
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.update_centroid();
self.state = State::Reflect;
}
}
fn reflect_ask(&mut self) -> Vec<f64> {
self.centroid
.iter()
.zip(self.highest().param.iter())
.map(|(&x0, &xh)| x0 + self.alpha * (x0 - xh))
.collect()
}
fn reflect_tell(&mut self, obs: Obs<Vec<f64>, V>) {
if obs.value < self.lowest().value {
self.state = State::Expand(obs);
} else if obs.value < self.second_highest().value { | self.state = State::ContractInside(obs);
}
}
fn expand_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.beta * (x - c))
.collect()
}
fn expand_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if prev.value < curr.value {
self.accept(prev);
} else {
self.accept(curr);
}
}
fn contract_outside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c + self.gamma * (x - c))
.collect()
}
fn contract_outside_tell(&mut self, prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value <= prev.value {
self.accept(curr);
} else {
self.shrink();
}
}
fn contract_inside_ask(&mut self, prev: Vec<f64>) -> Vec<f64> {
self.centroid
.iter()
.zip(prev.iter())
.map(|(&c, &x)| c - self.gamma * (x - c))
.collect()
}
fn contract_inside_tell(&mut self, _prev: Obs<Vec<f64>, V>, curr: Obs<Vec<f64>, V>) {
if curr.value < self.highest().value {
self.accept(curr);
} else {
self.shrink();
}
}
fn shrink_ask(&mut self, index: usize) -> Vec<f64> {
self.lowest()
.param
.iter()
.zip(self.simplex[index].param.iter())
.map(|(&xl, &xi)| xl + self.delta * (xi - xl))
.collect()
}
fn shrink_tell(&mut self, obs: Obs<Vec<f64>, V>, index: usize) {
self.simplex[index] = obs;
if index < self.simplex.len() - 1 {
self.state = State::Shrink { index: index + 1 };
} else {
self.update_centroid();
self.state = State::Reflect;
}
}
fn accept(&mut self, obs: Obs<Vec<f64>, V>) {
// FIXME: optimize
self.simplex.push(obs);
self.simplex.sort_by(|a, b| a.value.cmp(&b.value));
self.simplex.pop();
self.update_centroid();
self.state = State::Reflect;
}
fn shrink(&mut self) {
self.state = State::Shrink { index: 1 };
}
fn lowest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[0]
}
fn second_highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 2]
}
fn highest(&self) -> &Obs<Vec<f64>, V> {
&self.simplex[self.simplex.len() - 1]
}
fn update_centroid(&mut self) {
assert!(self.simplex.len() == self.dim() + 1);
// NOTE: We assume that `self.simplex` has been sorted by its values.
let n = self.dim();
let mut c = vec![f64::default(); n];
for t in self.simplex.iter().take(n) {
for (i, c) in c.iter_mut().enumerate() {
*c += t.param[i];
}
}
let n = n as f64;
for c in &mut c {
*c /= n;
}
self.centroid = c
}
}
impl<V> Optimizer for NelderMeadOptimizer<V>
where
V: Ord,
{
type Param = Vec<f64>;
type Value = V;
fn ask<R: Rng, G: IdGen>(&mut self, _rng: R, idg: G) -> Result<Obs<Self::Param>> {
track_assert!(self.evaluating.is_none(), ErrorKind::Other);
let x = match &self.state {
State::Initialize => self.initial_ask(),
State::Reflect => self.reflect_ask(),
State::Expand(prev) => {
let prev = prev.param.clone();
self.expand_ask(prev)
}
State::ContractOutside(prev) => {
let prev = prev.param.clone();
self.contract_outside_ask(prev)
}
State::ContractInside(prev) => {
let prev = prev.param.clone();
self.contract_inside_ask(prev)
}
State::Shrink { index } => {
let index = *index;
self.shrink_ask(index)
}
};
let x = self.adjust(x);
let obs = track!(Obs::new(idg, x))?;
self.evaluating = Some(obs.id);
Ok(obs)
}
fn tell(&mut self, obs: Obs<Self::Param, Self::Value>) -> Result<()> {
track_assert_eq!(self.evaluating, Some(obs.id), ErrorKind::UnknownObservation);
self.evaluating = None;
match std::mem::replace(&mut self.state, State::Initialize) {
State::Initialize => {
self.initial_tell(obs);
}
State::Reflect => {
self.reflect_tell(obs);
}
State::Expand(prev) => {
self.expand_tell(prev, obs);
}
State::ContractOutside(prev) => {
self.contract_outside_tell(prev, obs);
}
State::ContractInside(prev) => {
self.contract_inside_tell(prev, obs);
}
State::Shrink { index } => {
self.shrink_tell(obs, index);
}
}
Ok(())
}
}
#[derive(Debug, Clone)]
enum State<V> {
Initialize,
Reflect,
Expand(Obs<Vec<f64>, V>),
ContractOutside(Obs<Vec<f64>, V>),
ContractInside(Obs<Vec<f64>, V>),
Shrink { index: usize },
}
#[cfg(test)]
mod tests {
use super::*;
use crate::domains::ContinuousDomain;
use crate::generators::SerialIdGenerator;
use ordered_float::NotNan;
use rand;
use trackable::result::TopLevelResult;
fn objective(param: &[f64]) -> f64 {
param[0].powi(2) - param[1]
}
#[test]
fn nelder_mead_optimizer_works() -> TopLevelResult {
let params_domain = vec![
ContinuousDomain::new(0.0, 100.0)?,
ContinuousDomain::new(0.0, 100.0)?,
];
let mut optimizer = NelderMeadOptimizer::with_initial_point(params_domain, &[10.0, 20.0])?;
let mut rng = rand::thread_rng();
let mut idg = SerialIdGenerator::new();
for i in 0..100 {
let obs = optimizer.ask(&mut rng, &mut idg)?;
let value = objective(&obs.param);
println!("[{}] param={:?}, value={}", i, obs.param, value);
optimizer
.tell(obs.map_value(|_| NotNan::new(value).unwrap_or_else(|e| panic!("{}", e))))?;
}
Ok(())
}
} | self.accept(obs);
} else if obs.value < self.highest().value {
self.state = State::ContractOutside(obs);
} else { | random_line_split |
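// --- Editor's sketch (illustration only, not part of the dataset row above) --
// `update_centroid` above averages the n best vertices and deliberately skips
// the worst one (`take(n)` over a simplex of n + 1 entries sorted by value).
// A self-contained version of that computation:
fn centroid_of_best(simplex: &[Vec<f64>], dim: usize) -> Vec<f64> {
    let mut c = vec![0.0f64; dim];
    for vertex in simplex.iter().take(dim) {
        for (acc, &x) in c.iter_mut().zip(vertex.iter()) {
            *acc += x;
        }
    }
    for acc in &mut c {
        *acc /= dim as f64;
    }
    c
}
// For the sorted 2-D simplex [(0,0), (2,0), (9,9)] the worst vertex (9,9) is
// ignored and the centroid of the two best is (1, 0):
// assert_eq!(centroid_of_best(&[vec![0.0, 0.0], vec![2.0, 0.0], vec![9.0, 9.0]], 2), vec![1.0, 0.0]);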
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! A drop-in replacement for string_cache, but backed by Gecko `nsAtom`s.
use gecko_bindings::bindings::Gecko_AddRefAtom;
use gecko_bindings::bindings::Gecko_Atomize;
use gecko_bindings::bindings::Gecko_Atomize16;
use gecko_bindings::bindings::Gecko_ReleaseAtom;
use gecko_bindings::structs::{nsAtom, nsAtom_AtomKind};
use nsstring::{nsAString, nsStr};
use precomputed_hash::PrecomputedHash;
use std::ascii::AsciiExt; | use std::char::{self, DecodeUtf16};
use std::fmt::{self, Write};
use std::hash::{Hash, Hasher};
use std::iter::Cloned;
use std::mem;
use std::ops::Deref;
use std::slice;
#[macro_use]
#[allow(improper_ctypes, non_camel_case_types, missing_docs)]
pub mod atom_macro {
include!(concat!(env!("OUT_DIR"), "/gecko/atom_macro.rs"));
}
#[macro_use]
pub mod namespace;
pub use self::namespace::{Namespace, WeakNamespace};
macro_rules! local_name {
($s: tt) => { atom!($s) }
}
/// A strong reference to a Gecko atom.
#[derive(Eq, PartialEq)]
pub struct Atom(*mut WeakAtom);
/// An atom *without* a strong reference.
///
/// Only usable as `&'a WeakAtom`,
/// where `'a` is the lifetime of something that holds a strong reference to that atom.
pub struct WeakAtom(nsAtom);
/// A BorrowedAtom for Gecko is just a weak reference to a `nsAtom`, that
/// hasn't been bumped.
pub type BorrowedAtom<'a> = &'a WeakAtom;
impl Deref for Atom {
type Target = WeakAtom;
#[inline]
fn deref(&self) -> &WeakAtom {
unsafe {
&*self.0
}
}
}
impl PrecomputedHash for Atom {
#[inline]
fn precomputed_hash(&self) -> u32 {
self.get_hash()
}
}
impl Borrow<WeakAtom> for Atom {
#[inline]
fn borrow(&self) -> &WeakAtom {
self
}
}
impl Eq for WeakAtom {}
impl PartialEq for WeakAtom {
#[inline]
fn eq(&self, other: &Self) -> bool {
let weak: *const WeakAtom = self;
let other: *const WeakAtom = other;
weak == other
}
}
unsafe impl Send for Atom {}
unsafe impl Sync for Atom {}
unsafe impl Sync for WeakAtom {}
impl WeakAtom {
/// Construct a `WeakAtom` from a raw `nsAtom`.
#[inline]
pub unsafe fn new<'a>(atom: *const nsAtom) -> &'a mut Self {
&mut *(atom as *mut WeakAtom)
}
/// Clone this atom, bumping the refcount if the atom is not static.
#[inline]
pub fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
/// Get the atom hash.
#[inline]
pub fn get_hash(&self) -> u32 {
self.0.mHash
}
/// Get the atom as a slice of utf-16 chars.
#[inline]
pub fn as_slice(&self) -> &[u16] {
unsafe {
slice::from_raw_parts((*self.as_ptr()).mString, self.len() as usize)
}
}
// NOTE: don't expose this, since it's slow and easy to misuse.
fn chars(&self) -> DecodeUtf16<Cloned<slice::Iter<u16>>> {
char::decode_utf16(self.as_slice().iter().cloned())
}
/// Execute `cb` with the string that this atom represents.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
pub fn with_str<F, Output>(&self, cb: F) -> Output
where F: FnOnce(&str) -> Output
{
// FIXME(bholley): We should measure whether it makes more sense to
// cache the UTF-8 version in the Gecko atom table somehow.
let owned = self.to_string();
cb(&owned)
}
/// Convert this Atom into a string, decoding the UTF-16 bytes.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
#[inline]
pub fn to_string(&self) -> String {
String::from_utf16(self.as_slice()).unwrap()
}
/// Returns whether this atom is static.
#[inline]
pub fn is_static(&self) -> bool {
unsafe {
(*self.as_ptr()).mKind() == nsAtom_AtomKind::StaticAtom as u32
}
}
/// Returns the length of the atom string.
#[inline]
pub fn len(&self) -> u32 {
unsafe {
(*self.as_ptr()).mLength()
}
}
/// Returns whether this atom is the empty string.
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the atom as a mutable pointer.
#[inline]
pub fn as_ptr(&self) -> *mut nsAtom {
let const_ptr: *const nsAtom = &self.0;
const_ptr as *mut nsAtom
}
/// Convert this atom to ASCII lower-case
pub fn to_ascii_lowercase(&self) -> Atom {
let slice = self.as_slice();
match slice.iter().position(|&char16| (b'A' as u16) <= char16 && char16 <= (b'Z' as u16)) {
None => self.clone(),
Some(i) => {
let mut buffer: [u16; 64] = unsafe { mem::uninitialized() };
let mut vec;
let mutable_slice = if let Some(buffer_prefix) = buffer.get_mut(..slice.len()) {
buffer_prefix.copy_from_slice(slice);
buffer_prefix
} else {
vec = slice.to_vec();
&mut vec
};
for char16 in &mut mutable_slice[i..] {
if *char16 <= 0x7F {
*char16 = (*char16 as u8).to_ascii_lowercase() as u16
}
}
Atom::from(&*mutable_slice)
}
}
}
/// Return whether two atoms are ASCII-case-insensitive matches
pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
if self == other {
return true;
}
let a = self.as_slice();
let b = other.as_slice();
a.len() == b.len() && a.iter().zip(b).all(|(&a16, &b16)| {
if a16 <= 0x7F && b16 <= 0x7F {
(a16 as u8).eq_ignore_ascii_case(&(b16 as u8))
} else {
a16 == b16
}
})
}
/// Return whether this atom is an ASCII-case-insensitive match for the given string
pub fn eq_str_ignore_ascii_case(&self, other: &str) -> bool {
self.chars().map(|r| r.map(|c: char| c.to_ascii_lowercase()))
.eq(other.chars().map(|c: char| Ok(c.to_ascii_lowercase())))
}
}
impl fmt::Debug for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko WeakAtom({:p}, {})", self, self)
}
}
impl fmt::Display for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
for c in self.chars() {
w.write_char(c.unwrap_or(char::REPLACEMENT_CHARACTER))?
}
Ok(())
}
}
impl Atom {
/// Execute a callback with the atom represented by `ptr`.
pub unsafe fn with<F, R>(ptr: *mut nsAtom, callback: F) -> R where F: FnOnce(&Atom) -> R {
let atom = Atom(WeakAtom::new(ptr));
let ret = callback(&atom);
mem::forget(atom);
ret
}
/// Creates an atom from an static atom pointer without checking in release
/// builds.
///
/// Right now it's only used by the atom macro, and ideally it should keep
/// that way, now we have sugar for is_static, creating atoms using
/// Atom::from should involve almost no overhead.
#[inline]
unsafe fn from_static(ptr: *mut nsAtom) -> Self {
let atom = Atom(ptr as *mut WeakAtom);
debug_assert!(atom.is_static(),
"Called from_static for a non-static atom!");
atom
}
/// Creates an atom from a dynamic atom pointer that has already had AddRef
/// called on it.
#[inline]
pub unsafe fn from_addrefed(ptr: *mut nsAtom) -> Self {
assert!(!ptr.is_null());
unsafe {
Atom(WeakAtom::new(ptr))
}
}
/// Convert this atom into an addrefed nsAtom pointer.
#[inline]
pub fn into_addrefed(self) -> *mut nsAtom {
let ptr = self.as_ptr();
mem::forget(self);
ptr
}
}
impl Hash for Atom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Hash for WeakAtom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Clone for Atom {
#[inline(always)]
fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
}
impl Drop for Atom {
#[inline]
fn drop(&mut self) {
if !self.is_static() {
unsafe {
Gecko_ReleaseAtom(self.as_ptr());
}
}
}
}
impl Default for Atom {
#[inline]
fn default() -> Self {
atom!("")
}
}
impl fmt::Debug for Atom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko Atom({:p}, {})", self.0, self)
}
}
impl fmt::Display for Atom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
unsafe {
(&*self.0).fmt(w)
}
}
}
impl<'a> From<&'a str> for Atom {
#[inline]
fn from(string: &str) -> Atom {
debug_assert!(string.len() <= u32::max_value() as usize);
unsafe {
Atom(WeakAtom::new(
Gecko_Atomize(string.as_ptr() as *const _, string.len() as u32)
))
}
}
}
impl<'a> From<&'a [u16]> for Atom {
#[inline]
fn from(slice: &[u16]) -> Atom {
Atom::from(&*nsStr::from(slice))
}
}
impl<'a> From<&'a nsAString> for Atom {
#[inline]
fn from(string: &nsAString) -> Atom {
unsafe {
Atom(WeakAtom::new(
Gecko_Atomize16(string)
))
}
}
}
impl<'a> From<Cow<'a, str>> for Atom {
#[inline]
fn from(string: Cow<'a, str>) -> Atom {
Atom::from(&*string)
}
}
impl From<String> for Atom {
#[inline]
fn from(string: String) -> Atom {
Atom::from(&*string)
}
}
impl From<*mut nsAtom> for Atom {
#[inline]
fn from(ptr: *mut nsAtom) -> Atom {
assert!(!ptr.is_null());
unsafe {
let ret = Atom(WeakAtom::new(ptr));
if !ret.is_static() {
Gecko_AddRefAtom(ptr);
}
ret
}
}
}
size_of_is_0!(Atom); | use std::borrow::{Cow, Borrow}; | random_line_split |
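// --- Editor's sketch (illustration only, not part of the dataset row above) --
// The same ASCII-only folding rule as `WeakAtom::eq_ignore_ascii_case`: code
// units <= 0x7F are compared case-insensitively as bytes, anything else must
// match exactly. Written against plain UTF-16 slices so it is self-contained.
fn eq_ignore_ascii_case_utf16(a: &[u16], b: &[u16]) -> bool {
    a.len() == b.len()
        && a.iter().zip(b).all(|(&a16, &b16)| {
            if a16 <= 0x7F && b16 <= 0x7F {
                (a16 as u8).eq_ignore_ascii_case(&(b16 as u8))
            } else {
                a16 == b16
            }
        })
}
// "Atom" and "atom" compare equal under this rule, but U+00C5 and U+00E5 stay
// distinct because only ASCII is folded.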
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! A drop-in replacement for string_cache, but backed by Gecko `nsAtom`s.
use gecko_bindings::bindings::Gecko_AddRefAtom;
use gecko_bindings::bindings::Gecko_Atomize;
use gecko_bindings::bindings::Gecko_Atomize16;
use gecko_bindings::bindings::Gecko_ReleaseAtom;
use gecko_bindings::structs::{nsAtom, nsAtom_AtomKind};
use nsstring::{nsAString, nsStr};
use precomputed_hash::PrecomputedHash;
use std::ascii::AsciiExt;
use std::borrow::{Cow, Borrow};
use std::char::{self, DecodeUtf16};
use std::fmt::{self, Write};
use std::hash::{Hash, Hasher};
use std::iter::Cloned;
use std::mem;
use std::ops::Deref;
use std::slice;
#[macro_use]
#[allow(improper_ctypes, non_camel_case_types, missing_docs)]
pub mod atom_macro {
include!(concat!(env!("OUT_DIR"), "/gecko/atom_macro.rs"));
}
#[macro_use]
pub mod namespace;
pub use self::namespace::{Namespace, WeakNamespace};
macro_rules! local_name {
($s: tt) => { atom!($s) }
}
/// A strong reference to a Gecko atom.
#[derive(Eq, PartialEq)]
pub struct Atom(*mut WeakAtom);
/// An atom *without* a strong reference.
///
/// Only usable as `&'a WeakAtom`,
/// where `'a` is the lifetime of something that holds a strong reference to that atom.
pub struct WeakAtom(nsAtom);
/// A BorrowedAtom for Gecko is just a weak reference to a `nsAtom`, that
/// hasn't been bumped.
pub type BorrowedAtom<'a> = &'a WeakAtom;
impl Deref for Atom {
type Target = WeakAtom;
#[inline]
fn deref(&self) -> &WeakAtom {
unsafe {
&*self.0
}
}
}
impl PrecomputedHash for Atom {
#[inline]
fn precomputed_hash(&self) -> u32 {
self.get_hash()
}
}
impl Borrow<WeakAtom> for Atom {
#[inline]
fn borrow(&self) -> &WeakAtom {
self
}
}
impl Eq for WeakAtom {}
impl PartialEq for WeakAtom {
#[inline]
fn eq(&self, other: &Self) -> bool {
let weak: *const WeakAtom = self;
let other: *const WeakAtom = other;
weak == other
}
}
unsafe impl Send for Atom {}
unsafe impl Sync for Atom {}
unsafe impl Sync for WeakAtom {}
impl WeakAtom {
/// Construct a `WeakAtom` from a raw `nsAtom`.
#[inline]
pub unsafe fn new<'a>(atom: *const nsAtom) -> &'a mut Self {
&mut *(atom as *mut WeakAtom)
}
/// Clone this atom, bumping the refcount if the atom is not static.
#[inline]
pub fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
/// Get the atom hash.
#[inline]
pub fn get_hash(&self) -> u32 {
self.0.mHash
}
/// Get the atom as a slice of utf-16 chars.
#[inline]
pub fn as_slice(&self) -> &[u16] {
unsafe {
slice::from_raw_parts((*self.as_ptr()).mString, self.len() as usize)
}
}
// NOTE: don't expose this, since it's slow and easy to misuse.
fn chars(&self) -> DecodeUtf16<Cloned<slice::Iter<u16>>> {
char::decode_utf16(self.as_slice().iter().cloned())
}
/// Execute `cb` with the string that this atom represents.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
pub fn with_str<F, Output>(&self, cb: F) -> Output
where F: FnOnce(&str) -> Output
{
// FIXME(bholley): We should measure whether it makes more sense to
// cache the UTF-8 version in the Gecko atom table somehow.
let owned = self.to_string();
cb(&owned)
}
/// Convert this Atom into a string, decoding the UTF-16 bytes.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
#[inline]
pub fn to_string(&self) -> String {
String::from_utf16(self.as_slice()).unwrap()
}
/// Returns whether this atom is static.
#[inline]
pub fn is_static(&self) -> bool {
unsafe {
(*self.as_ptr()).mKind() == nsAtom_AtomKind::StaticAtom as u32
}
}
/// Returns the length of the atom string.
#[inline]
pub fn len(&self) -> u32 {
unsafe {
(*self.as_ptr()).mLength()
}
}
/// Returns whether this atom is the empty string.
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the atom as a mutable pointer.
#[inline]
pub fn as_ptr(&self) -> *mut nsAtom {
let const_ptr: *const nsAtom = &self.0;
const_ptr as *mut nsAtom
}
/// Convert this atom to ASCII lower-case
pub fn to_ascii_lowercase(&self) -> Atom {
let slice = self.as_slice();
match slice.iter().position(|&char16| (b'A' as u16) <= char16 && char16 <= (b'Z' as u16)) {
None => self.clone(),
Some(i) => {
let mut buffer: [u16; 64] = unsafe { mem::uninitialized() };
let mut vec;
let mutable_slice = if let Some(buffer_prefix) = buffer.get_mut(..slice.len()) {
buffer_prefix.copy_from_slice(slice);
buffer_prefix
} else {
vec = slice.to_vec();
&mut vec
};
for char16 in &mut mutable_slice[i..] {
if *char16 <= 0x7F {
*char16 = (*char16 as u8).to_ascii_lowercase() as u16
}
}
Atom::from(&*mutable_slice)
}
}
}
/// Return whether two atoms are ASCII-case-insensitive matches
pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
if self == other {
return true;
}
let a = self.as_slice();
let b = other.as_slice();
a.len() == b.len() && a.iter().zip(b).all(|(&a16, &b16)| {
if a16 <= 0x7F && b16 <= 0x7F {
(a16 as u8).eq_ignore_ascii_case(&(b16 as u8))
} else {
a16 == b16
}
})
}
/// Return whether this atom is an ASCII-case-insensitive match for the given string
pub fn eq_str_ignore_ascii_case(&self, other: &str) -> bool {
self.chars().map(|r| r.map(|c: char| c.to_ascii_lowercase()))
.eq(other.chars().map(|c: char| Ok(c.to_ascii_lowercase())))
}
}
impl fmt::Debug for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko WeakAtom({:p}, {})", self, self)
}
}
impl fmt::Display for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
for c in self.chars() {
w.write_char(c.unwrap_or(char::REPLACEMENT_CHARACTER))?
}
Ok(())
}
}
impl Atom {
/// Execute a callback with the atom represented by `ptr`.
pub unsafe fn with<F, R>(ptr: *mut nsAtom, callback: F) -> R where F: FnOnce(&Atom) -> R {
let atom = Atom(WeakAtom::new(ptr));
let ret = callback(&atom);
mem::forget(atom);
ret
}
/// Creates an atom from an static atom pointer without checking in release
/// builds.
///
/// Right now it's only used by the atom macro, and ideally it should keep
/// that way, now we have sugar for is_static, creating atoms using
/// Atom::from should involve almost no overhead.
#[inline]
unsafe fn from_static(ptr: *mut nsAtom) -> Self {
let atom = Atom(ptr as *mut WeakAtom);
debug_assert!(atom.is_static(),
"Called from_static for a non-static atom!");
atom
}
/// Creates an atom from a dynamic atom pointer that has already had AddRef
/// called on it.
#[inline]
pub unsafe fn from_addrefed(ptr: *mut nsAtom) -> Self {
assert!(!ptr.is_null());
unsafe {
Atom(WeakAtom::new(ptr))
}
}
/// Convert this atom into an addrefed nsAtom pointer.
#[inline]
pub fn into_addrefed(self) -> *mut nsAtom {
let ptr = self.as_ptr();
mem::forget(self);
ptr
}
}
impl Hash for Atom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Hash for WeakAtom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Clone for Atom {
#[inline(always)]
fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
}
impl Drop for Atom {
#[inline]
fn drop(&mut self) {
if !self.is_static() {
unsafe {
Gecko_ReleaseAtom(self.as_ptr());
}
}
}
}
impl Default for Atom {
#[inline]
fn default() -> Self {
atom!("")
}
}
impl fmt::Debug for Atom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko Atom({:p}, {})", self.0, self)
}
}
impl fmt::Display for Atom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
unsafe {
(&*self.0).fmt(w)
}
}
}
impl<'a> From<&'a str> for Atom {
#[inline]
fn from(string: &str) -> Atom {
debug_assert!(string.len() <= u32::max_value() as usize);
unsafe {
Atom(WeakAtom::new(
Gecko_Atomize(string.as_ptr() as *const _, string.len() as u32)
))
}
}
}
impl<'a> From<&'a [u16]> for Atom {
#[inline]
fn from(slice: &[u16]) -> Atom {
Atom::from(&*nsStr::from(slice))
}
}
impl<'a> From<&'a nsAString> for Atom {
#[inline]
fn from(string: &nsAString) -> Atom {
unsafe {
Atom(WeakAtom::new(
Gecko_Atomize16(string)
))
}
}
}
impl<'a> From<Cow<'a, str>> for Atom {
#[inline]
fn | (string: Cow<'a, str>) -> Atom {
Atom::from(&*string)
}
}
impl From<String> for Atom {
#[inline]
fn from(string: String) -> Atom {
Atom::from(&*string)
}
}
impl From<*mut nsAtom> for Atom {
#[inline]
fn from(ptr: *mut nsAtom) -> Atom {
assert!(!ptr.is_null());
unsafe {
let ret = Atom(WeakAtom::new(ptr));
if !ret.is_static() {
Gecko_AddRefAtom(ptr);
}
ret
}
}
}
size_of_is_0!(Atom);
| from | identifier_name |
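// --- Editor's sketch (illustration only, not part of the dataset row above) --
// The small-buffer strategy behind `to_ascii_lowercase`: strings of up to 64
// UTF-16 units are rewritten in a stack buffer, longer ones fall back to a
// heap Vec. This sketch uses a zeroed array instead of the deprecated
// `mem::uninitialized()` seen in the original.
fn lower_ascii_utf16(slice: &[u16]) -> Vec<u16> {
    let mut buffer = [0u16; 64];
    let mut vec;
    let work: &mut [u16] = if let Some(prefix) = buffer.get_mut(..slice.len()) {
        prefix.copy_from_slice(slice);
        prefix
    } else {
        vec = slice.to_vec();
        &mut vec
    };
    for unit in work.iter_mut() {
        if *unit <= 0x7F {
            *unit = (*unit as u8).to_ascii_lowercase() as u16;
        }
    }
    work.to_vec()
}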
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! A drop-in replacement for string_cache, but backed by Gecko `nsAtom`s.
use gecko_bindings::bindings::Gecko_AddRefAtom;
use gecko_bindings::bindings::Gecko_Atomize;
use gecko_bindings::bindings::Gecko_Atomize16;
use gecko_bindings::bindings::Gecko_ReleaseAtom;
use gecko_bindings::structs::{nsAtom, nsAtom_AtomKind};
use nsstring::{nsAString, nsStr};
use precomputed_hash::PrecomputedHash;
use std::ascii::AsciiExt;
use std::borrow::{Cow, Borrow};
use std::char::{self, DecodeUtf16};
use std::fmt::{self, Write};
use std::hash::{Hash, Hasher};
use std::iter::Cloned;
use std::mem;
use std::ops::Deref;
use std::slice;
#[macro_use]
#[allow(improper_ctypes, non_camel_case_types, missing_docs)]
pub mod atom_macro {
include!(concat!(env!("OUT_DIR"), "/gecko/atom_macro.rs"));
}
#[macro_use]
pub mod namespace;
pub use self::namespace::{Namespace, WeakNamespace};
macro_rules! local_name {
($s: tt) => { atom!($s) }
}
/// A strong reference to a Gecko atom.
#[derive(Eq, PartialEq)]
pub struct Atom(*mut WeakAtom);
/// An atom *without* a strong reference.
///
/// Only usable as `&'a WeakAtom`,
/// where `'a` is the lifetime of something that holds a strong reference to that atom.
pub struct WeakAtom(nsAtom);
/// A BorrowedAtom for Gecko is just a weak reference to a `nsAtom`, that
/// hasn't been bumped.
pub type BorrowedAtom<'a> = &'a WeakAtom;
impl Deref for Atom {
type Target = WeakAtom;
#[inline]
fn deref(&self) -> &WeakAtom {
unsafe {
&*self.0
}
}
}
impl PrecomputedHash for Atom {
#[inline]
fn precomputed_hash(&self) -> u32 {
self.get_hash()
}
}
impl Borrow<WeakAtom> for Atom {
#[inline]
fn borrow(&self) -> &WeakAtom {
self
}
}
impl Eq for WeakAtom {}
impl PartialEq for WeakAtom {
#[inline]
fn eq(&self, other: &Self) -> bool {
let weak: *const WeakAtom = self;
let other: *const WeakAtom = other;
weak == other
}
}
unsafe impl Send for Atom {}
unsafe impl Sync for Atom {}
unsafe impl Sync for WeakAtom {}
impl WeakAtom {
/// Construct a `WeakAtom` from a raw `nsAtom`.
#[inline]
pub unsafe fn new<'a>(atom: *const nsAtom) -> &'a mut Self {
&mut *(atom as *mut WeakAtom)
}
/// Clone this atom, bumping the refcount if the atom is not static.
#[inline]
pub fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
/// Get the atom hash.
#[inline]
pub fn get_hash(&self) -> u32 |
/// Get the atom as a slice of utf-16 chars.
#[inline]
pub fn as_slice(&self) -> &[u16] {
unsafe {
slice::from_raw_parts((*self.as_ptr()).mString, self.len() as usize)
}
}
// NOTE: don't expose this, since it's slow and easy to misuse.
fn chars(&self) -> DecodeUtf16<Cloned<slice::Iter<u16>>> {
char::decode_utf16(self.as_slice().iter().cloned())
}
/// Execute `cb` with the string that this atom represents.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
pub fn with_str<F, Output>(&self, cb: F) -> Output
where F: FnOnce(&str) -> Output
{
// FIXME(bholley): We should measure whether it makes more sense to
// cache the UTF-8 version in the Gecko atom table somehow.
let owned = self.to_string();
cb(&owned)
}
/// Convert this Atom into a string, decoding the UTF-16 bytes.
///
/// Find alternatives to this function when possible, please, since it's
/// pretty slow.
#[inline]
pub fn to_string(&self) -> String {
String::from_utf16(self.as_slice()).unwrap()
}
/// Returns whether this atom is static.
#[inline]
pub fn is_static(&self) -> bool {
unsafe {
(*self.as_ptr()).mKind() == nsAtom_AtomKind::StaticAtom as u32
}
}
/// Returns the length of the atom string.
#[inline]
pub fn len(&self) -> u32 {
unsafe {
(*self.as_ptr()).mLength()
}
}
/// Returns whether this atom is the empty string.
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the atom as a mutable pointer.
#[inline]
pub fn as_ptr(&self) -> *mut nsAtom {
let const_ptr: *const nsAtom = &self.0;
const_ptr as *mut nsAtom
}
/// Convert this atom to ASCII lower-case
pub fn to_ascii_lowercase(&self) -> Atom {
let slice = self.as_slice();
match slice.iter().position(|&char16| (b'A' as u16) <= char16 && char16 <= (b'Z' as u16)) {
None => self.clone(),
Some(i) => {
let mut buffer: [u16; 64] = unsafe { mem::uninitialized() };
let mut vec;
let mutable_slice = if let Some(buffer_prefix) = buffer.get_mut(..slice.len()) {
buffer_prefix.copy_from_slice(slice);
buffer_prefix
} else {
vec = slice.to_vec();
&mut vec
};
for char16 in &mut mutable_slice[i..] {
if *char16 <= 0x7F {
*char16 = (*char16 as u8).to_ascii_lowercase() as u16
}
}
Atom::from(&*mutable_slice)
}
}
}
/// Return whether two atoms are ASCII-case-insensitive matches
pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
if self == other {
return true;
}
let a = self.as_slice();
let b = other.as_slice();
a.len() == b.len() && a.iter().zip(b).all(|(&a16, &b16)| {
if a16 <= 0x7F && b16 <= 0x7F {
(a16 as u8).eq_ignore_ascii_case(&(b16 as u8))
} else {
a16 == b16
}
})
}
/// Return whether this atom is an ASCII-case-insensitive match for the given string
pub fn eq_str_ignore_ascii_case(&self, other: &str) -> bool {
self.chars().map(|r| r.map(|c: char| c.to_ascii_lowercase()))
.eq(other.chars().map(|c: char| Ok(c.to_ascii_lowercase())))
}
}
impl fmt::Debug for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko WeakAtom({:p}, {})", self, self)
}
}
impl fmt::Display for WeakAtom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
for c in self.chars() {
w.write_char(c.unwrap_or(char::REPLACEMENT_CHARACTER))?
}
Ok(())
}
}
impl Atom {
/// Execute a callback with the atom represented by `ptr`.
pub unsafe fn with<F, R>(ptr: *mut nsAtom, callback: F) -> R where F: FnOnce(&Atom) -> R {
let atom = Atom(WeakAtom::new(ptr));
let ret = callback(&atom);
mem::forget(atom);
ret
}
/// Creates an atom from an static atom pointer without checking in release
/// builds.
///
/// Right now it's only used by the atom macro, and ideally it should keep
/// that way, now we have sugar for is_static, creating atoms using
/// Atom::from should involve almost no overhead.
#[inline]
unsafe fn from_static(ptr: *mut nsAtom) -> Self {
let atom = Atom(ptr as *mut WeakAtom);
debug_assert!(atom.is_static(),
"Called from_static for a non-static atom!");
atom
}
/// Creates an atom from a dynamic atom pointer that has already had AddRef
/// called on it.
#[inline]
pub unsafe fn from_addrefed(ptr: *mut nsAtom) -> Self {
assert!(!ptr.is_null());
unsafe {
Atom(WeakAtom::new(ptr))
}
}
/// Convert this atom into an addrefed nsAtom pointer.
#[inline]
pub fn into_addrefed(self) -> *mut nsAtom {
let ptr = self.as_ptr();
mem::forget(self);
ptr
}
}
impl Hash for Atom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Hash for WeakAtom {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u32(self.get_hash());
}
}
impl Clone for Atom {
#[inline(always)]
fn clone(&self) -> Atom {
Atom::from(self.as_ptr())
}
}
impl Drop for Atom {
#[inline]
fn drop(&mut self) {
if !self.is_static() {
unsafe {
Gecko_ReleaseAtom(self.as_ptr());
}
}
}
}
impl Default for Atom {
#[inline]
fn default() -> Self {
atom!("")
}
}
impl fmt::Debug for Atom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
write!(w, "Gecko Atom({:p}, {})", self.0, self)
}
}
impl fmt::Display for Atom {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
unsafe {
(&*self.0).fmt(w)
}
}
}
impl<'a> From<&'a str> for Atom {
#[inline]
fn from(string: &str) -> Atom {
debug_assert!(string.len() <= u32::max_value() as usize);
unsafe {
Atom(WeakAtom::new(
Gecko_Atomize(string.as_ptr() as *const _, string.len() as u32)
))
}
}
}
impl<'a> From<&'a [u16]> for Atom {
#[inline]
fn from(slice: &[u16]) -> Atom {
Atom::from(&*nsStr::from(slice))
}
}
impl<'a> From<&'a nsAString> for Atom {
#[inline]
fn from(string: &nsAString) -> Atom {
unsafe {
Atom(WeakAtom::new(
Gecko_Atomize16(string)
))
}
}
}
impl<'a> From<Cow<'a, str>> for Atom {
#[inline]
fn from(string: Cow<'a, str>) -> Atom {
Atom::from(&*string)
}
}
impl From<String> for Atom {
#[inline]
fn from(string: String) -> Atom {
Atom::from(&*string)
}
}
impl From<*mut nsAtom> for Atom {
#[inline]
fn from(ptr: *mut nsAtom) -> Atom {
assert!(!ptr.is_null());
unsafe {
let ret = Atom(WeakAtom::new(ptr));
if !ret.is_static() {
Gecko_AddRefAtom(ptr);
}
ret
}
}
}
size_of_is_0!(Atom);
| {
self.0.mHash
} | identifier_body |
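// --- Editor's sketch (illustration only, not part of the dataset row above) --
// Why `Hash for Atom`/`WeakAtom` writes a single u32: the hash is computed
// once when the atom is interned, so hashing is O(1) regardless of string
// length. `Interned` below is hypothetical; only the pattern matters.
use std::hash::{Hash, Hasher};
struct Interned {
    precomputed: u32,
}
impl Hash for Interned {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // No walk over the characters; reuse the cached value.
        state.write_u32(self.precomputed);
    }
}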
lookup_ref_delta_objects.rs | use std::convert::TryInto;
use gix_hash::ObjectId;
use crate::data::{entry::Header, input};
/// An iterator to resolve thin packs on the fly.
pub struct LookupRefDeltaObjectsIter<I, LFn> {
/// The inner iterator whose entries we will resolve.
pub inner: I,
lookup: LFn,
/// The cached delta to provide next time we are called, it's the delta to go with the base we just resolved in its place.
next_delta: Option<input::Entry>,
/// Fuse to stop iteration after first missing object.
error: bool,
/// The overall pack-offset we accumulated thus far. Each inserted entry offsets all following
/// objects by its length. We need to determine exactly where the object was inserted to see if it's affected at all.
inserted_entry_length_at_offset: Vec<Change>,
/// The sum of all entries added so far, as a cache to avoid recomputation
inserted_entries_length_in_bytes: i64,
buf: Vec<u8>,
}
impl<I, LFn> LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
/// Create a new instance wrapping `iter` and using `lookup` as function to retrieve objects that will serve as bases
/// for ref deltas seen while traversing `iter`.
pub fn new(iter: I, lookup: LFn) -> Self {
LookupRefDeltaObjectsIter {
inner: iter,
lookup,
error: false,
inserted_entry_length_at_offset: Vec::new(),
inserted_entries_length_in_bytes: 0,
next_delta: None,
buf: Vec::new(),
}
}
fn shifted_pack_offset(&self, pack_offset: u64) -> u64 {
let new_ofs = pack_offset as i64 + self.inserted_entries_length_in_bytes;
new_ofs.try_into().expect("offset value never becomes negative")
}
/// Positive `size_change` values mean an object grew or, more commonly, was inserted. Negative values
/// mean the object shrank, usually because its header changed from ref-delta to ofs-delta.
fn track_change(
&mut self,
shifted_pack_offset: u64,
pack_offset: u64,
size_change: i64,
oid: impl Into<Option<ObjectId>>,
) {
if size_change == 0 {
return;
}
self.inserted_entry_length_at_offset.push(Change {
shifted_pack_offset,
pack_offset,
size_change_in_bytes: size_change,
oid: oid.into().unwrap_or_else(||
// NOTE: this value acts as a sentinel and the actual hash kind doesn't matter.
gix_hash::Kind::Sha1.null()),
});
self.inserted_entries_length_in_bytes += size_change;
}
fn shift_entry_and_point_to_base_by_offset(&mut self, entry: &mut input::Entry, base_distance: u64) {
let pack_offset = entry.pack_offset;
entry.pack_offset = self.shifted_pack_offset(pack_offset);
entry.header = Header::OfsDelta { base_distance };
let previous_header_size = entry.header_size;
entry.header_size = entry.header.size(entry.decompressed_size) as u16;
let change = entry.header_size as i64 - previous_header_size as i64;
entry.crc32 = Some(entry.compute_crc32());
self.track_change(entry.pack_offset, pack_offset, change, None);
}
}
impl<I, LFn> Iterator for LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
type Item = Result<input::Entry, input::Error>;
fn next(&mut self) -> Option<Self::Item> {
if self.error {
return None;
}
if let Some(delta) = self.next_delta.take() {
return Some(Ok(delta));
}
match self.inner.next() {
Some(Ok(mut entry)) => match entry.header {
Header::RefDelta { base_id } => {
match self.inserted_entry_length_at_offset.iter().rfind(|e| e.oid == base_id) {
None => {
let base_entry = match (self.lookup)(base_id, &mut self.buf) {
Some(obj) => {
let current_pack_offset = entry.pack_offset;
let mut entry = match input::Entry::from_data_obj(&obj, 0) {
Ok(e) => e,
Err(err) => return Some(Err(err)),
};
entry.pack_offset = self.shifted_pack_offset(current_pack_offset);
self.track_change(
entry.pack_offset,
current_pack_offset,
entry.bytes_in_pack() as i64,
base_id,
);
entry
}
None => {
self.error = true;
return Some(Err(input::Error::NotFound { object_id: base_id }));
}
};
{
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_entry.bytes_in_pack());
self.next_delta = Some(entry);
}
Some(Ok(base_entry))
} | Some(Ok(entry))
}
}
}
_ => {
if self.inserted_entries_length_in_bytes!= 0 {
if let Header::OfsDelta { base_distance } = entry.header {
// We have to find the new distance based on the previous distance to the base, using the absolute
// pack offset computed from it as stored in `base_pack_offset`.
let base_pack_offset = entry
.pack_offset
.checked_sub(base_distance)
.expect("distance to be in range of pack");
match self
.inserted_entry_length_at_offset
.binary_search_by_key(&base_pack_offset, |c| c.pack_offset)
{
Ok(index) => {
let index = {
let maybe_index_of_actual_entry = index + 1;
self.inserted_entry_length_at_offset
.get(maybe_index_of_actual_entry)
.and_then(|c| {
(c.pack_offset == base_pack_offset)
.then_some(maybe_index_of_actual_entry)
})
.unwrap_or(index)
};
let new_distance = self
.shifted_pack_offset(entry.pack_offset)
.checked_sub(self.inserted_entry_length_at_offset[index].shifted_pack_offset)
.expect("a base that is behind us in the pack");
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
Err(index) => {
let change_since_offset = self.inserted_entry_length_at_offset[index..]
.iter()
.map(|c| c.size_change_in_bytes)
.sum::<i64>();
let new_distance: u64 = {
(base_distance as i64 + change_since_offset)
.try_into()
.expect("it still points behind us")
};
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
}
} else {
// Offset this entry by all changes (positive or negative) that we saw thus far.
entry.pack_offset = self.shifted_pack_offset(entry.pack_offset);
}
}
Some(Ok(entry))
}
},
other => other,
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (min, max) = self.inner.size_hint();
max.map_or_else(|| (min * 2, None), |max| (min, Some(max * 2)))
}
}
#[derive(Debug)]
struct Change {
/// The original pack offset as mentioned in the entry we saw. This is used to find this as base object if deltas refer to it by
/// old offset.
pack_offset: u64,
/// The new pack offset that is the shifted location of the pack entry in the pack.
shifted_pack_offset: u64,
/// The size change of the entry header, negative values denote shrinking, positive denote growing.
size_change_in_bytes: i64,
/// The object id of the entry responsible for the change, or null if it's an entry just for tracking an insertion.
oid: ObjectId,
} | Some(base_entry) => {
let base_distance =
self.shifted_pack_offset(entry.pack_offset) - base_entry.shifted_pack_offset;
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_distance); | random_line_split |
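// --- Editor's sketch (illustration only, not part of the dataset row above) --
// The core bookkeeping of `shifted_pack_offset`: every base object inserted
// earlier in the pack pushes all later entries forward by its size, so the
// new offset is the original offset plus the signed sum of all changes seen
// so far. The values below are made up for illustration.
use std::convert::TryInto;
fn shifted(original: u64, net_inserted_bytes: i64) -> u64 {
    (original as i64 + net_inserted_bytes)
        .try_into()
        .expect("offset never becomes negative")
}
// After inserting a 30-byte base object, an entry originally at offset 1024
// now sits at 1054:
// assert_eq!(shifted(1024, 30), 1054);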
lookup_ref_delta_objects.rs | use std::convert::TryInto;
use gix_hash::ObjectId;
use crate::data::{entry::Header, input};
/// An iterator to resolve thin packs on the fly.
pub struct LookupRefDeltaObjectsIter<I, LFn> {
/// The inner iterator whose entries we will resolve.
pub inner: I,
lookup: LFn,
/// The cached delta to provide next time we are called, it's the delta to go with the base we just resolved in its place.
next_delta: Option<input::Entry>,
/// Fuse to stop iteration after first missing object.
error: bool,
/// The overall pack-offset we accumulated thus far. Each inserted entry offsets all following
/// objects by its length. We need to determine exactly where the object was inserted to see if it's affected at all.
inserted_entry_length_at_offset: Vec<Change>,
/// The sum of all entries added so far, as a cache to avoid recomputation
inserted_entries_length_in_bytes: i64,
buf: Vec<u8>,
}
impl<I, LFn> LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
/// Create a new instance wrapping `iter` and using `lookup` as function to retrieve objects that will serve as bases
/// for ref deltas seen while traversing `iter`.
pub fn new(iter: I, lookup: LFn) -> Self {
LookupRefDeltaObjectsIter {
inner: iter,
lookup,
error: false,
inserted_entry_length_at_offset: Vec::new(),
inserted_entries_length_in_bytes: 0,
next_delta: None,
buf: Vec::new(),
}
}
fn shifted_pack_offset(&self, pack_offset: u64) -> u64 {
let new_ofs = pack_offset as i64 + self.inserted_entries_length_in_bytes;
new_ofs.try_into().expect("offset value never becomes negative")
}
/// Positive `size_change` values mean an object grew or, more commonly, was inserted. Negative values
/// mean the object shrank, usually because its header changed from ref-delta to ofs-delta.
fn track_change(
&mut self,
shifted_pack_offset: u64,
pack_offset: u64,
size_change: i64,
oid: impl Into<Option<ObjectId>>,
) {
if size_change == 0 {
return;
}
self.inserted_entry_length_at_offset.push(Change {
shifted_pack_offset,
pack_offset,
size_change_in_bytes: size_change,
oid: oid.into().unwrap_or_else(||
// NOTE: this value acts as a sentinel and the actual hash kind doesn't matter.
gix_hash::Kind::Sha1.null()),
});
self.inserted_entries_length_in_bytes += size_change;
}
fn shift_entry_and_point_to_base_by_offset(&mut self, entry: &mut input::Entry, base_distance: u64) {
let pack_offset = entry.pack_offset;
entry.pack_offset = self.shifted_pack_offset(pack_offset);
entry.header = Header::OfsDelta { base_distance };
let previous_header_size = entry.header_size;
entry.header_size = entry.header.size(entry.decompressed_size) as u16;
let change = entry.header_size as i64 - previous_header_size as i64;
entry.crc32 = Some(entry.compute_crc32());
self.track_change(entry.pack_offset, pack_offset, change, None);
}
}
impl<I, LFn> Iterator for LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
type Item = Result<input::Entry, input::Error>;
fn next(&mut self) -> Option<Self::Item> {
if self.error {
return None;
}
if let Some(delta) = self.next_delta.take() {
return Some(Ok(delta));
}
match self.inner.next() {
Some(Ok(mut entry)) => match entry.header {
Header::RefDelta { base_id } => {
match self.inserted_entry_length_at_offset.iter().rfind(|e| e.oid == base_id) {
None => {
let base_entry = match (self.lookup)(base_id, &mut self.buf) {
Some(obj) => {
let current_pack_offset = entry.pack_offset;
let mut entry = match input::Entry::from_data_obj(&obj, 0) {
Ok(e) => e,
Err(err) => return Some(Err(err)),
};
entry.pack_offset = self.shifted_pack_offset(current_pack_offset);
self.track_change(
entry.pack_offset,
current_pack_offset,
entry.bytes_in_pack() as i64,
base_id,
);
entry
}
None => {
self.error = true;
return Some(Err(input::Error::NotFound { object_id: base_id }));
}
};
{
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_entry.bytes_in_pack());
self.next_delta = Some(entry);
}
Some(Ok(base_entry))
}
Some(base_entry) => {
let base_distance =
self.shifted_pack_offset(entry.pack_offset) - base_entry.shifted_pack_offset;
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_distance);
Some(Ok(entry))
}
}
}
_ => {
if self.inserted_entries_length_in_bytes != 0 {
if let Header::OfsDelta { base_distance } = entry.header {
// We have to find the new distance based on the previous distance to the base, using the absolute
// pack offset computed from it as stored in `base_pack_offset`.
let base_pack_offset = entry
.pack_offset
.checked_sub(base_distance)
.expect("distance to be in range of pack");
match self
.inserted_entry_length_at_offset
.binary_search_by_key(&base_pack_offset, |c| c.pack_offset)
{
Ok(index) => {
let index = {
let maybe_index_of_actual_entry = index + 1;
self.inserted_entry_length_at_offset
.get(maybe_index_of_actual_entry)
.and_then(|c| {
(c.pack_offset == base_pack_offset)
.then_some(maybe_index_of_actual_entry)
})
.unwrap_or(index)
};
let new_distance = self
.shifted_pack_offset(entry.pack_offset)
.checked_sub(self.inserted_entry_length_at_offset[index].shifted_pack_offset)
.expect("a base that is behind us in the pack");
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
Err(index) => {
let change_since_offset = self.inserted_entry_length_at_offset[index..]
.iter()
.map(|c| c.size_change_in_bytes)
.sum::<i64>();
let new_distance: u64 = {
(base_distance as i64 + change_since_offset)
.try_into()
.expect("it still points behind us")
};
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
}
} else {
// Offset this entry by all changes (positive or negative) that we saw thus far.
entry.pack_offset = self.shifted_pack_offset(entry.pack_offset);
}
}
Some(Ok(entry))
}
},
other => other,
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (min, max) = self.inner.size_hint();
max.map_or_else(|| (min * 2, None), |max| (min, Some(max * 2)))
}
}
#[derive(Debug)]
struct | {
/// The original pack offset as mentioned in the entry we saw. This is used to find this as base object if deltas refer to it by
/// old offset.
pack_offset: u64,
/// The new pack offset that is the shifted location of the pack entry in the pack.
shifted_pack_offset: u64,
/// The size change of the entry header, negative values denote shrinking, positive denote growing.
size_change_in_bytes: i64,
/// The object id of the entry responsible for the change, or null if it's an entry just for tracking an insertion.
oid: ObjectId,
}
| Change | identifier_name |
// lookup_ref_delta_objects.rs
use std::convert::TryInto;
use gix_hash::ObjectId;
use crate::data::{entry::Header, input};
/// An iterator to resolve thin packs on the fly.
pub struct LookupRefDeltaObjectsIter<I, LFn> {
/// The inner iterator whose entries we will resolve.
pub inner: I,
lookup: LFn,
    /// The cached delta to provide next time we are called; it's the delta that goes with the base we just resolved in its place.
next_delta: Option<input::Entry>,
/// Fuse to stop iteration after first missing object.
error: bool,
    /// The offset changes accumulated thus far. Each inserted entry offsets all following
    /// objects by its length. We need to determine exactly where the object was inserted to see if it's affected at all.
inserted_entry_length_at_offset: Vec<Change>,
/// The sum of all entries added so far, as a cache to avoid recomputation
inserted_entries_length_in_bytes: i64,
buf: Vec<u8>,
}
impl<I, LFn> LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
/// Create a new instance wrapping `iter` and using `lookup` as function to retrieve objects that will serve as bases
/// for ref deltas seen while traversing `iter`.
pub fn new(iter: I, lookup: LFn) -> Self {
LookupRefDeltaObjectsIter {
inner: iter,
lookup,
error: false,
inserted_entry_length_at_offset: Vec::new(),
inserted_entries_length_in_bytes: 0,
next_delta: None,
buf: Vec::new(),
}
}
fn shifted_pack_offset(&self, pack_offset: u64) -> u64 {
let new_ofs = pack_offset as i64 + self.inserted_entries_length_in_bytes;
        new_ofs.try_into().expect("offset value never becomes negative")
}
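    // Worked example for `shifted_pack_offset` (hypothetical numbers): if
    // inserted base objects added 32 bytes before an entry, then
    // `inserted_entries_length_in_bytes` is +32 and an original pack offset
    // of 100 is reported as 132.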
    /// Positive `size_change` values mean an object grew or, more commonly, was inserted. Negative values
    /// mean the object shrank, usually because its header changed from ref-delta to ofs-delta.
fn track_change(
&mut self,
shifted_pack_offset: u64,
pack_offset: u64,
size_change: i64,
oid: impl Into<Option<ObjectId>>,
) {
if size_change == 0 {
return;
}
self.inserted_entry_length_at_offset.push(Change {
shifted_pack_offset,
pack_offset,
size_change_in_bytes: size_change,
oid: oid.into().unwrap_or_else(||
// NOTE: this value acts as sentinel and the actual hash kind doesn't matter.
gix_hash::Kind::Sha1.null()),
});
self.inserted_entries_length_in_bytes += size_change;
}
fn shift_entry_and_point_to_base_by_offset(&mut self, entry: &mut input::Entry, base_distance: u64) {
let pack_offset = entry.pack_offset;
entry.pack_offset = self.shifted_pack_offset(pack_offset);
entry.header = Header::OfsDelta { base_distance };
let previous_header_size = entry.header_size;
entry.header_size = entry.header.size(entry.decompressed_size) as u16;
let change = entry.header_size as i64 - previous_header_size as i64;
entry.crc32 = Some(entry.compute_crc32());
self.track_change(entry.pack_offset, pack_offset, change, None);
}
}
impl<I, LFn> Iterator for LookupRefDeltaObjectsIter<I, LFn>
where
I: Iterator<Item = Result<input::Entry, input::Error>>,
LFn: for<'a> FnMut(ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>,
{
type Item = Result<input::Entry, input::Error>;
fn next(&mut self) -> Option<Self::Item> {
if self.error {
return None;
}
if let Some(delta) = self.next_delta.take() {
return Some(Ok(delta));
}
match self.inner.next() {
Some(Ok(mut entry)) => match entry.header {
Header::RefDelta { base_id } => {
match self.inserted_entry_length_at_offset.iter().rfind(|e| e.oid == base_id) {
None => {
let base_entry = match (self.lookup)(base_id, &mut self.buf) {
Some(obj) => {
let current_pack_offset = entry.pack_offset;
let mut entry = match input::Entry::from_data_obj(&obj, 0) {
Ok(e) => e,
Err(err) => return Some(Err(err)),
};
entry.pack_offset = self.shifted_pack_offset(current_pack_offset);
self.track_change(
entry.pack_offset,
current_pack_offset,
entry.bytes_in_pack() as i64,
base_id,
);
entry
}
None => {
self.error = true;
return Some(Err(input::Error::NotFound { object_id: base_id }));
}
};
{
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_entry.bytes_in_pack());
self.next_delta = Some(entry);
}
Some(Ok(base_entry))
}
Some(base_entry) => {
let base_distance =
self.shifted_pack_offset(entry.pack_offset) - base_entry.shifted_pack_offset;
self.shift_entry_and_point_to_base_by_offset(&mut entry, base_distance);
Some(Ok(entry))
}
}
}
_ => {
                    if self.inserted_entries_length_in_bytes != 0 {
if let Header::OfsDelta { base_distance } = entry.header {
// We have to find the new distance based on the previous distance to the base, using the absolute
// pack offset computed from it as stored in `base_pack_offset`.
let base_pack_offset = entry
.pack_offset
.checked_sub(base_distance)
.expect("distance to be in range of pack");
match self
.inserted_entry_length_at_offset
.binary_search_by_key(&base_pack_offset, |c| c.pack_offset)
{
Ok(index) => {
let index = {
let maybe_index_of_actual_entry = index + 1;
self.inserted_entry_length_at_offset
.get(maybe_index_of_actual_entry)
.and_then(|c| {
(c.pack_offset == base_pack_offset)
.then_some(maybe_index_of_actual_entry)
})
.unwrap_or(index)
};
let new_distance = self
.shifted_pack_offset(entry.pack_offset)
.checked_sub(self.inserted_entry_length_at_offset[index].shifted_pack_offset)
.expect("a base that is behind us in the pack");
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
Err(index) => {
let change_since_offset = self.inserted_entry_length_at_offset[index..]
.iter()
.map(|c| c.size_change_in_bytes)
.sum::<i64>();
let new_distance: u64 = {
(base_distance as i64 + change_since_offset)
.try_into()
.expect("it still points behind us")
};
self.shift_entry_and_point_to_base_by_offset(&mut entry, new_distance);
}
}
} else {
// Offset this entry by all changes (positive or negative) that we saw thus far.
entry.pack_offset = self.shifted_pack_offset(entry.pack_offset);
}
}
Some(Ok(entry))
}
},
other => other,
}
}
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (min, max) = self.inner.size_hint();
        max.map_or_else(|| (min * 2, None), |max| (min, Some(max * 2)))
    }
}
#[derive(Debug)]
struct Change {
/// The original pack offset as mentioned in the entry we saw. This is used to find this as base object if deltas refer to it by
/// old offset.
pack_offset: u64,
/// The new pack offset that is the shifted location of the pack entry in the pack.
shifted_pack_offset: u64,
/// The size change of the entry header, negative values denote shrinking, positive denote growing.
size_change_in_bytes: i64,
/// The object id of the entry responsible for the change, or null if it's an entry just for tracking an insertion.
oid: ObjectId,
}
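// A minimal usage sketch (not part of the original file): it drives the
// iterator over empty input with a lookup that never finds a base.
// Constructing real `input::Entry` values is assumed out of scope here.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    fn no_lookup(_id: ObjectId, _buf: &mut Vec<u8>) -> Option<gix_object::Data<'_>> {
        None
    }

    #[test]
    fn empty_input_yields_nothing() {
        let entries: Vec<Result<input::Entry, input::Error>> = Vec::new();
        let mut iter = LookupRefDeltaObjectsIter::new(entries.into_iter(), no_lookup);
        // With nothing inserted, the size hint mirrors the inner iterator's bounds.
        assert_eq!(iter.size_hint(), (0, Some(0)));
        assert!(iter.next().is_none());
    }
}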
// lib.rs
/*
 * This is the NearDice contract.
 */
// To conserve gas, efficient serialization is achieved through Borsh (http://borsh.io/)
use near_sdk::borsh::{self, BorshDeserialize, BorshSerialize};
use near_sdk::wee_alloc;
use near_sdk::json_types::{U64, U128};
use near_sdk::serde::{Deserialize, Serialize};
use near_sdk::{env, near_bindgen, AccountId, Balance, BlockHeight, Promise};
use near_sdk::collections::{Vector, LookupMap};
use uint::construct_uint;
construct_uint! {
/// 256-bit unsigned integer.
pub struct U256(4);
}
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[derive(BorshDeserialize, BorshSerialize, Serialize, Deserialize, Clone)]
#[serde(crate = "near_sdk::serde")]
pub struct RewardFeeFraction {
pub numerator: u32,
pub denominator: u32,
}
impl RewardFeeFraction {
pub fn assert_valid(&self) {
assert_ne!(self.denominator, 0, "Denominator must be a positive number");
assert!(
self.numerator <= self.denominator,
"The reward fee must be less or equal to 1"
);
}
pub fn multiply(&self, value: Balance) -> Balance {
(U256::from(self.numerator) * U256::from(value) / U256::from(self.denominator)).as_u128()
}
}
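// A small illustrative test (hypothetical values): a 3/100 fee applied to
// 1 NEAR (10^24 yoctoNEAR) yields 3 * 10^22 yoctoNEAR.
#[cfg(test)]
mod reward_fee_fraction_sketch {
    use super::*;

    #[test]
    fn multiply_takes_the_fraction_of_a_balance() {
        let fee = RewardFeeFraction {
            numerator: 3,
            denominator: 100,
        };
        fee.assert_valid();
        assert_eq!(
            fee.multiply(1_000_000_000_000_000_000_000_000),
            30_000_000_000_000_000_000_000
        );
    }
}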
#[derive(BorshDeserialize, BorshSerialize)]
pub struct WinnerInfo {
pub user: AccountId, // winner
pub amount: Balance, // win prize
pub height: BlockHeight,
pub ts: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableWinnerInfo {
pub user: AccountId,
pub amount: U128,
pub height: U64,
pub ts: U64,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableContractInfo {
pub owner: AccountId,
pub jack_pod: U128,
pub owner_pod: U128,
pub dice_number: u8,
pub rolling_fee: U128,
}
#[derive(Serialize, Deserialize)]
#[serde(crate = "near_sdk::serde")]
pub struct HumanReadableDiceResult {
pub user: AccountId,
pub user_guess: u8,
pub dice_point: u8,
pub reward_amount: U128,
pub jackpod_left: U128,
pub height: U64,
pub ts: U64,
}
// Structs in Rust are similar to other languages, and may include impl keyword as shown below
// Note: the names of the structs are not important when calling the smart contract, but the function names are
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct NearDice {
pub owner_id: AccountId,
pub dice_number: u8,
    pub rolling_fee: Balance, // how many NEAR are needed to roll once.
    pub jack_pod: Balance, // half of it is shown to the user as the jackpot amount
    pub owner_pod: Balance, // income of the contract; can be withdrawn by the owner
pub reward_fee_fraction: RewardFeeFraction,
pub win_history: Vector<WinnerInfo>,
pub accounts: LookupMap<AccountId, Balance>, // record user deposit to buy dice
}
impl Default for NearDice {
fn default() -> Self {
env::panic(b"dice contract should be initialized before usage")
}
}
#[near_bindgen]
impl NearDice {
#[init]
pub fn new(
owner_id: AccountId,
dice_number: u8,
rolling_fee: U128,
reward_fee_fraction: RewardFeeFraction,
) -> Self {
assert!(!env::state_exists(), "Already initialized");
reward_fee_fraction.assert_valid();
assert!(
env::is_valid_account_id(owner_id.as_bytes()),
"The owner account ID is invalid"
);
Self {
owner_id,
dice_number,
rolling_fee: rolling_fee.into(),
jack_pod: 0_u128,
owner_pod: 0_u128,
reward_fee_fraction,
win_history: Vector::new(b"w".to_vec()),
accounts: LookupMap::new(b"a".to_vec()),
}
}
//***********************/
// owner functions
//***********************/
fn assert_owner(&self) {
assert_eq!(
env::predecessor_account_id(),
self.owner_id,
"Can only be called by the owner"
);
}
    /// Owner's method. Withdraws `amount` from the owner pod to the owner's account.
pub fn withdraw_ownerpod(&mut self, amount: U128) {
self.assert_owner();
let amount: Balance = amount.into();
        assert!(
            self.owner_pod >= amount,
            "The owner pod has insufficient funds"
        );
let account_id = env::predecessor_account_id();
self.owner_pod -= amount;
Promise::new(account_id).transfer(amount);
}
#[payable]
pub fn deposit_jackpod(&mut self) {
self.assert_owner();
let amount = env::attached_deposit();
self.jack_pod += amount;
}
/// Owner's method.
/// Updates current reward fee fraction to the new given fraction.
pub fn update_reward_fee_fraction(&mut self, reward_fee_fraction: RewardFeeFraction) {
self.assert_owner();
reward_fee_fraction.assert_valid();
self.reward_fee_fraction = reward_fee_fraction;
}
pub fn update_dice_number(&mut self, dice_number: u8) {
self.assert_owner();
self.dice_number = dice_number;
}
pub fn update_rolling_fee(&mut self, rolling_fee: U128) {
self.assert_owner();
self.rolling_fee = rolling_fee.into();
}
//***********************/
// rolling functions
//***********************/
#[payable]
pub fn buy_dice(&mut self) {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check user attached enough rolling fee to buy at least one dice
let amount = env::attached_deposit();
        assert!(
            amount >= self.rolling_fee,
            "You must deposit at least {}",
            self.rolling_fee
        );
let buy_dice_count = amount / self.rolling_fee;
let leftover = amount - buy_dice_count * self.rolling_fee;
let old_value = self.accounts.get(&account_id).unwrap_or(0);
self.accounts.insert(&account_id, &(old_value + buy_dice_count * self.rolling_fee));
// change refund
if leftover > 0 {
Promise::new(account_id).transfer(leftover);
}
}
    /// rolling dice
    /// consumes one previously bought dice and adds rolling_fee NEAR to the jackpot,
    /// then draws a random number in [1, self.dice_number * 6];
    /// if it matches target, transfers half of the jackpot to the caller (minus a tip to the owner_pod)
    pub fn roll_dice(&mut self, target: u8) -> HumanReadableDiceResult {
// check called by real user NOT from other contracts
let account_id = env::predecessor_account_id();
assert_eq!(
account_id.clone(),
env::signer_account_id(),
"This method must be called directly from user."
);
// check user has at least one dice remain
let balance = self.accounts.get(&account_id).unwrap_or(0);
assert!(
balance / self.rolling_fee >= 1,
"You must at least have one dice to play"
);
// update account dice
let leftover = balance - self.rolling_fee;
if leftover == 0 {
self.accounts.remove(&account_id);
} else {
self.accounts.insert(&account_id, &leftover);
}
// always update jack_pod before rolling dice
self.jack_pod += self.rolling_fee;
// rolling dice here
let random_u8: u8 = env::random_seed().iter().fold(0_u8, |acc, x| acc.wrapping_add(*x));
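        // `random_u8 / 0x100` scales the seed byte into [0, 1), so the product
        // below falls in [0, dice_number * 6) and the trailing `+ 1` shifts it
        // into [1, dice_number * 6]. The math is widened to u32 so that large
        // `dice_number` values cannot overflow the intermediate product.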
        let dice_point = self.dice_number as u32 * 6_u32 * random_u8 as u32 / 0x100_u32 + 1;
let mut result = HumanReadableDiceResult {
user: account_id.clone(),
user_guess: target,
dice_point: dice_point as u8,
reward_amount: 0.into(), // if win, need update
jackpod_left: self.jack_pod.into(), // if win, need update
height: env::block_index().into(),
ts: env::block_timestamp().into(),
};
// let's see how lucky caller is this time
if target == dice_point as u8 { // Wow, he wins
// figure out gross reward and update jack pod
let gross_reward = self.jack_pod / 2;
self.jack_pod -= gross_reward;
// split gross to net and owner fee
let owners_fee = self.reward_fee_fraction.multiply(gross_reward);
result.reward_amount = (gross_reward - owners_fee).into();
result.jackpod_left = self.jack_pod.into();
// update owner pod
self.owner_pod += owners_fee;
// records this winning
self.win_history.push(&WinnerInfo {
user: account_id.clone(),
amount: gross_reward - owners_fee,
height: env::block_index(),
ts: env::block_timestamp(),
});
}
result
}
pub fn set_greeting(&mut self, message: String) {
let account_id = env::signer_account_id();
// Use env::log to record logs permanently to the blockchain!
env::log(format!("Saving greeting '{}' for account '{}'", message, account_id,).as_bytes());
}
//***********************/
// view functions
//***********************/
fn get_hr_info(&self, index: u64) -> HumanReadableWinnerInfo {
let info = self.win_history.get(index).expect("Error: no this item in winner history!");
HumanReadableWinnerInfo {
user: info.user.clone(),
amount: info.amount.into(),
height: info.height.into(),
ts: info.ts.into(),
}
}
/// Returns the list of winner info in LIFO order
pub fn get_win_history(&self, from_index: u64, limit: u64) -> Vec<HumanReadableWinnerInfo> {
let counts: u64 = self.win_history.len() as u64;
(from_index..std::cmp::min(from_index + limit, counts))
.map(|index| self.get_hr_info(counts - index - 1)) // reverse to get LIFO order
.collect()
}
pub fn get_contract_info(&self) -> HumanReadableContractInfo {
HumanReadableContractInfo {
owner: self.owner_id.clone(),
jack_pod: self.jack_pod.into(),
owner_pod: self.owner_pod.into(),
dice_number: self.dice_number,
rolling_fee: self.rolling_fee.into(),
}
}
/// Returns the current reward fee as a fraction.
pub fn get_reward_fee_fraction(&self) -> RewardFeeFraction {
self.reward_fee_fraction.clone()
}
/// return user's available dice count
pub fn get_account_dice_count(&self, account_id: String) -> u8 {
let balance = self.accounts.get(&account_id.into()).unwrap_or(0);
(balance / self.rolling_fee) as u8
}
    pub fn get_greeting(&self, _account_id: String) -> &str {
        "Hello, this method is obsolete"
    }
}
/*
* The rest of this file holds the inline tests for the code above
* Learn more about Rust tests: https://doc.rust-lang.org/book/ch11-01-writing-tests.html
*
* To run from contract directory:
* cargo test -- --nocapture
*
* From project root, to run in combination with frontend tests:
* yarn test
*
*/
#[cfg(test)]
mod tests {
use super::*;
use near_sdk::MockedBlockchain;
use near_sdk::{testing_env, VMContext};
// mock the context for testing, notice "signer_account_id" that was accessed above from env::
fn get_context(input: Vec<u8>, is_view: bool) -> VMContext {
VMContext {
current_account_id: "alice_near".to_string(),
signer_account_id: "bob_near".to_string(),
signer_account_pk: vec![0, 1, 2],
predecessor_account_id: "carol_near".to_string(),
input,
block_index: 0,
block_timestamp: 0,
account_balance: 0,
account_locked_balance: 0,
storage_usage: 0,
attached_deposit: 0,
prepaid_gas: 10u64.pow(18),
random_seed: vec![0, 1, 2],
is_view,
output_data_receivers: vec![],
epoch_height: 19,
}
}
    #[test]
    fn set_then_get_greeting() {
        let context = get_context(vec![], false);
        testing_env!(context);
        let mut contract = NearDice::new(
            "bob_near".to_string(),
            1,
            U128(10),
            RewardFeeFraction {
                numerator: 3,
                denominator: 100,
            },
        );
        contract.set_greeting("howdy".to_string());
        // `get_greeting` is obsolete and always returns the same notice.
        assert_eq!(
            "Hello, this method is obsolete".to_string(),
            contract.get_greeting("bob_near".to_string())
        );
    }
    #[test]
    fn get_default_greeting() {
        let context = get_context(vec![], true);
        testing_env!(context);
        let contract = NearDice::new(
            "bob_near".to_string(),
            1,
            U128(10),
            RewardFeeFraction {
                numerator: 3,
                denominator: 100,
            },
        );
        // this test did not call set_greeting; the obsolete method still
        // returns its fixed notice
        assert_eq!(
            "Hello, this method is obsolete".to_string(),
            contract.get_greeting("francis.near".to_string())
        );
    }
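    // Sketch test for the dice-buying arithmetic above; the fee and deposit
    // values are hypothetical.
    #[test]
    fn buy_dice_credits_whole_dice_and_refunds_change() {
        let mut context = get_context(vec![], false);
        context.signer_account_id = "carol_near".to_string();
        context.attached_deposit = 25;
        testing_env!(context);
        let mut contract = NearDice::new(
            "carol_near".to_string(),
            1,
            U128(10),
            RewardFeeFraction {
                numerator: 3,
                denominator: 100,
            },
        );
        contract.buy_dice();
        // 25 / 10 buys 2 dice; the leftover 5 is refunded via a transfer.
        assert_eq!(contract.get_account_dice_count("carol_near".to_string()), 2);
    }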
}
// parser.rs
use nom::{bytes::complete as bytes, character::complete as character, combinator, IResult};
use super::{
finder::FileSearcher,
github::{GitHubIssue, GitHubPatch},
};
use serde::Deserialize;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path};
pub mod issue;
pub mod langs;
pub mod source;
use issue::GitHubTodoLocation;
use source::ParsedTodo;
/// Eat a whole line and optionally its ending but don't return that ending.
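///
/// A behavior sketch:
///
/// ```rust
/// use todo_finder_lib::parser::take_to_eol;
///
/// // The line ending is consumed but not returned.
/// assert_eq!(take_to_eol("ab\ncd"), Ok(("cd", "ab")));
/// ```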
pub fn take_to_eol(i: &str) -> IResult<&str, &str> {
let (i, ln) = bytes::take_till(|c| c == '\r' || c == '\n')(i)?;
let (i, _) = combinator::opt(character::line_ending)(i)?;
Ok((i, ln))
}
#[derive(Debug, Deserialize, Clone)]
pub enum IssueProvider {
GitHub,
}
#[derive(Debug, Clone)]
pub enum ParsingSource {
MarkdownFile,
SourceCode,
IssueAt(IssueProvider),
}
#[derive(Debug, Clone)]
pub struct IssueHead<K> {
pub title: String,
pub assignees: Vec<String>,
pub external_id: K,
}
#[derive(Debug, Clone, PartialEq)]
pub struct IssueBody<T> {
pub descs_and_srcs: Vec<(Vec<String>, T)>,
pub branches: Vec<String>,
}
impl IssueBody<FileTodoLocation> {
pub fn to_github_string(
&self,
cwd: &str,
owner: &str,
repo: &str,
checkout: &str,
) -> Result<String, String> {
let mut lines: Vec<String> = vec![];
for (desc_lines, loc) in self.descs_and_srcs.iter() {
let desc = desc_lines.clone().join("\n");
let link = loc.to_github_link(cwd, owner, repo, checkout)?;
lines.push(vec![desc, link].join("\n"));
}
Ok(lines.join("\n"))
}
}
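// A doc-style sketch (all values hypothetical) of how descriptions and file
// locations are assembled into a GitHub issue body.
#[cfg(test)]
mod issue_body_sketch {
    use super::*;

    #[test]
    fn renders_description_then_link() {
        let body = IssueBody {
            descs_and_srcs: vec![(
                vec!["fix this".to_string()],
                FileTodoLocation {
                    file: "/repo/src/main.rs".into(),
                    src_span: (1, None),
                },
            )],
            branches: vec![],
        };
        let s = body.to_github_string("/repo", "owner", "repo", "abc").unwrap();
        assert_eq!(
            s,
            "fix this\nhttps://github.com/owner/repo/blob/abc/src/main.rs#L1"
        );
    }
}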
#[derive(Debug, Clone)]
pub struct Issue<ExternalId, TodoLocation: PartialEq + Eq> {
pub head: IssueHead<ExternalId>,
pub body: IssueBody<TodoLocation>,
}
impl<ExId, Loc: PartialEq + Eq> Issue<ExId, Loc> {
pub fn new(id: ExId, title: String) -> Self {
Issue {
head: IssueHead {
title,
assignees: vec![],
external_id: id,
},
body: IssueBody {
descs_and_srcs: vec![],
branches: vec![],
},
}
}
}
#[derive(Debug, Clone)]
pub struct IssueMap<ExternalId, TodoLocation: PartialEq + Eq> {
pub parsed_from: ParsingSource,
pub todos: HashMap<String, Issue<ExternalId, TodoLocation>>,
}
/// A todo location in the local filesystem.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FileTodoLocation {
pub file: String,
pub src_span: (usize, Option<usize>),
}
impl FileTodoLocation {
/// ```rust
/// use todo_finder_lib::parser::FileTodoLocation;
///
/// let loc = FileTodoLocation {
/// file: "/total/path/src/file.rs".into(),
/// src_span: (666, Some(1337)),
/// };
///
/// let string = loc
/// .to_github_link("/total/path", "schell", "my_repo", "1234567890")
/// .unwrap();
///
/// assert_eq!(
/// &string,
/// "https://github.com/schell/my_repo/blob/1234567890/src/file.rs#L666-L1337"
/// );
/// ```
pub fn to_github_link(
&self,
cwd: &str,
owner: &str,
repo: &str,
checkout: &str,
) -> Result<String, String> {
let path: &Path = Path::new(&self.file);
let relative: &Path = path
.strip_prefix(cwd)
.map_err(|e| format!("could not relativize path {:#?}: {}", path, e))?;
let file_and_range = vec![
format!("{}", relative.display()),
format!("#L{}", self.src_span.0),
if let Some(end) = self.src_span.1 {
format!("-L{}", end)
} else {
String::new()
},
]
.concat();
let parts = vec![
"https://github.com",
owner,
repo,
"blob",
checkout,
&file_and_range,
];
Ok(parts.join("/"))
}
}
impl<K, V: Eq> IssueMap<K, V> {
pub fn new(parsed_from: ParsingSource) -> IssueMap<K, V> {
IssueMap {
parsed_from,
todos: HashMap::new(),
}
}
}
impl IssueMap<u64, GitHubTodoLocation> {
pub fn new_github_todos() -> Self {
IssueMap {
parsed_from: ParsingSource::IssueAt(IssueProvider::GitHub),
todos: HashMap::new(),
}
}
pub fn add_issue(&mut self, github_issue: &GitHubIssue) {
if let Ok((_, body)) = issue::issue_body(&github_issue.body) {
let mut issue = Issue::new(github_issue.number, github_issue.title.clone());
issue.body = body;
self.todos.insert(github_issue.title.clone(), issue);
}
}
pub fn prepare_patch(&self, local: IssueMap<(), FileTodoLocation>) -> GitHubPatch {
let mut create = IssueMap::new_source_todos();
let mut edit: IssueMap<u64, FileTodoLocation> = IssueMap::new(ParsingSource::SourceCode);
let mut dont_delete = vec![];
for (title, local_issue) in local.todos.into_iter() {
if let Some(remote_issue) = self.todos.get(&title) {
// They both have it
let id = remote_issue.head.external_id.clone();
dont_delete.push(id);
let issue = Issue {
head: remote_issue.head.clone(),
body: local_issue.body,
};
edit.todos.insert(title, issue);
} else {
// Must be created
create.todos.insert(title, local_issue);
}
}
let delete = self
.todos
.values()
.filter_map(|issue| {
let id = issue.head.external_id;
if dont_delete.contains(&id) {
None
} else {
Some(id)
}
})
.collect::<Vec<_>>();
return GitHubPatch {
create,
edit,
delete,
};
}
}
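// Patch semantics in brief: a title that exists only locally is created, a
// title present on both sides is edited in place (keeping the remote issue
// number), and a remote-only title is deleted. Hypothetical example: remote
// {A: #1, B: #2} with local {B, C} yields create = {C}, edit = {B -> #2},
// delete = [#1].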
impl IssueMap<(), FileTodoLocation> {
pub fn new_source_todos() -> Self {
IssueMap {
parsed_from: ParsingSource::SourceCode,
todos: HashMap::new(),
}
}
pub fn distinct_len(&self) -> usize {
self.todos.len()
}
pub fn add_parsed_todo(&mut self, todo: &ParsedTodo, loc: FileTodoLocation) {
let title = todo.title.to_string();
let issue = self
.todos
.entry(title.clone())
.or_insert(Issue::new((), title));
if let Some(assignee) = todo.assignee.map(|s| s.to_string()) {
        if !issue.head.assignees.contains(&assignee) {
issue.head.assignees.push(assignee);
}
        }
        let desc_lines = todo
.desc_lines
.iter()
.map(|s| s.to_string())
.collect::<Vec<_>>();
issue.body.descs_and_srcs.push((desc_lines, loc));
}
pub fn from_files_in_directory(
dir: &str,
excludes: &Vec<String>,
) -> Result<IssueMap<(), FileTodoLocation>, String> {
let possible_todos = FileSearcher::find(dir, excludes)?;
let mut todos = IssueMap::new_source_todos();
let language_map = langs::language_map();
for possible_todo in possible_todos.into_iter() {
let path = Path::new(&possible_todo.file);
// Get our parser for this extension
let ext: Option<_> = path.extension();
if ext.is_none() {
continue;
}
let ext: &str = ext
.expect("impossible!")
.to_str()
.expect("could not get extension as str");
let languages = language_map.get(ext);
if languages.is_none() {
// TODO: Deadletter the file name as unsupported
println!("possible TODO found in unsupported file: {:#?}", path);
continue;
}
let languages = languages.expect("impossible!");
// Open the file and load the contents
let mut file = File::open(path)
.map_err(|e| format!("could not open file: {}\n{}", path.display(), e))?;
let mut contents = String::new();
file.read_to_string(&mut contents)
.map_err(|e| format!("could not read file {:#?}: {}", path, e))?;
let mut current_line = 1;
let mut i = contents.as_str();
for line in possible_todo.lines_to_search.into_iter() {
// Seek to the correct line...
while line > current_line {
let (j, _) =
take_to_eol(i).map_err(|e| format!("couldn't take line:\n{}", e))?;
i = j;
current_line += 1;
}
// Try parsing in each language until we get a match
for language in languages.iter() {
let parser_config = language.as_todo_parser_config();
let parser = source::parse_todo(parser_config);
if let Ok((j, parsed_todo)) = parser(i) {
let num_lines = i.trim_end_matches(j).lines().fold(0, |n, _| n + 1);
let loc = FileTodoLocation {
file: possible_todo.file.to_string(),
src_span: (
line,
if num_lines > 1 {
Some(line + num_lines - 1)
} else {
None
},
),
};
todos.add_parsed_todo(&parsed_todo, loc);
}
}
}
}
Ok(todos)
}
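    /// Renders the collected TODOs as a human-readable Markdown report.
    ///
    /// A minimal sketch (the path and empty exclude list are illustrative):
    ///
    /// ```ignore
    /// let todos = IssueMap::from_files_in_directory("./src", &vec![])?;
    /// println!("{}", todos.as_markdown());
    /// ```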
pub fn as_markdown(&self) -> String {
let num_distinct = self.todos.len();
let num_locs = self
.todos
.values()
.fold(0, |n, todo| n + todo.body.descs_and_srcs.len());
let mut lines = vec![];
lines.push("# TODOs".into());
lines.push(format!(
"Found {} distinct TODOs in {} file locations.\n",
num_distinct, num_locs
));
let mut todos = self.todos.clone().into_iter().collect::<Vec<_>>();
todos.sort_by(|a, b| a.0.cmp(&b.0));
for ((title, issue), n) in todos.into_iter().zip(1..) {
lines.push(format!("{}. {}", n, title));
for (descs, loc) in issue.body.descs_and_srcs.into_iter() {
for line in descs.into_iter() {
lines.push(format!(" {}", line));
}
lines.push(format!(
" file://{} ({})",
loc.file,
if let Some(end) = loc.src_span.1 {
format!("lines {} - {}", loc.src_span.0, end)
} else {
format!("line {}", loc.src_span.0)
},
));
lines.push("".into());
}
            if !issue.head.assignees.is_empty() {
lines.push(format!(
" assignees: {}\n",
issue.head.assignees.join(", ")
));
}
}
lines.join("\n")
}
}
nvm_buffer.rs

use std::cmp;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::ptr;
use block::{AlignedBytes, BlockSize};
use nvm::NonVolatileMemory;
use {ErrorKind, Result};
/// A buffer for the journal region.
///
/// Its purpose is to make appends to the journal region efficient while
/// satisfying the alignment constraint of the inner `NonVolatileMemory`
/// implementation (i.e., that accesses fall on the storage's block boundaries).
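///
/// A minimal usage sketch (`MemoryNvm` is the in-memory backend used by the
/// tests below; any `NonVolatileMemory` implementation works):
///
/// ```ignore
/// use std::io::Write;
///
/// let nvm = MemoryNvm::new(vec![0; 10 * 1024]);
/// let mut buffer = JournalNvmBuffer::new(nvm);
/// buffer.write_all(b"journal entry").unwrap();
/// // Nothing reaches the inner NVM until the buffer is flushed or synced.
/// buffer.flush().unwrap();
/// ```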
#[derive(Debug)]
pub struct JournalNvmBuffer<N: NonVolatileMemory> {
    // The inner NVM instance used to actually persist the journal region's data
    inner: N,
    // Current position of the read/write cursor
    position: u64,
    // Write buffer
    //
    // Write requests issued by the journal region are held in this in-memory
    // buffer, and are not reflected in the inner NVM, until one of the
    // following conditions is met:
    // - The `sync` method is called:
    //   - The journal region calls this method periodically
    // - A read request is issued for a region that overlaps the write buffer's range:
    //   - The write buffer is flushed and synced to the inner NVM before the read is processed
    // - A write request is issued for a region that does not overlap the write buffer's range:
    //   - The write buffer's current data structure cannot represent gaps
    //     (i.e., multiple non-contiguous subregions)
    //   - So the old buffer contents are flushed first, and a new buffer is
    //     created to handle that write request
    //
    // It also takes care of aligning the journal region's write requests to
    // the inner NVM's block boundaries.
    write_buf: AlignedBytes,
    // Holds the position in the inner NVM that the start of `write_buf` corresponds to
    //
    // Like the `position` field, it points at a position in the inner NVM, but
    // while `position` is updated on every read, write, and seek operation,
    // `write_buf_offset` keeps a fixed value until the write buffer's contents
    // are flushed.
    write_buf_offset: u64,
    // Flag indicating whether data has accumulated in the write buffer
    //
    // Set to `true` once any data is written to the write buffer, and reset to
    // `false` after the buffered data has been flushed to the inner NVM.
    maybe_dirty: bool,
    // Read buffer
    //
    // Used to align read requests issued by the journal region to the inner
    // NVM's block boundaries.
    read_buf: AlignedBytes,
}
impl<N: NonVolatileMemory> JournalNvmBuffer<N> {
    /// Creates a new `JournalNvmBuffer` instance.
    ///
    /// The actual reads and writes are performed through `nvm`.
    ///
    /// Callers do not need to worry about alignment: `JournalNvmBuffer`
    /// guarantees that every access to `nvm` is aligned to the block
    /// boundaries that `nvm` requires.
    ///
    /// Note, however, that on a seek, the data after the seek point, up to the
    /// next block boundary, may be overwritten.
pub fn new(nvm: N) -> Self {
let block_size = nvm.block_size();
JournalNvmBuffer {
inner: nvm,
position: 0,
maybe_dirty: false,
write_buf_offset: 0,
write_buf: AlignedBytes::new(0, block_size),
read_buf: AlignedBytes::new(0, block_size),
}
}
#[cfg(test)]
pub fn nvm(&self) -> &N {
&self.inner
}
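    /// Returns `true` if the byte range `[offset, offset + length)` overlaps
    /// data sitting in the (not yet flushed) write buffer.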
fn is_dirty_area(&self, offset: u64, length: usize) -> bool {
        if !self.maybe_dirty || length == 0 || self.write_buf.is_empty() {
return false;
}
if self.write_buf_offset < offset {
let buf_end = self.write_buf_offset + self.write_buf.len() as u64;
offset < buf_end
} else {
let end = offset + length as u64;
self.write_buf_offset < end
}
}
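    /// Writes the buffered data back to the inner NVM. The trailing block is
    /// kept in the buffer so that the next append does not need to re-read it.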
fn flush_write_buf(&mut self) -> Result<()> {
        if self.write_buf.is_empty() || !self.maybe_dirty {
return Ok(());
}
track_io!(self.inner.seek(SeekFrom::Start(self.write_buf_offset)))?;
track_io!(self.inner.write(&self.write_buf))?;
if self.write_buf.len() > self.block_size().as_u16() as usize {
            // In this branch we keep the trailing alignment bytes (= new_len)
            // of the buffer, and advance write_buf_offset by
            // write_buf.len() - new_len (= drop_len).
            //
            // We could instead advance write_buf_offset by the full
            // write_buf.len() that was successfully written and clear
            // write_buf, but since writes can only be issued in whole blocks,
            // the next write would then have to access the NVM once to fetch
            // the entire block. The current implementation avoids that read.
let new_len = self.block_size().as_u16() as usize;
let drop_len = self.write_buf.len() - new_len;
unsafe {
                // Non-overlap is guaranteed by the callers.
ptr::copy(
self.write_buf.as_ptr().add(drop_len), // src
self.write_buf.as_mut_ptr(), // dst
new_len,
);
}
self.write_buf.truncate(new_len);
self.write_buf_offset += drop_len as u64;
}
self.maybe_dirty = false;
Ok(())
}
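    /// Fails with `ErrorKind::InconsistentState` if writing `write_len` bytes
    /// at the current position would run past the end of the NVM.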
fn check_overflow(&self, write_len: usize) -> Result<()> {
let next_position = self.position() + write_len as u64;
track_assert!(
            next_position <= self.capacity(),
            ErrorKind::InconsistentState,
"self.position={}, write_len={}, self.len={}",
self.position(),
write_len,
self.capacity()
);
Ok(())
}
}
impl<N: NonVolatileMemory> NonVolatileMemory for JournalNvmBuffer<N> {
fn sync(&mut self) -> Result<()> {
track!(self.flush_write_buf())?;
self.inner.sync()
}
fn position(&self) -> u64 {
self.position
}
fn capacity(&self) -> u64 {
self.inner.capacity()
}
fn block_size(&self) -> BlockSize {
self.inner.block_size()
}
fn split(self, _: u64) -> Result<(Self, Self)> {
unreachable!()
}
}
impl<N: NonVolatileMemory> Drop for JournalNvmBuffer<N> {
fn drop(&mut self) {
let _ = self.sync();
}
}
impl<N: NonVolatileMemory> Seek for JournalNvmBuffer<N> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let offset = track!(self.convert_to_offset(pos))?;
self.position = offset;
Ok(offset)
}
}
impl<N: NonVolatileMemory> Read for JournalNvmBuffer<N> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.is_dirty_area(self.position, buf.len()) {
track!(self.flush_write_buf())?;
}
let aligned_start = self.block_size().floor_align(self.position);
let aligned_end = self
.block_size()
.ceil_align(self.position + buf.len() as u64);
self.read_buf
.aligned_resize((aligned_end - aligned_start) as usize);
self.inner.seek(SeekFrom::Start(aligned_start))?;
let inner_read_size = self.inner.read(&mut self.read_buf)?;
let start = (self.position - aligned_start) as usize;
let end = cmp::min(inner_read_size, start + buf.len());
let read_size = end - start;
(&mut buf[..read_size]).copy_from_slice(&self.read_buf[start..end]);
self.position += read_size as u64;
Ok(read_size)
}
}
impl<N: NonVolatileMemory> Write for JournalNvmBuffer<N> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
track!(self.check_overflow(buf.len()))?;
let write_buf_start = self.write_buf_offset;
let write_buf_end = write_buf_start + self.write_buf.len() as u64;
if write_buf_start <= self.position && self.position <= write_buf_end {
            // The regions overlap, so we can append starting part-way into the buffer
            // (i.e., no flush of the write buffer is needed)
let start = (self.position - self.write_buf_offset) as usize;
let end = start + buf.len();
self.write_buf.aligned_resize(end);
(&mut self.write_buf[start..end]).copy_from_slice(buf);
self.position += buf.len() as u64;
self.maybe_dirty = true;
Ok(buf.len())
} else {
            // The regions do not overlap, so first write the buffer contents back
track!(self.flush_write_buf())?;
if self.block_size().is_aligned(self.position) {
self.write_buf_offset = self.position;
self.write_buf.aligned_resize(0);
} else {
                // Read the block once so that existing data before the seek position is not discarded.
let size = self.block_size().as_u16();
self.write_buf_offset = self.block_size().floor_align(self.position);
self.write_buf.aligned_resize(size as usize);
self.inner.seek(SeekFrom::Start(self.write_buf_offset))?;
self.inner.read_exact(&mut self.write_buf)?;
}
self.write(buf)
}
}
fn flush(&mut self) -> io::Result<()> {
track!(self.flush_write_buf())?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::io::{Read, Seek, SeekFrom, Write};
use trackable::result::TestResult;
use super::*;
use nvm::MemoryNvm;
#[test]
fn write_write_flush() -> TestResult {
        // Writes to a contiguous region remain in the buffer until `flush`
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[3..6], &[0; 3][..]);
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..6], b"foobar");
Ok(())
}
#[test]
fn write_seek_write_flush() -> TestResult {
        // "Contiguity" is judged per block
        // (a seek is not considered "non-contiguous" unless it crosses a block boundary)
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Current(1)))?;
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[4..7], &[0; 3][..]);
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], b"foo");
assert_eq!(&buffer.nvm().as_bytes()[4..7], b"bar");
        // Even when the seek target is far away, the same applies as long as it stays within a contiguous block
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Start(512)))?;
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[512..515], &[0; 3][..]);
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], b"foo");
assert_eq!(&buffer.nvm().as_bytes()[512..515], b"bar");
        // The same applies when the write regions overlap
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Current(-1)))?;
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[2..5], &[0; 3][..]);
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..5], b"fobar");
Ok(())
}
#[test]
fn write_seek_write() -> TestResult {
        // When the write target is no longer adjacent (in block units), the current buffer contents are written back to the NVM
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Start(513)))?;
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], b"foo");
assert_eq!(&buffer.nvm().as_bytes()[513..516], &[0; 3][..]);
Ok(())
}
#[test]
fn write_seek_read() -> TestResult {
        // When the read target overlaps the write buffer, the buffer contents are written back to the NVM
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.read_exact(&mut [0; 1][..]))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], b"foo");
        // When the read target does not overlap the write buffer, nothing is written back
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Start(512)))?;
track_io!(buffer.read_exact(&mut [0; 1][..]))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
Ok(())
}
#[test]
fn overwritten() -> TestResult {
        // Data before the seek point is preserved.
        // (What happens to the data after it, up to the next block boundary, is undefined)
let mut buffer = new_buffer();
track_io!(buffer.write_all(&[b'a'; 512]))?;
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..512], &[b'a'; 512][..]);
track_io!(buffer.seek(SeekFrom::Start(256)))?;
track_io!(buffer.write_all(&[b'b'; 1]))?;
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..256], &[b'a'; 256][..]);
assert_eq!(buffer.nvm().as_bytes()[256], b'b');
Ok(())
}
fn new_buffer() -> JournalNvmBuffer<MemoryNvm> {
let nvm = MemoryNvm::new(vec![0; 10 * 1024]);
JournalNvmBuffer::new(nvm)
}
}
nvm_buffer.rs | use std::cmp;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::ptr;
use block::{AlignedBytes, BlockSize};
use nvm::NonVolatileMemory;
use {ErrorKind, Result};
/// ジャーナル領域用のバッファ.
///
/// 内部の`NonVolatileMemory`実装のアライメント制約(i.e., ストレージのブロック境界に揃っている)を満たしつつ、
/// ジャーナル領域への追記を効率化するのが目的.
#[derive(Debug)]
pub struct JournalNvmBuffer<N: NonVolatileMemory> {
// ジャーナル領域のデータを、実際に永続化するために使用される内部のNVMインスタンス
inner: N,
// 現在の読み書きカーソルの位置
position: u64,
// 書き込みバッファ
//
// ジャーナル領域から発行された書き込み要求は、
// 以下のいずれかの条件を満たすまでは、メモリ上の本バッファに保持されており、
// 内部NVMには反映されないままとなる:
// - `sync`メソッドが呼び出された:
// - ジャーナル領域は定期的に本メソッドを呼び出す
// - 書き込みバッファのカバー範囲に重複する領域に対して、読み込み要求が発行された場合:
// - 書き込みバッファの内容をフラッシュして、内部NVMに同期した後に、該当読み込み命令を処理
// - 書き込みバッファのカバー範囲に重複しない領域に対して、書き込み要求が発行された場合:
// - 現状の書き込みバッファのデータ構造では、ギャップ(i.e., 連続しない複数部分領域)を表現することはできない
// - そのため、一度古いバッファの内容をフラッシュした後に、該当書き込み要求を処理するためのバッファを作成する
//
// ジャーナル領域が発行した書き込み要求を、
// 内部NVMのブロック境界に合うようにアライメントする役目も担っている。
write_buf: AlignedBytes,
// `write_buf`の始端が、内部NVM上のどの位置に対応するかを保持するためのフィールド
//
// 「内部NVM上での位置を指す」という点では`position`フィールドと似ているが、
// `position`は読み書きやシーク操作の度に値が更新されるのに対して、
// `write_buf_offset`は、書き込みバッファの内容がフラッシュされるまでは、
// 固定の値が使用され続ける。
write_buf_offset: u64,
// 書き込みバッファ内にデータが溜まっているかどうかを判定するためのフラグ
//
// 一度でも書き込みバッファにデータが書かれたら`true`に設定され、
// 内部NVMにバッファ内のデータがフラッシュされた後は`false`に設定される。
maybe_dirty: bool,
// 読み込みバッファ
//
// ジャーナル領域が発行した読み込み要求を、
// 内部NVMのブロック境界に合うようにアライメントするために使用される。
read_buf: AlignedBytes,
}
impl<N: NonVolatileMemory> JournalNvmBuffer<N> {
/// 新しい`JournalNvmBuffer`インスタンスを生成する.
///
/// これは実際に読み書きには`nvm`を使用する.
///
/// なお`nvm`へのアクセス時に、それが`nvm`が要求するブロック境界にアライメントされていることは、
/// `JournalNvmBuffer`が保証するため、利用者が気にする必要はない.
///
/// ただし、シーク時には、シーク地点を含まない次のブロック境界までのデータは
/// 上書きされてしまうので注意が必要.
pub fn new(nvm: N) -> Self {
let block_size = nvm.block_size();
JournalNvmBuffer {
inner: nvm,
position: 0,
maybe_dirty: false,
write_buf_offset: 0,
write_buf: AlignedBytes::new(0, block_size),
read_buf: AlignedBytes::new(0, block_size),
}
}
#[cfg(test)]
pub fn nvm(&self) -> &N {
&self.inner
}
fn is_dirty_area(&self, offset: u64, length: usize) -> bool {
if!self.maybe_dirty || length == 0 || self.write_buf.is_empty() {
return false;
}
if self.write_buf_offset < offset {
let buf_end = self.write_buf_offset + self.write_buf.len() as u64;
offset < buf_end
} else {
let end = offset + length as u64;
self.write_buf_offset < end
}
}
fn flush_write_buf(&mut self) -> Result<()> {
if self.write_buf.is_empty() ||!self.maybe_dirty {
return Ok(());
}
track_io!(self.inner.seek(SeekFrom::Start(self.write_buf_offset)))?;
track_io!(self.inner.write(&self.write_buf))?;
if self.write_buf.len() > self.block_size().as_u16() as usize {
// このif節では、
// バッファに末端のalignmentバイト分(= new_len)の情報を残す。
// write_buf_offsetは、write_buf.len() - new_len(= drop_len)分だけ進められる。
//
// write_buf_offsetを、書き出しに成功したwrite_buf.len()分だけ進めて、
// write_bufをクリアすることもできるが、
// ブロック長でしか書き出すことができないため、その場合は次回の書き込み時に
// NVMに一度アクセスしてブロック全体を取得しなくてはならない。
// この読み込みを避けるため、現在の実装の形をとっている。
let new_len = self.block_size().as_u16() as usize;
let drop_len = self.write_buf.len() - new_len;
unsafe {
// This nonoverlappingness is guranteed by the callers.
ptr::copy(
self.write_buf.as_ptr().add(drop_len), // src
self.write_buf.as_mut_ptr(), // dst
new_len,
);
}
self.write_buf.truncate(new_len);
self.write_buf_offset += drop_len as u64;
}
self.maybe_dirty = false;
Ok(())
}
fn check_overflow(&self, write_len: usize) -> Result<()> {
let next_position = self.position() + write_len as u64;
track_assert!(
next_position <= self.capacity(),
ErrorKind::InconsistentState,
"self.position={}, write_len={}, self.len={}",
self.position(),
write_len,
self.capacity()
);
Ok(())
}
}
impl<N: NonVolatileMemory> NonVolatileMemory for JournalNvmBuffer<N> {
fn sync(&mut self) -> Result<()> {
track!(self.flush_write_buf())?;
self.inner.sync()
}
fn position(&self) -> u64 {
self.position
}
fn capacity(&self) -> u64 {
self.inner.capacity()
}
fn block_size(&self) -> BlockSize {
self.inner.block_size()
}
fn split(self, _: u64) -> Result<(Self, Self)> {
unreachable!()
}
}
impl<N: NonVolatileMemory> Drop for JournalNvmBuffer<N> {
fn drop(&mut self) {
let _ = self.sync();
}
}
impl<N: NonVolatileMemory> Seek for JournalNvmBuffer<N> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let offset = track!(self.convert_to_offset(pos))?;
self.position = offset;
Ok(offset)
}
}
impl<N: NonVolatileMemory> Read for JournalNvmBuffer<N> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.is_dirty_area(self.position, buf.len()) {
track!(self.flush_write_buf())?;
}
let aligned_start = self.block_size().floor_align(self.position);
let aligned_end = self
.block_size()
.ceil_align(self.position + buf.len() as u64);
self.read_buf
.aligned_resize((aligned_end - aligned_start) as usize);
self.inner.seek(SeekFrom::Start(aligned_start))?;
let inner_read_size = self.inner.read(&mut self.read_buf)?;
let start = (self.position - aligned_start) as usize;
let end = cmp::min(inner_read_size, start + buf.len());
let read_size = end - start;
(&mut buf[..read_size]).copy_from_slice(&self.read_buf[start..end]);
self.position += read_size as u64;
Ok(read_size)
}
}
impl<N: NonVolatileMemory> Write for JournalNvmBuffer<N> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
track!(self.check_overflow(buf.len()))?;
let write_buf_start = self.write_buf_offset;
let write_buf_end = write_buf_start + self.write_buf.len() as u64;
if write_buf_start <= self.position && self.position <= write_buf_end {
// 領域が重複しており、バッファの途中から追記可能
// (i.e., 書き込みバッファのフラッシュが不要)
let start = (self.position - self.write_buf_offs | ite_buf.aligned_resize(end);
(&mut self.write_buf[start..end]).copy_from_slice(buf);
self.position += buf.len() as u64;
self.maybe_dirty = true;
Ok(buf.len())
} else {
// 領域に重複がないので、一度バッファの中身を書き戻す
track!(self.flush_write_buf())?;
if self.block_size().is_aligned(self.position) {
self.write_buf_offset = self.position;
self.write_buf.aligned_resize(0);
} else {
// シーク位置より前方の既存データが破棄されてしまわないように、一度読み込みを行う.
let size = self.block_size().as_u16();
self.write_buf_offset = self.block_size().floor_align(self.position);
self.write_buf.aligned_resize(size as usize);
self.inner.seek(SeekFrom::Start(self.write_buf_offset))?;
self.inner.read_exact(&mut self.write_buf)?;
}
self.write(buf)
}
}
fn flush(&mut self) -> io::Result<()> {
track!(self.flush_write_buf())?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::io::{Read, Seek, SeekFrom, Write};
use trackable::result::TestResult;
use super::*;
use nvm::MemoryNvm;
#[test]
fn write_write_flush() -> TestResult {
// 連続領域の書き込みは`flush`するまでバッファに残り続ける
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[3..6], &[0; 3][..]);
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..6], b"foobar");
Ok(())
}
#[test]
fn write_seek_write_flush() -> TestResult {
// "連続"の判定は、ブロック単位で行われる
// (シークしてもブロックを跨がないと"連続していない"と判定されない)
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Current(1)))?;
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[4..7], &[0; 3][..]);
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], b"foo");
assert_eq!(&buffer.nvm().as_bytes()[4..7], b"bar");
// シーク先を遠くした場合でも、連続するブロック内に収まっているなら同様
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Start(512)))?;
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[512..515], &[0; 3][..]);
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], b"foo");
assert_eq!(&buffer.nvm().as_bytes()[512..515], b"bar");
        // Likewise when the write ranges overlap.
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Current(-1)))?;
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
assert_eq!(&buffer.nvm().as_bytes()[2..5], &[0; 3][..]);
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..5], b"fobar");
Ok(())
}
#[test]
fn write_seek_write() -> TestResult {
        // When the write destination is no longer adjacent (at block granularity),
        // the current buffer contents are written back to the NVM.
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Start(513)))?;
track_io!(buffer.write_all(b"bar"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], b"foo");
assert_eq!(&buffer.nvm().as_bytes()[513..516], &[0; 3][..]);
Ok(())
}
#[test]
fn write_seek_read() -> TestResult {
        // If the read range overlaps the write buffer, the buffer contents are written back to the NVM.
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.read_exact(&mut [0; 1][..]))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], b"foo");
        // If the read range does not overlap the write buffer, nothing is written back.
let mut buffer = new_buffer();
track_io!(buffer.write_all(b"foo"))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
track_io!(buffer.seek(SeekFrom::Start(512)))?;
track_io!(buffer.read_exact(&mut [0; 1][..]))?;
assert_eq!(&buffer.nvm().as_bytes()[0..3], &[0; 3][..]);
Ok(())
}
#[test]
fn overwritten() -> TestResult {
        // Data preceding the seek position is preserved.
        // (What happens to the data after it, up to the next block boundary, is unspecified.)
let mut buffer = new_buffer();
track_io!(buffer.write_all(&[b'a'; 512]))?;
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..512], &[b'a'; 512][..]);
track_io!(buffer.seek(SeekFrom::Start(256)))?;
track_io!(buffer.write_all(&[b'b'; 1]))?;
track_io!(buffer.flush())?;
assert_eq!(&buffer.nvm().as_bytes()[0..256], &[b'a'; 256][..]);
assert_eq!(buffer.nvm().as_bytes()[256], b'b');
Ok(())
}
fn new_buffer() -> JournalNvmBuffer<MemoryNvm> {
let nvm = MemoryNvm::new(vec![0; 10 * 1024]);
JournalNvmBuffer::new(nvm)
}
}
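// A minimal usage sketch (hypothetical values; uses the `MemoryNvm` test
// backend above). Writes accumulate in the block-aligned write buffer and
// reach the underlying NVM only when `flush` is called:
//
//     use std::io::Write;
//
//     let mut buffer = JournalNvmBuffer::new(MemoryNvm::new(vec![0; 4096]));
//     buffer.write_all(b"journal entry").unwrap(); // still buffered
//     buffer.flush().unwrap();                     // now persisted to the NVM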
linked_list.rs
//! An intrusive linked list.
//!
//! ```ignore
//! use intrusive::{define_list_node, linked_list::{List, ListLink}};
//! use std::sync::Arc;
//!
//! struct Thread {
//! id: usize,
//! link: ListLink<ThreadsNode>,
//! }
//!
//! define_list_node!(ThreadsNode, Arc<Thread>, link);
//!
//! let mut threads = List::<ThreadsNode>::new();
//! let thread1 = Arc::new(Thread { id: 1, link: Default::default() });
//! threads.push_back(thread1);
//! ```
//!
use core::cell::Cell;
use core::fmt::{self, Debug, Formatter};
use core::marker::PhantomData;
use core::ops::ControlFlow;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicBool, Ordering};
pub use etc::offset_of;
use crate::Static;
/// A trait representing a container that can be inserted into the linked list.
pub trait ListNode {
type Elem: Static;
fn elem_to_link(elem: Self::Elem) -> NonNull<ListLink<Self>>;
fn from_link_to_elem(link: NonNull<ListLink<Self>>) -> Self::Elem;
fn from_link_to_nonnull(
link: NonNull<ListLink<Self>>,
) -> NonNull<<Self::Elem as Static>::Inner>;
}
/// The link fields of the linked list, embedded in a container.
pub struct ListLink<L: ?Sized> {
push_lock: AtomicBool,
next: Cell<Option<NonNull<ListLink<L>>>>,
prev: Cell<Option<NonNull<ListLink<L>>>>,
_pd: PhantomData<L>,
}
impl<L: ListNode> ListLink<L> {
pub fn empty() -> ListLink<L> {
ListLink {
push_lock: AtomicBool::new(true),
next: Cell::new(None),
prev: Cell::new(None),
_pd: PhantomData,
}
}
pub fn is_in_use(&self) -> bool {
match (self.next.get(), self.prev.get()) {
(Some(_), Some(_)) => true,
(None, None) => false,
_ => unreachable!(),
}
}
fn next(&self) -> Option<NonNull<ListLink<L>>> {
self.next.get()
}
}
impl<L: ListNode> Debug for ListLink<L> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(
f,
"ListLink<{}>",
if self.is_in_use() {
"in list"
} else {
"not in list"
}
)
}
}
impl<L: ListNode> Default for ListLink<L> {
fn default() -> Self {
ListLink::empty()
}
}
// SAFETY: ListLink is protected by `push_lock`.
unsafe impl<L: ListNode + Sync> Sync for ListLink<L> {}
#[macro_export(local_inner_macros)]
macro_rules! define_list_node {
($list_name:ident, $elem:ty, $field:ident) => {
struct $list_name;
impl $crate::linked_list::ListNode for $list_name {
type Elem = $elem;
fn elem_to_link(elem: Self::Elem) -> core::ptr::NonNull<ListLink<Self>> {
unsafe {
core::ptr::NonNull::new_unchecked(
&mut ((*$crate::Static::into_nonnull(elem).as_mut()).$field) as *mut _,
)
}
}
fn from_link_to_elem(
link: core::ptr::NonNull<$crate::linked_list::ListLink<Self>>,
) -> Self::Elem {
let nonnull = Self::from_link_to_nonnull(link);
unsafe { $crate::Static::from_nonnull(nonnull) }
}
fn from_link_to_nonnull(
link: core::ptr::NonNull<$crate::linked_list::ListLink<Self>>,
) -> core::ptr::NonNull<<Self::Elem as $crate::Static>::Inner> {
let offset =
$crate::linked_list::offset_of!(<Self::Elem as $crate::Static>::Inner, $field);
// SAFETY: It won't be null since link is nonnull.
unsafe {
core::ptr::NonNull::new_unchecked(
(link.as_ptr() as *mut u8).offset(-offset) as *mut _
)
}
}
}
};
}
/// An intrusive linked list.
pub struct List<L: ListNode> {
head: Option<NonNull<ListLink<L>>>,
tail: Option<NonNull<ListLink<L>>>,
_pd: PhantomData<L>,
}
impl<L: ListNode> List<L> {
/// Creates an empty linked list.
pub const fn new() -> List<L> {
List {
head: None,
tail: None,
_pd: PhantomData,
}
}
/// Returns `true` if the list is empty. `O(1)`.
pub fn is_empty(&self) -> bool {
        self.head.is_none()
}
/// Returns the number of elements. `O(n)`.
pub fn len(&self) -> usize {
let mut len = 0;
for _ in self.iter() {
len += 1;
}
len
}
    /// Removes and returns the first element satisfying the predicate `pred`.
    /// It returns `None` if the list is empty or if `pred` returned `false`
    /// for all elements. `O(n)`.
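    ///
    /// A short sketch, reusing the `Thread`/`ThreadsNode` types from the
    /// module-level example:
    ///
    /// ```ignore
    /// // Detach the first thread whose id matches; `None` if no element matches.
    /// let detached = threads.remove_first_if(|t| t.id == 1);
    /// ```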
pub fn remove_first_if<F>(&mut self, pred: F) -> Option<L::Elem>
where
F: Fn(&<L::Elem as Static>::Inner) -> bool,
{
let mut current = self.head;
while let Some(link) = current {
if pred(unsafe { L::from_link_to_nonnull(link).as_ref() }) {
self.remove(unsafe { link.as_ref() });
return Some(L::from_link_to_elem(link));
}
current = unsafe { link.as_ref().next() };
}
None
}
    /// Calls the callback for each element until it returns `ControlFlow::Break`. `O(n)`.
fn walk_links<F, R>(&self, mut f: F) -> Option<R>
where
F: FnMut(NonNull<ListLink<L>>) -> ControlFlow<R>,
{
let mut current = self.head;
while let Some(link) = current {
if let ControlFlow::Break(value) = f(link) {
return Some(value);
}
current = unsafe { link.as_ref().next() };
}
None
}
    /// Inserts an element at the end of the list. Returns `Err(elem)` if another
    /// thread has just inserted the element into a (possibly different) list
    /// using the same link as defined in `L`. `O(1)`.
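    ///
    /// A sketch of the failure mode, reusing the module-level example types:
    ///
    /// ```ignore
    /// let mut run_queue_a = List::<ThreadsNode>::new();
    /// let mut run_queue_b = List::<ThreadsNode>::new();
    /// let thread = Arc::new(Thread { id: 1, link: Default::default() });
    /// assert!(run_queue_a.push_back(thread.clone()).is_ok());
    /// // The link is already in use, so the second insertion is rejected.
    /// assert!(run_queue_b.push_back(thread).is_err());
    /// ```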
pub fn push_back(&mut self, elem: L::Elem) -> Result<(), L::Elem> {
unsafe {
let link_ptr = L::elem_to_link(elem);
let link = link_ptr.as_ref();
// Prevent multiple threads from inserting the same link at once.
//
            // Say CPU 1 and CPU 2 are trying to add thread A to their own
// runqueues simultaneously:
//
// CPU 1: runqueue1.push_back(thread_A.clone());
// CPU 2: runqueue2.push_back(thread_A.clone());
//
            // In this case, one of the CPUs (CPU 1 or CPU 2) fails to insert
            // the element.
            if !link.push_lock.swap(false, Ordering::SeqCst) {
return Err(L::from_link_to_elem(link_ptr));
}
assert!(
!link.is_in_use(),
"tried to insert an already inserted link to another list"
);
if let Some(tail) = self.tail {
tail.as_ref().next.set(Some(link_ptr));
}
            if self.head.is_none() {
                self.head = Some(link_ptr);
            }
link.prev.set(self.tail);
link.next.set(None);
self.tail = Some(link_ptr);
Ok(())
}
}
/// Pops the element at the beginning of the list. `O(1)`.
pub fn pop_front(&mut self) -> Option<L::Elem> {
match self.head {
Some(head) => unsafe {
self.remove(head.as_ref());
Some(L::from_link_to_elem(head))
},
None => None,
}
}
pub fn is_link_in_list(&mut self, link: &ListLink<L>) -> bool {
let elem_nonnull = unsafe { NonNull::new_unchecked(link as *const _ as *mut _) };
self.walk_links(|link| {
if link == elem_nonnull {
ControlFlow::Break(true)
} else {
ControlFlow::Continue(())
}
})
.unwrap_or(false)
}
/// Removes an element in the list. `O(1)`.
///
/// Caller must make sure that the element is in the list.
pub fn remove(&mut self, link: &ListLink<L>) {
        // Because we don't need access to `self`, we could define this
        // method as `List::remove(elem: L::Elem)`. However, that would allow
        // simultaneous removals, which would break the links, so we
        // intentionally require `&mut self` to prevent such a race.
// Make sure the element is in the list or this method would mutate other
// lists.
debug_assert!(self.is_link_in_list(link));
match (link.prev.get(), link.next.get()) {
(Some(prev), Some(next)) => unsafe {
next.as_ref().prev.set(Some(prev));
prev.as_ref().next.set(Some(next));
},
(None, Some(next)) => unsafe {
next.as_ref().prev.set(None);
self.head = Some(next);
},
(Some(prev), None) => unsafe {
prev.as_ref().next.set(None);
self.tail = Some(prev);
},
(None, None) => {
self.head = None;
self.tail = None;
}
}
link.prev.set(None);
link.next.set(None);
debug_assert!(!link.push_lock.swap(true, Ordering::SeqCst));
}
fn iter(&self) -> Iter<'_, L> {
Iter {
current: self.head,
_pd: &PhantomData,
}
}
}
impl<L: ListNode> Default for List<L> {
fn default() -> Self {
Self::new()
}
}
pub struct Iter<'a, L: ListNode> {
current: Option<NonNull<ListLink<L>>>,
_pd: &'a PhantomData<L>,
}
impl<'a, L: ListNode> Iterator for Iter<'a, L> {
type Item = &'a <L::Elem as Static>::Inner;
fn next(&mut self) -> Option<&'a <L::Elem as Static>::Inner> {
self.current.map(|current| unsafe {
self.current = current.as_ref().next();
L::from_link_to_nonnull(current).as_ref()
})
}
}
impl<'a, L: ListNode> IntoIterator for &'a List<L> {
type Item = &'a <L::Elem as Static>::Inner;
type IntoIter = Iter<'a, L>;
fn into_iter(self) -> Iter<'a, L> {
self.iter()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
define_list_node!(MyList, Arc<MyElem>, node);
#[derive(Debug)]
struct MyElem {
value: usize,
node: ListLink<MyList>,
}
#[test]
pub fn push_and_pop() {
let mut l: List<MyList> = List::new();
let elem1 = Arc::new(MyElem {
value: 123,
node: Default::default(),
});
let elem2 = Arc::new(MyElem {
value: 456,
node: Default::default(),
});
assert_eq!(l.len(), 0);
assert!(l.push_back(elem1).is_ok());
assert_eq!(l.len(), 1);
assert!(l.push_back(elem2).is_ok());
assert_eq!(l.len(), 2);
assert_eq!(l.pop_front().map(|e| e.value), Some(123));
assert_eq!(l.len(), 1);
assert_eq!(l.pop_front().map(|e| e.value), Some(456));
assert_eq!(l.len(), 0);
assert_eq!(l.pop_front().map(|e| e.value), None);
}
fn populate_3_elems() -> (List<MyList>, Arc<MyElem>, Arc<MyElem>, Arc<MyElem>) {
let mut l: List<MyList> = List::new();
let elem1 = Arc::new(MyElem {
value: 1,
node: Default::default(),
});
let elem2 = Arc::new(MyElem {
value: 20,
node: Default::default(),
});
let elem3 = Arc::new(MyElem {
value: 300,
node: Default::default(),
});
assert!(l.push_back(elem1.clone()).is_ok());
assert!(l.push_back(elem2.clone()).is_ok());
assert!(l.push_back(elem3.clone()).is_ok());
(l, elem1, elem2, elem3)
}
#[test]
pub fn iter() {
let mut l: List<MyList> = List::new();
assert!(l.iter().next().is_none());
let elem1 = Arc::new(MyElem {
value: 1,
node: Default::default(),
});
assert!(l.push_back(elem1).is_ok());
let mut iter = l.iter();
assert!(iter.next().is_some());
assert!(iter.next().is_none());
}
#[test]
pub fn remove_elem_at_head() {
let (mut l, elem1, _elem2, _elem3) = populate_3_elems();
l.remove(&elem1.node);
assert_eq!(l.iter().map(|e| e.value).sum::<usize>(), 320);
}
#[test]
pub fn remove_elem_at_middle() {
let (mut l, _elem1, elem2, _elem3) = populate_3_elems();
l.remove(&elem2.node);
assert_eq!(l.iter().map(|e| e.value).sum::<usize>(), 301);
}
#[test]
pub fn remove_elem_at_tail() {
let (mut l, _elem1, _elem2, elem3) = populate_3_elems();
l.remove(&elem3.node);
assert_eq!(l.iter().map(|e| e.value).sum::<usize>(), 21);
}
}
view.rs
// Copyright (c) 2021 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Image views.
//!
//! This module contains types related to image views. An image view wraps around
//! an image and describes how the GPU should interpret the data. It is needed when an image is
//! to be used in a shader descriptor or as a framebuffer attachment.
use std::error;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::mem::MaybeUninit;
use std::ops::Range;
use std::ptr;
use std::sync::Arc;
use crate::device::Device;
use crate::format::Format;
use crate::format::FormatTy;
use crate::image::sys::UnsafeImage;
use crate::image::ImageAccess;
use crate::image::ImageDimensions;
use crate::memory::DeviceMemoryAllocError;
use crate::sampler::Sampler;
use crate::check_errors;
use crate::vk;
use crate::OomError;
use crate::SafeDeref;
use crate::VulkanObject;
/// A safe image view that checks for validity and keeps its attached image alive.
pub struct ImageView<I>
where
I: ImageAccess,
{
image: I,
inner: UnsafeImageView,
array_layers: Range<u32>,
format: Format,
identity_swizzle: bool,
ty: ImageViewType,
}
impl<I> ImageView<I>
where
I: ImageAccess,
{
/// Creates a new image view spanning all mipmap levels and array layers in the image.
///
/// The view type is automatically determined from the image, based on its dimensions and
/// number of layers.
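    ///
    /// A minimal sketch (`image` stands for any value implementing
    /// `ImageAccess`, created elsewhere):
    ///
    /// ```ignore
    /// let view = ImageView::new(image)?;
    /// ```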
#[inline]
pub fn new(image: I) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
        let ty = match image.dimensions() {
            ImageDimensions::Dim1d { array_layers: 1, .. } => ImageViewType::Dim1d,
            ImageDimensions::Dim1d { .. } => ImageViewType::Dim1dArray,
            ImageDimensions::Dim2d { array_layers: 1, .. } => ImageViewType::Dim2d,
            ImageDimensions::Dim2d { .. } => ImageViewType::Dim2dArray,
            ImageDimensions::Dim3d { .. } => ImageViewType::Dim3d,
        };
Self::with_type(image, ty)
}
    /// Creates a new image view with a custom type.
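    ///
    /// For example, a cube-compatible 2D image with six array layers could be
    /// viewed as a cubemap (sketch; assumes `image` was created with the
    /// `cube_compatible` flag):
    ///
    /// ```ignore
    /// let view = ImageView::with_type(image, ImageViewType::Cubemap)?;
    /// ```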
pub fn with_type(
image: I,
ty: ImageViewType,
) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
let mipmap_levels = 0..image.mipmap_levels();
let array_layers = 0..image.dimensions().array_layers();
Self::with_type_ranges(image, ty, mipmap_levels, array_layers)
}
/// Creates a new image view with a custom type and ranges of mipmap levels and array layers.
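    ///
    /// Sketch: expose only the first mipmap level of array layers 2..4
    /// (assumes the image actually has that many levels and layers):
    ///
    /// ```ignore
    /// let view = ImageView::with_type_ranges(image, ImageViewType::Dim2dArray, 0..1, 2..4)?;
    /// ```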
pub fn with_type_ranges(
image: I,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
let dimensions = image.dimensions();
let format = image.format();
let image_inner = image.inner().image;
let usage = image_inner.usage();
let flags = image_inner.flags();
if mipmap_levels.end <= mipmap_levels.start
|| mipmap_levels.end > image_inner.mipmap_levels()
{
return Err(ImageViewCreationError::MipMapLevelsOutOfRange);
}
if array_layers.end <= array_layers.start || array_layers.end > dimensions.array_layers() {
return Err(ImageViewCreationError::ArrayLayersOutOfRange);
}
        if !(usage.sampled
|| usage.storage
|| usage.color_attachment
|| usage.depth_stencil_attachment
|| usage.input_attachment
|| usage.transient_attachment)
{
return Err(ImageViewCreationError::InvalidImageUsage);
}
// Check for compatibility with the image
match (
ty,
image.dimensions(),
array_layers.end - array_layers.start,
mipmap_levels.end - mipmap_levels.start,
) {
            (ImageViewType::Dim1d, ImageDimensions::Dim1d { .. }, 1, _) => (),
            (ImageViewType::Dim1dArray, ImageDimensions::Dim1d { .. }, _, _) => (),
            (ImageViewType::Dim2d, ImageDimensions::Dim2d { .. }, 1, _) => (),
            (ImageViewType::Dim2dArray, ImageDimensions::Dim2d { .. }, _, _) => (),
            (ImageViewType::Cubemap, ImageDimensions::Dim2d { .. }, 6, _)
                if flags.cube_compatible => (),
            (ImageViewType::CubemapArray, ImageDimensions::Dim2d { .. }, n, _)
                if flags.cube_compatible && n % 6 == 0 => (),
            (ImageViewType::Dim3d, ImageDimensions::Dim3d { .. }, 1, _) => (),
            (ImageViewType::Dim2d, ImageDimensions::Dim3d { .. }, 1, 1)
                if flags.array_2d_compatible => (),
            (ImageViewType::Dim2dArray, ImageDimensions::Dim3d { .. }, _, 1)
                if flags.array_2d_compatible => (),
            _ => return Err(ImageViewCreationError::IncompatibleType),
}
let inner =
unsafe { UnsafeImageView::new(image_inner, ty, mipmap_levels, array_layers.clone())? };
Ok(Arc::new(ImageView {
image,
inner,
array_layers,
format,
identity_swizzle: true, // FIXME:
ty,
}))
}
/// Returns the wrapped image that this image view was created from.
pub fn image(&self) -> &I {
&self.image
}
}
/// Error that can happen when creating an image view.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ImageViewCreationError {
/// Allocating memory failed.
AllocError(DeviceMemoryAllocError),
/// The specified range of array layers was out of range for the image.
ArrayLayersOutOfRange,
/// The specified range of mipmap levels was out of range for the image.
MipMapLevelsOutOfRange,
/// The requested [`ImageViewType`] was not compatible with the image, or with the specified ranges of array layers and mipmap levels.
IncompatibleType,
/// The image was not created with
/// [one of the required usages](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#valid-imageview-imageusage)
/// for image views.
InvalidImageUsage,
}
impl error::Error for ImageViewCreationError {
#[inline]
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ImageViewCreationError::AllocError(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for ImageViewCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
fmt,
"{}",
match *self {
                ImageViewCreationError::AllocError(_) => "allocating memory failed",
ImageViewCreationError::ArrayLayersOutOfRange => "array layers are out of range",
ImageViewCreationError::MipMapLevelsOutOfRange => "mipmap levels are out of range",
ImageViewCreationError::IncompatibleType =>
"image view type is not compatible with image, array layers or mipmap levels",
ImageViewCreationError::InvalidImageUsage =>
"the usage of the image is not compatible with image views",
}
)
}
}
impl From<OomError> for ImageViewCreationError {
#[inline]
fn from(err: OomError) -> ImageViewCreationError {
ImageViewCreationError::AllocError(DeviceMemoryAllocError::OomError(err))
}
}
/// A low-level wrapper around a `vkImageView`.
pub struct UnsafeImageView {
view: vk::ImageView,
device: Arc<Device>,
}
impl UnsafeImageView {
/// Creates a new view from an image.
///
/// # Safety
/// - The returned `UnsafeImageView` must not outlive `image`.
/// - `image` must have a usage that is compatible with image views.
/// - `ty` must be compatible with the dimensions and flags of the image.
/// - `mipmap_levels` must not be empty, must be within the range of levels of the image, and be compatible with the requested `ty`.
/// - `array_layers` must not be empty, must be within the range of layers of the image, and be compatible with the requested `ty`.
///
/// # Panics
    /// Panics if the image is a YCbCr image, since the Vulkano API is not yet
    /// flexible enough to specify the aspect of the image.
pub unsafe fn new(
image: &UnsafeImage,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<UnsafeImageView, OomError> {
let vk = image.device().pointers();
debug_assert!(mipmap_levels.end > mipmap_levels.start);
debug_assert!(mipmap_levels.end <= image.mipmap_levels());
debug_assert!(array_layers.end > array_layers.start);
debug_assert!(array_layers.end <= image.dimensions().array_layers());
let aspect_mask = match image.format().ty() {
FormatTy::Float | FormatTy::Uint | FormatTy::Sint | FormatTy::Compressed => {
vk::IMAGE_ASPECT_COLOR_BIT
}
FormatTy::Depth => vk::IMAGE_ASPECT_DEPTH_BIT,
FormatTy::Stencil => vk::IMAGE_ASPECT_STENCIL_BIT,
FormatTy::DepthStencil => vk::IMAGE_ASPECT_DEPTH_BIT | vk::IMAGE_ASPECT_STENCIL_BIT,
// Not yet supported --> would require changes to ImmutableImage API :-)
FormatTy::Ycbcr => unimplemented!(),
};
let view = {
let infos = vk::ImageViewCreateInfo {
sType: vk::STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
image: image.internal_object(),
viewType: ty.into(),
format: image.format() as u32,
components: vk::ComponentMapping {
r: 0,
g: 0,
b: 0,
a: 0,
}, // FIXME:
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmap_levels.start,
levelCount: mipmap_levels.end - mipmap_levels.start,
baseArrayLayer: array_layers.start,
layerCount: array_layers.end - array_layers.start,
},
};
let mut output = MaybeUninit::uninit();
check_errors(vk.CreateImageView(
image.device().internal_object(),
&infos,
ptr::null(),
output.as_mut_ptr(),
))?;
output.assume_init()
};
Ok(UnsafeImageView {
view,
device: image.device().clone(),
})
}
}
unsafe impl VulkanObject for UnsafeImageView {
type Object = vk::ImageView;
const TYPE: vk::ObjectType = vk::OBJECT_TYPE_IMAGE_VIEW;
#[inline]
fn internal_object(&self) -> vk::ImageView {
self.view
}
}
impl fmt::Debug for UnsafeImageView {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "<Vulkan image view {:?}>", self.view)
}
}
impl Drop for UnsafeImageView {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroyImageView(self.device.internal_object(), self.view, ptr::null());
}
}
}
impl PartialEq for UnsafeImageView {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.view == other.view && self.device == other.device
}
}
impl Eq for UnsafeImageView {}
impl Hash for UnsafeImageView {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.view.hash(state);
self.device.hash(state);
}
}
/// The geometry type of an image view.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ImageViewType {
Dim1d,
Dim1dArray,
Dim2d,
Dim2dArray,
Dim3d,
Cubemap,
CubemapArray,
}
impl From<ImageViewType> for vk::ImageViewType {
fn from(image_view_type: ImageViewType) -> Self {
match image_view_type {
ImageViewType::Dim1d => vk::IMAGE_VIEW_TYPE_1D,
ImageViewType::Dim1dArray => vk::IMAGE_VIEW_TYPE_1D_ARRAY,
ImageViewType::Dim2d => vk::IMAGE_VIEW_TYPE_2D,
ImageViewType::Dim2dArray => vk::IMAGE_VIEW_TYPE_2D_ARRAY,
ImageViewType::Dim3d => vk::IMAGE_VIEW_TYPE_3D,
ImageViewType::Cubemap => vk::IMAGE_VIEW_TYPE_CUBE,
ImageViewType::CubemapArray => vk::IMAGE_VIEW_TYPE_CUBE_ARRAY,
}
}
}
/// Trait for types that represent an image view that the GPU can access.
pub unsafe trait ImageViewAbstract {
/// Returns the wrapped image that this image view was created from.
fn image(&self) -> &dyn ImageAccess;
/// Returns the inner unsafe image view object used by this image view.
fn inner(&self) -> &UnsafeImageView;
/// Returns the range of array layers of the wrapped image that this view exposes.
fn array_layers(&self) -> Range<u32>;
/// Returns the format of this view. This can be different from the parent's format.
fn format(&self) -> Format;
/// Returns true if the view doesn't use components swizzling.
///
/// Must be true when the view is used as a framebuffer attachment or TODO: I don't remember
/// the other thing.
fn identity_swizzle(&self) -> bool;
/// Returns the [`ImageViewType`] of this image view.
fn ty(&self) -> ImageViewType;
/// Returns true if the given sampler can be used with this image view.
///
/// This method should check whether the sampler's configuration can be used with the format
/// of the view.
// TODO: return a Result and propagate it when binding to a descriptor set
fn | (&self, _sampler: &Sampler) -> bool {
true /* FIXME */
}
}
unsafe impl<I> ImageViewAbstract for ImageView<I>
where
I: ImageAccess,
{
#[inline]
fn image(&self) -> &dyn ImageAccess {
&self.image
}
#[inline]
fn inner(&self) -> &UnsafeImageView {
&self.inner
}
#[inline]
fn array_layers(&self) -> Range<u32> {
self.array_layers.clone()
}
#[inline]
fn format(&self) -> Format {
// TODO: remove this default impl
self.format
}
#[inline]
fn identity_swizzle(&self) -> bool {
self.identity_swizzle
}
#[inline]
fn ty(&self) -> ImageViewType {
self.ty
}
}
unsafe impl<T> ImageViewAbstract for T
where
T: SafeDeref,
T::Target: ImageViewAbstract,
{
#[inline]
fn image(&self) -> &dyn ImageAccess {
(**self).image()
}
#[inline]
fn inner(&self) -> &UnsafeImageView {
(**self).inner()
}
#[inline]
fn array_layers(&self) -> Range<u32> {
(**self).array_layers()
}
#[inline]
fn format(&self) -> Format {
(**self).format()
}
#[inline]
fn identity_swizzle(&self) -> bool {
(**self).identity_swizzle()
}
#[inline]
fn ty(&self) -> ImageViewType {
(**self).ty()
}
#[inline]
fn can_be_sampled(&self, sampler: &Sampler) -> bool {
(**self).can_be_sampled(sampler)
}
}
impl PartialEq for dyn ImageViewAbstract + Send + Sync {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.inner() == other.inner()
}
}
impl Eq for dyn ImageViewAbstract + Send + Sync {}
impl Hash for dyn ImageViewAbstract + Send + Sync {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.inner().hash(state);
}
}
| can_be_sampled | identifier_name |
view.rs | // Copyright (c) 2021 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Image views.
//!
//! This module contains types related to image views. An image view wraps around
//! an image and describes how the GPU should interpret the data. It is needed when an image is
//! to be used in a shader descriptor or as a framebuffer attachment.
use std::error;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::mem::MaybeUninit;
use std::ops::Range;
use std::ptr;
use std::sync::Arc;
use crate::device::Device;
use crate::format::Format;
use crate::format::FormatTy;
use crate::image::sys::UnsafeImage;
use crate::image::ImageAccess;
use crate::image::ImageDimensions;
use crate::memory::DeviceMemoryAllocError;
use crate::sampler::Sampler;
use crate::check_errors;
use crate::vk;
use crate::OomError;
use crate::SafeDeref;
use crate::VulkanObject;
/// A safe image view that checks for validity and keeps its attached image alive.
pub struct ImageView<I>
where
I: ImageAccess,
{
image: I,
inner: UnsafeImageView,
array_layers: Range<u32>,
format: Format,
identity_swizzle: bool,
ty: ImageViewType,
}
impl<I> ImageView<I>
where
I: ImageAccess,
{
/// Creates a new image view spanning all mipmap levels and array layers in the image.
///
/// The view type is automatically determined from the image, based on its dimensions and
/// number of layers.
#[inline]
pub fn new(image: I) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
let ty = match image.dimensions() {
ImageDimensions::Dim1d {
array_layers: 1,..
} => ImageViewType::Dim1d,
ImageDimensions::Dim1d {.. } => ImageViewType::Dim1dArray,
ImageDimensions::Dim2d {
array_layers: 1,..
} => ImageViewType::Dim2d,
ImageDimensions::Dim2d {.. } => ImageViewType::Dim2dArray,
ImageDimensions::Dim3d {.. } => ImageViewType::Dim3d,
};
Self::with_type(image, ty)
}
/// Crates a new image view with a custom type.
pub fn with_type(
image: I,
ty: ImageViewType,
) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
let mipmap_levels = 0..image.mipmap_levels();
let array_layers = 0..image.dimensions().array_layers();
Self::with_type_ranges(image, ty, mipmap_levels, array_layers)
}
/// Creates a new image view with a custom type and ranges of mipmap levels and array layers.
pub fn with_type_ranges(
image: I,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
let dimensions = image.dimensions();
let format = image.format();
let image_inner = image.inner().image;
let usage = image_inner.usage();
let flags = image_inner.flags();
if mipmap_levels.end <= mipmap_levels.start
|| mipmap_levels.end > image_inner.mipmap_levels()
{
return Err(ImageViewCreationError::MipMapLevelsOutOfRange);
}
if array_layers.end <= array_layers.start || array_layers.end > dimensions.array_layers() {
return Err(ImageViewCreationError::ArrayLayersOutOfRange);
}
if!(usage.sampled
|| usage.storage
|| usage.color_attachment
|| usage.depth_stencil_attachment
|| usage.input_attachment
|| usage.transient_attachment)
{
return Err(ImageViewCreationError::InvalidImageUsage);
}
// Check for compatibility with the image
match (
ty,
image.dimensions(),
array_layers.end - array_layers.start,
mipmap_levels.end - mipmap_levels.start,
) {
(ImageViewType::Dim1d, ImageDimensions::Dim1d {.. }, 1, _) => (),
(ImageViewType::Dim1dArray, ImageDimensions::Dim1d {.. }, _, _) => (),
(ImageViewType::Dim2d, ImageDimensions::Dim2d {.. }, 1, _) => (),
(ImageViewType::Dim2dArray, ImageDimensions::Dim2d {.. }, _, _) => (),
(ImageViewType::Cubemap, ImageDimensions::Dim2d {.. }, 6, _)
if flags.cube_compatible =>
{
()
}
(ImageViewType::CubemapArray, ImageDimensions::Dim2d {.. }, n, _)
if flags.cube_compatible && n % 6 == 0 =>
{
()
}
(ImageViewType::Dim3d, ImageDimensions::Dim3d {.. }, 1, _) => (),
(ImageViewType::Dim2d, ImageDimensions::Dim3d {.. }, 1, 1)
if flags.array_2d_compatible =>
{
()
}
(ImageViewType::Dim2dArray, ImageDimensions::Dim3d {.. }, _, 1)
if flags.array_2d_compatible =>
{
()
}
_ => return Err(ImageViewCreationError::IncompatibleType),
}
let inner =
unsafe { UnsafeImageView::new(image_inner, ty, mipmap_levels, array_layers.clone())? };
Ok(Arc::new(ImageView {
image,
inner,
array_layers,
format,
identity_swizzle: true, // FIXME:
ty,
}))
}
/// Returns the wrapped image that this image view was created from.
pub fn image(&self) -> &I {
&self.image
}
}
/// Error that can happen when creating an image view.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ImageViewCreationError {
/// Allocating memory failed.
AllocError(DeviceMemoryAllocError),
/// The specified range of array layers was out of range for the image.
ArrayLayersOutOfRange,
/// The specified range of mipmap levels was out of range for the image.
MipMapLevelsOutOfRange,
/// The requested [`ImageViewType`] was not compatible with the image, or with the specified ranges of array layers and mipmap levels.
IncompatibleType,
/// The image was not created with
/// [one of the required usages](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#valid-imageview-imageusage)
/// for image views.
InvalidImageUsage,
}
impl error::Error for ImageViewCreationError {
#[inline]
fn source(&self) -> Option<&(dyn error::Error +'static)> {
match *self {
ImageViewCreationError::AllocError(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for ImageViewCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
fmt,
"{}",
match *self {
ImageViewCreationError::AllocError(err) => "allocating memory failed",
ImageViewCreationError::ArrayLayersOutOfRange => "array layers are out of range",
ImageViewCreationError::MipMapLevelsOutOfRange => "mipmap levels are out of range",
ImageViewCreationError::IncompatibleType =>
"image view type is not compatible with image, array layers or mipmap levels",
ImageViewCreationError::InvalidImageUsage =>
"the usage of the image is not compatible with image views",
}
)
}
}
impl From<OomError> for ImageViewCreationError {
#[inline]
fn from(err: OomError) -> ImageViewCreationError {
ImageViewCreationError::AllocError(DeviceMemoryAllocError::OomError(err))
}
}
/// A low-level wrapper around a `vkImageView`.
pub struct UnsafeImageView {
view: vk::ImageView,
device: Arc<Device>,
}
impl UnsafeImageView {
/// Creates a new view from an image.
///
/// # Safety
/// - The returned `UnsafeImageView` must not outlive `image`.
/// - `image` must have a usage that is compatible with image views.
/// - `ty` must be compatible with the dimensions and flags of the image.
/// - `mipmap_levels` must not be empty, must be within the range of levels of the image, and be compatible with the requested `ty`.
/// - `array_layers` must not be empty, must be within the range of layers of the image, and be compatible with the requested `ty`.
///
/// # Panics
/// Panics if the image is a YcbCr image, since the Vulkano API is not yet flexible enough to
/// specify the aspect of image.
pub unsafe fn new(
image: &UnsafeImage,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<UnsafeImageView, OomError> {
let vk = image.device().pointers();
debug_assert!(mipmap_levels.end > mipmap_levels.start);
debug_assert!(mipmap_levels.end <= image.mipmap_levels());
debug_assert!(array_layers.end > array_layers.start);
debug_assert!(array_layers.end <= image.dimensions().array_layers());
let aspect_mask = match image.format().ty() {
FormatTy::Float | FormatTy::Uint | FormatTy::Sint | FormatTy::Compressed => {
vk::IMAGE_ASPECT_COLOR_BIT
}
FormatTy::Depth => vk::IMAGE_ASPECT_DEPTH_BIT,
FormatTy::Stencil => vk::IMAGE_ASPECT_STENCIL_BIT,
// view.rs
// Copyright (c) 2021 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Image views.
//!
//! This module contains types related to image views. An image view wraps around
//! an image and describes how the GPU should interpret the data. It is needed when an image is
//! to be used in a shader descriptor or as a framebuffer attachment.
use std::error;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::mem::MaybeUninit;
use std::ops::Range;
use std::ptr;
use std::sync::Arc;
use crate::device::Device;
use crate::format::Format;
use crate::format::FormatTy;
use crate::image::sys::UnsafeImage;
use crate::image::ImageAccess;
use crate::image::ImageDimensions;
use crate::memory::DeviceMemoryAllocError;
use crate::sampler::Sampler;
use crate::check_errors;
use crate::vk;
use crate::OomError;
use crate::SafeDeref;
use crate::VulkanObject;
/// A safe image view that checks for validity and keeps its attached image alive.
pub struct ImageView<I>
where
I: ImageAccess,
{
image: I,
inner: UnsafeImageView,
array_layers: Range<u32>,
format: Format,
identity_swizzle: bool,
ty: ImageViewType,
}
impl<I> ImageView<I>
where
I: ImageAccess,
{
/// Creates a new image view spanning all mipmap levels and array layers in the image.
///
/// The view type is automatically determined from the image, based on its dimensions and
/// number of layers.
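    ///
    /// # Example
    ///
    /// A minimal usage sketch (hypothetical, not from the original source): `image` is assumed
    /// to be some existing value implementing `ImageAccess`, such as an `ImmutableImage`;
    /// error handling is elided.
    ///
    /// ```ignore
    /// let view = ImageView::new(image)?;
    /// ```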
#[inline]
pub fn new(image: I) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
let ty = match image.dimensions() {
            ImageDimensions::Dim1d {
                array_layers: 1, ..
            } => ImageViewType::Dim1d,
            ImageDimensions::Dim1d { .. } => ImageViewType::Dim1dArray,
            ImageDimensions::Dim2d {
                array_layers: 1, ..
            } => ImageViewType::Dim2d,
            ImageDimensions::Dim2d { .. } => ImageViewType::Dim2dArray,
            ImageDimensions::Dim3d { .. } => ImageViewType::Dim3d,
};
Self::with_type(image, ty)
}
    /// Creates a new image view with a custom type.
pub fn with_type(
image: I,
ty: ImageViewType,
) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
let mipmap_levels = 0..image.mipmap_levels();
let array_layers = 0..image.dimensions().array_layers();
Self::with_type_ranges(image, ty, mipmap_levels, array_layers)
}
/// Creates a new image view with a custom type and ranges of mipmap levels and array layers.
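    ///
    /// A hypothetical sketch (not from the original source): viewing a single mipmap level and
    /// the first array layer of a 2D image as a plain `Dim2d` view.
    ///
    /// ```ignore
    /// let view = ImageView::with_type_ranges(image, ImageViewType::Dim2d, 0..1, 0..1)?;
    /// ```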
pub fn with_type_ranges(
image: I,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<Arc<ImageView<I>>, ImageViewCreationError> {
let dimensions = image.dimensions();
let format = image.format();
let image_inner = image.inner().image;
let usage = image_inner.usage();
let flags = image_inner.flags();
if mipmap_levels.end <= mipmap_levels.start
|| mipmap_levels.end > image_inner.mipmap_levels()
{
return Err(ImageViewCreationError::MipMapLevelsOutOfRange);
}
if array_layers.end <= array_layers.start || array_layers.end > dimensions.array_layers() {
return Err(ImageViewCreationError::ArrayLayersOutOfRange);
}
        if !(usage.sampled
|| usage.storage
|| usage.color_attachment
|| usage.depth_stencil_attachment
|| usage.input_attachment
|| usage.transient_attachment)
{
return Err(ImageViewCreationError::InvalidImageUsage);
}
// Check for compatibility with the image
match (
ty,
image.dimensions(),
array_layers.end - array_layers.start,
mipmap_levels.end - mipmap_levels.start,
) {
            (ImageViewType::Dim1d, ImageDimensions::Dim1d { .. }, 1, _) => (),
            (ImageViewType::Dim1dArray, ImageDimensions::Dim1d { .. }, _, _) => (),
            (ImageViewType::Dim2d, ImageDimensions::Dim2d { .. }, 1, _) => (),
            (ImageViewType::Dim2dArray, ImageDimensions::Dim2d { .. }, _, _) => (),
            (ImageViewType::Cubemap, ImageDimensions::Dim2d { .. }, 6, _)
                if flags.cube_compatible =>
            {
                ()
            }
            (ImageViewType::CubemapArray, ImageDimensions::Dim2d { .. }, n, _)
                if flags.cube_compatible && n % 6 == 0 =>
            {
                ()
            }
            (ImageViewType::Dim3d, ImageDimensions::Dim3d { .. }, 1, _) => (),
            (ImageViewType::Dim2d, ImageDimensions::Dim3d { .. }, 1, 1)
                if flags.array_2d_compatible =>
            {
                ()
            }
            (ImageViewType::Dim2dArray, ImageDimensions::Dim3d { .. }, _, 1)
                if flags.array_2d_compatible =>
            {
                ()
            }
_ => return Err(ImageViewCreationError::IncompatibleType),
}
let inner =
unsafe { UnsafeImageView::new(image_inner, ty, mipmap_levels, array_layers.clone())? };
Ok(Arc::new(ImageView {
image,
inner,
array_layers,
format,
identity_swizzle: true, // FIXME:
ty,
}))
}
/// Returns the wrapped image that this image view was created from.
pub fn image(&self) -> &I {
&self.image
}
}
/// Error that can happen when creating an image view.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ImageViewCreationError {
/// Allocating memory failed.
AllocError(DeviceMemoryAllocError),
/// The specified range of array layers was out of range for the image.
ArrayLayersOutOfRange,
/// The specified range of mipmap levels was out of range for the image.
MipMapLevelsOutOfRange,
/// The requested [`ImageViewType`] was not compatible with the image, or with the specified ranges of array layers and mipmap levels.
IncompatibleType,
/// The image was not created with
/// [one of the required usages](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#valid-imageview-imageusage)
/// for image views.
InvalidImageUsage,
}
impl error::Error for ImageViewCreationError {
#[inline]
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ImageViewCreationError::AllocError(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Display for ImageViewCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
fmt,
"{}",
match *self {
                ImageViewCreationError::AllocError(_) => "allocating memory failed",
ImageViewCreationError::ArrayLayersOutOfRange => "array layers are out of range",
ImageViewCreationError::MipMapLevelsOutOfRange => "mipmap levels are out of range",
ImageViewCreationError::IncompatibleType =>
"image view type is not compatible with image, array layers or mipmap levels",
ImageViewCreationError::InvalidImageUsage =>
"the usage of the image is not compatible with image views",
}
)
}
}
impl From<OomError> for ImageViewCreationError {
#[inline]
fn from(err: OomError) -> ImageViewCreationError {
ImageViewCreationError::AllocError(DeviceMemoryAllocError::OomError(err))
}
}
/// A low-level wrapper around a `vkImageView`.
pub struct UnsafeImageView {
view: vk::ImageView,
device: Arc<Device>,
}
impl UnsafeImageView {
/// Creates a new view from an image.
///
/// # Safety
/// - The returned `UnsafeImageView` must not outlive `image`.
/// - `image` must have a usage that is compatible with image views.
/// - `ty` must be compatible with the dimensions and flags of the image.
/// - `mipmap_levels` must not be empty, must be within the range of levels of the image, and be compatible with the requested `ty`.
/// - `array_layers` must not be empty, must be within the range of layers of the image, and be compatible with the requested `ty`.
///
/// # Panics
    /// Panics if the image is a YCbCr image, since the Vulkano API is not yet flexible enough to
    /// specify the aspect of the image.
pub unsafe fn new(
image: &UnsafeImage,
ty: ImageViewType,
mipmap_levels: Range<u32>,
array_layers: Range<u32>,
) -> Result<UnsafeImageView, OomError> {
let vk = image.device().pointers();
debug_assert!(mipmap_levels.end > mipmap_levels.start);
debug_assert!(mipmap_levels.end <= image.mipmap_levels());
debug_assert!(array_layers.end > array_layers.start);
debug_assert!(array_layers.end <= image.dimensions().array_layers());
let aspect_mask = match image.format().ty() {
FormatTy::Float | FormatTy::Uint | FormatTy::Sint | FormatTy::Compressed => {
vk::IMAGE_ASPECT_COLOR_BIT
}
FormatTy::Depth => vk::IMAGE_ASPECT_DEPTH_BIT,
FormatTy::Stencil => vk::IMAGE_ASPECT_STENCIL_BIT,
FormatTy::DepthStencil => vk::IMAGE_ASPECT_DEPTH_BIT | vk::IMAGE_ASPECT_STENCIL_BIT,
// Not yet supported --> would require changes to ImmutableImage API :-)
FormatTy::Ycbcr => unimplemented!(),
};
let view = {
let infos = vk::ImageViewCreateInfo {
sType: vk::STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
image: image.internal_object(),
viewType: ty.into(),
format: image.format() as u32,
components: vk::ComponentMapping {
r: 0,
g: 0,
b: 0,
a: 0,
}, // FIXME:
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmap_levels.start,
levelCount: mipmap_levels.end - mipmap_levels.start,
baseArrayLayer: array_layers.start,
layerCount: array_layers.end - array_layers.start,
},
};
let mut output = MaybeUninit::uninit();
check_errors(vk.CreateImageView(
image.device().internal_object(),
&infos,
ptr::null(),
output.as_mut_ptr(),
))?;
output.assume_init()
};
Ok(UnsafeImageView {
view,
device: image.device().clone(),
})
}
}
unsafe impl VulkanObject for UnsafeImageView {
type Object = vk::ImageView;
const TYPE: vk::ObjectType = vk::OBJECT_TYPE_IMAGE_VIEW;
#[inline]
fn internal_object(&self) -> vk::ImageView {
self.view
}
}
impl fmt::Debug for UnsafeImageView {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "<Vulkan image view {:?}>", self.view)
}
}
impl Drop for UnsafeImageView {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroyImageView(self.device.internal_object(), self.view, ptr::null());
}
}
}
impl PartialEq for UnsafeImageView {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.view == other.view && self.device == other.device
}
}
impl Eq for UnsafeImageView {}
impl Hash for UnsafeImageView {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.view.hash(state);
self.device.hash(state);
}
}
/// The geometry type of an image view.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ImageViewType {
Dim1d,
Dim1dArray,
Dim2d,
Dim2dArray,
Dim3d,
Cubemap,
CubemapArray,
}
impl From<ImageViewType> for vk::ImageViewType {
fn from(image_view_type: ImageViewType) -> Self {
match image_view_type {
ImageViewType::Dim1d => vk::IMAGE_VIEW_TYPE_1D,
ImageViewType::Dim1dArray => vk::IMAGE_VIEW_TYPE_1D_ARRAY,
ImageViewType::Dim2d => vk::IMAGE_VIEW_TYPE_2D,
ImageViewType::Dim2dArray => vk::IMAGE_VIEW_TYPE_2D_ARRAY,
ImageViewType::Dim3d => vk::IMAGE_VIEW_TYPE_3D,
ImageViewType::Cubemap => vk::IMAGE_VIEW_TYPE_CUBE,
ImageViewType::CubemapArray => vk::IMAGE_VIEW_TYPE_CUBE_ARRAY,
}
}
}
/// Trait for types that represent an image view that the GPU can access.
pub unsafe trait ImageViewAbstract {
/// Returns the wrapped image that this image view was created from.
fn image(&self) -> &dyn ImageAccess;
/// Returns the inner unsafe image view object used by this image view.
fn inner(&self) -> &UnsafeImageView;
/// Returns the range of array layers of the wrapped image that this view exposes.
fn array_layers(&self) -> Range<u32>;
/// Returns the format of this view. This can be different from the parent's format.
fn format(&self) -> Format;
/// Returns true if the view doesn't use components swizzling.
///
/// Must be true when the view is used as a framebuffer attachment or TODO: I don't remember
/// the other thing.
fn identity_swizzle(&self) -> bool;
/// Returns the [`ImageViewType`] of this image view.
fn ty(&self) -> ImageViewType;
/// Returns true if the given sampler can be used with this image view.
///
/// This method should check whether the sampler's configuration can be used with the format
/// of the view.
// TODO: return a Result and propagate it when binding to a descriptor set
fn can_be_sampled(&self, _sampler: &Sampler) -> bool {
true /* FIXME */
}
}
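// A hypothetical sketch (not from the original source) of consuming the trait through a trait
// object; `describe` relies only on methods defined above:
//
//     fn describe(view: &dyn ImageViewAbstract) -> String {
//         format!("{:?}, layers {:?}", view.ty(), view.array_layers())
//     }
//
// The blanket `SafeDeref` impl below means an `Arc<ImageView<I>>` can be passed wherever an
// `ImageViewAbstract` is expected.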
unsafe impl<I> ImageViewAbstract for ImageView<I>
where
I: ImageAccess,
{
#[inline]
fn image(&self) -> &dyn ImageAccess {
&self.image
}
#[inline]
fn inner(&self) -> &UnsafeImageView {
&self.inner
}
#[inline]
fn array_layers(&self) -> Range<u32> {
self.array_layers.clone()
}
#[inline]
fn format(&self) -> Format {
// TODO: remove this default impl
self.format
}
#[inline]
fn identity_swizzle(&self) -> bool {
self.identity_swizzle
}
#[inline]
fn ty(&self) -> ImageViewType {
self.ty
}
}
unsafe impl<T> ImageViewAbstract for T
where
T: SafeDeref,
T::Target: ImageViewAbstract,
{
#[inline]
fn image(&self) -> &dyn ImageAccess {
(**self).image()
}
#[inline]
fn inner(&self) -> &UnsafeImageView {
(**self).inner()
}
#[inline]
    fn array_layers(&self) -> Range<u32> {
        (**self).array_layers()
    }
#[inline]
fn format(&self) -> Format {
(**self).format()
}
#[inline]
fn identity_swizzle(&self) -> bool {
(**self).identity_swizzle()
}
#[inline]
fn ty(&self) -> ImageViewType {
(**self).ty()
}
#[inline]
fn can_be_sampled(&self, sampler: &Sampler) -> bool {
(**self).can_be_sampled(sampler)
}
}
impl PartialEq for dyn ImageViewAbstract + Send + Sync {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.inner() == other.inner()
}
}
impl Eq for dyn ImageViewAbstract + Send + Sync {}
impl Hash for dyn ImageViewAbstract + Send + Sync {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.inner().hash(state);
}
}
// types.rs
//! Enumerated data types used when working with the library
use std::u32;
/// Possible database data types
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(deprecated)] // Allow deprecated variants inside the enum because of https://github.com/rust-lang/rust/issues/38832
#[repr(u16)]
pub enum Type {
    /// (ORANET TYPE) character string. Used for columns of the `varchar2`/`nvarchar2` types.
CHR = 1,
/// (ORANET TYPE) oracle numeric
NUM = 2,
/// (ORANET TYPE) integer
INT = 3,
/// (ORANET TYPE) Floating point number
FLT = 4,
/// zero terminated string
STR = 5,
/// NUM with preceding length byte
VNU = 6,
/// (ORANET TYPE) Packed Decimal Numeric
PDN = 7,
/// long
    #[deprecated(note="Not recommended for use by Oracle, use LOB instead")]
LNG = 8,
/// Variable character string
VCS = 9,
/// Null/empty PCC Descriptor entry
NON = 10,
/// rowid
RID = 11,
/// date in oracle format
DAT = 12,
/// binary in VCS format
VBI = 15,
/// Native Binary float
BFLOAT = 21,
    /// Native binary double
BDOUBLE = 22,
    /// binary data (DTYBIN). Used for columns of the `raw` type.
BIN = 23,
    /// long binary. Used for columns of the `long raw` type.
LBI = 24,
/// unsigned integer
UIN = 68,
/// Display sign leading separate
SLS = 91,
/// Longer longs (char)
LVC = 94,
/// Longer long binary
LVB = 95,
    /// Ansi fixed char. Used for columns of the `char`/`nchar` types.
AFC = 96,
/// Ansi Var char
AVC = 97,
/// binary float canonical
IBFLOAT = 100,
/// binary double canonical
IBDOUBLE = 101,
/// cursor type
CUR = 102,
/// rowid descriptor
RDD = 104,
/// label type
LAB = 105,
/// oslabel type
OSL = 106,
/// named object type
NTY = 108,
/// ref type
REF = 110,
/// character lob
CLOB = 112,
/// binary lob
BLOB = 113,
/// binary file lob
BFILEE = 114,
/// character file lob
CFILEE = 115,
/// result set type
RSET = 116,
/// named collection type (varray or nested table)
NCO = 122,
/// OCIString type
VST = 155,
/// OCIDate type
ODT = 156,
// datetimes and intervals
/// ANSI Date
DATE = 184,
/// TIME
TIME = 185,
/// TIME WITH TIME ZONE
TIME_TZ = 186,
/// TIMESTAMP
TIMESTAMP = 187,
/// TIMESTAMP WITH TIME ZONE
TIMESTAMP_TZ = 188,
/// INTERVAL YEAR TO MONTH
INTERVAL_YM = 189,
/// INTERVAL DAY TO SECOND
INTERVAL_DS = 190,
    /// TIMESTAMP WITH LOCAL TIME ZONE
TIMESTAMP_LTZ = 232,
/// pl/sql representation of named types
PNTY = 241,
// some pl/sql specific types
    /// pl/sql 'record' (or %rowtype)
REC = 250,
/// pl/sql 'indexed table'
TAB = 251,
/// pl/sql 'boolean'
BOL = 252,
}
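// A hypothetical sketch (not from the original source): dispatching on the column type reported
// by the server, e.g. to decide whether a LOB locator is needed instead of a plain fetch buffer:
//
//     let needs_lob_locator = matches!(col_type, Type::CLOB | Type::BLOB | Type::BFILEE);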
/// The mode in which to create the environment when calling `OCIEnvNlsCreate()`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum CreateMode {
/// The default value, which is non-UTF-16 encoding.
Default = 0,
/// Uses threaded environment. Internal data structures not exposed to the user are protected from concurrent
/// accesses by multiple threads.
Threaded = 1 << 0,
/// Uses object features.
Object = 1 << 1,
/// Uses publish-subscribe notifications.
Events = 1 << 2,
//Shared = 1 << 4,
/// Suppresses the calling of the dynamic callback routine OCIEnvCallback(). The default behavior is to allow
/// calling of OCIEnvCallback() when the environment is created.
/// See Also:
/// "Dynamic Callback Registrations"
NoUcb = 1 << 6,
/// No mutual exclusion (mutex) locking occurs in this mode. All OCI calls done on the environment handle,
/// or on handles derived from the environment handle, must be serialized. `OCI_THREADED` must also be specified
/// when `OCI_ENV_NO_MUTEX` is specified.
EnvNoMutex = 1 << 7,
//SharedExt = 1 << 8,
//AlwaysBlocking = 1 << 10,
//UseLDAP = 1 << 12,
//RegLDAPOnly = 1 << 13,
//UTF16 = 1 << 14,
//AFC_PAD_ON = 1 << 15,
//NewLengthSemantics = 1 << 17,
//NoMutexStmt = 1 << 18,
//MutexEnvOnly = 1 << 19,
/// Suppresses NLS character validation; NLS character validation suppression is on by default beginning with
/// Oracle Database 11g Release 1 (11.1). Use `OCI_ENABLE_NLS_VALIDATION` to enable NLS character validation.
/// See Comments for more information.
SuppressNlsValidation = 1 << 20,
//OCI_MUTEX_TRY = 1 << 21,
/// Turns on N' substitution.
NCharLiteralReplaceOn = 1 << 22,
/// Turns off N' substitution. If neither this mode nor `OCI_NCHAR_LITERAL_REPLACE_ON` is used, the substitution
/// is determined by the environment variable `ORA_NCHAR_LITERAL_REPLACE`, which can be set to `TRUE` or `FALSE`.
/// When it is set to TRUE, the replacement is turned on; otherwise it is turned off, the default setting in OCI.
NCharLiteralReplaceOff = 1 << 23,
/// Enables NLS character validation. See Comments for more information.
EnableNlsValidation = 1 << 24,
}
impl Default for CreateMode {
fn default() -> Self { CreateMode::Default }
}
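// A hypothetical sketch (not from the original source): the variants are bit flags, so an FFI
// call would typically OR their numeric values together:
//
//     let mode = CreateMode::Threaded as u32 | CreateMode::Object as u32;
//     assert_eq!(mode, 0b11);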
/// The mode in which to attach to the database server when calling `OCIServerAttach()`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum AttachMode {
/// For encoding, this value tells the server handle to use the setting in the environment handle.
Default = 0,
/// Use connection pooling.
    CPool = 1 << 9,
}
impl Default for AttachMode {
    fn default() -> Self { AttachMode::Default }
}
/// Specifies the various modes of operation
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum AuthMode {
/// In this mode, the user session context returned can only ever be set with the server context
/// specified in `svchp`. For encoding, the server handle uses the setting in the environment handle.
Default = 0,
/// In this mode, the new user session context can be set in a service handle with a different server handle.
/// This mode establishes the user session context. To create a migratable session, the service handle must already
/// be set with a nonmigratable user session, which becomes the "creator" session of the migratable session. That is,
/// a migratable session must have a nonmigratable parent session.
///
/// `Migrate` should not be used when the session uses connection pool underneath. The session migration and multiplexing
/// happens transparently to the user.
Migrate = 1 << 0,
/// In this mode, you are authenticated for `SYSDBA` access
SysDba = 1 << 1,
/// In this mode, you are authenticated for `SYSOPER` access
SysOper = 1 << 2,
/// This mode can only be used with `SysDba` or `SysOper` to authenticate for certain administration tasks
PrelimAuth = 1 << 3,
//PICache = 1 << 4,
/// Enables statement caching with default size on the given service handle. It is optional to pass this mode
/// if the application is going to explicitly set the size later using `OCI_ATTR_STMTCACHESIZE` on that service handle.
StmtCache = 1 << 6,
//StatelessCall = 1 << 7,
//StatelessTxn = 1 << 8,
//StatelessApp = 1 << 9,
//SysAsm = 1 << 14,
//SysBkp = 1 << 16,
//SysDgd = 1 << 17,
//SysKmt = 1 << 18,
}
impl Default for AuthMode {
fn default() -> Self { AuthMode::Default }
}
/// The Oracle dialect used to parse the SQL of queries. It is recommended to always use the
/// server's native dialect; it is the default dialect when [`prepare`][1] is called without
/// parameters.
///
/// [1]: ../struct.Connection.html#method.prepare
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum Syntax {
    /// The syntax depends on the version of the database server.
Native = 1,
/// V7 ORACLE parsing syntax.
V7 = 2,
//V8 = 3,
/// Specifies the statement to be translated according to the SQL translation profile set in the session.
Foreign = u32::MAX as isize,
}
impl Default for Syntax {
fn default() -> Self { Syntax::Native }
}
/// The kinds a statement can have after it has been prepared.
/// The statement kind determines which parameters the `OCIExecute()` function must be called with.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(dead_code)]
#[repr(u16)]
pub enum StatementType {
/// Unknown statement
UNKNOWN = 0,
/// Select statement
SELECT = 1,
/// Update statement
UPDATE = 2,
/// delete statement
DELETE = 3,
/// Insert Statement
INSERT = 4,
/// create statement
CREATE = 5,
/// drop statement
DROP = 6,
/// alter statement
ALTER = 7,
/// begin... (pl/sql statement)
BEGIN = 8,
/// declare.. (pl/sql statement)
DECLARE = 9,
/// corresponds to kpu call
CALL = 10,
}
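// A hypothetical sketch (not from the original source): branching on the kind reported after
// preparing a statement, since it determines how `OCIExecute()` must be invoked (e.g. SELECT
// statements are executed with zero iterations before fetching):
//
//     match stmt_type {
//         StatementType::SELECT => { /* execute with iters = 0, then fetch rows */ }
//         StatementType::INSERT | StatementType::UPDATE | StatementType::DELETE => { /* iters = 1 */ }
//         _ => { /* DDL or PL/SQL block */ }
//     }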
/// Kinds of encodings supported by the database.
///
/// The documentation nowhere lists the correspondence between an encoding's name and its numeric
/// value, so the values were obtained with the following SQL script:
/// ```sql
/// select value as name, nls_charset_id(value) as val
/// from v$nls_valid_values
/// where parameter = 'CHARACTERSET'
/// order by nls_charset_id(value)
/// ```
/// http://www.mydul.net/charsets.html
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub enum Charset {
    /// Use the settings from the environment variables `NLS_LANG` (for the `CHAR`, `VARCHAR2` and
    /// `CLOB` types) and `NLS_NCHAR` (for the `NCHAR`, `NVARCHAR2` and `NCLOB` types).
    ///
    /// This is the database's default setting, and the library returns it from its
    /// implementation of the `default()` method.
Default = 0,
/// ASCII 7-bit American
US7ASCII = 1,
/// IBM-PC Code Page 437 8-bit American
US8PC437 = 4,
/// IBM-PC Code Page 850 8-bit West European
WE8PC850 = 10,
/// IBM-PC Code Page 858 8-bit West European
WE8PC858 = 28,
/// ISO 8859-1 West European
WE8ISO8859P1 = 31,
/// ISO 8859-2 East European
EE8ISO8859P2 = 32,
/// ISO 8859-3 South European
SE8ISO8859P3 = 33,
/// ISO 8859-4 North and North-East European
NEE8ISO8859P4 = 34,
/// ISO 8859-5 Latin/Cyrillic
CL8ISO8859P5 = 35,
/// ISO 8859-6 Latin/Arabic
AR8ISO8859P6 = 36,
/// ISO 8859-7 Latin/Greek
EL8ISO8859P7 = 37,
/// ISO 8859-8 Latin/Hebrew
IW8ISO8859P8 = 38,
/// ISO 8859-9 West European & Turkish
WE8ISO8859P9 = 39,
/// ISO 8859-10 North European
NE8ISO8859P10 = 40,
/// Thai Industrial Standard 620-2533 - ASCII 8-bit
TH8TISASCII = 41,
/// MS Windows Code Page 1258 8-bit Vietnamese
VN8MSWIN1258 = 45,
/// ISO 8859-1 West European
WE8ISO8859P15 = 46,
/// ISO 8859-13 Baltic
BLT8ISO8859P13 = 47,
/// ISO 8859-14 Celtic
CEL8ISO8859P14 = 48,
/// KOI8 Ukrainian Cyrillic
CL8KOI8U = 51,
/// ISO 8859-9 Azerbaijani
AZ8ISO8859P9E = 52,
/// IBM-PC Code Page 852 8-bit East European
EE8PC852 = 150,
/// IBM-PC Code Page 866 8-bit Latin/Cyrillic
RU8PC866 = 152,
/// IBM-PC Code Page 857 8-bit Turkish
TR8PC857 = 156,
/// MS Windows Code Page 1250 8-bit East European
EE8MSWIN1250 = 170,
/// MS Windows Code Page 1251 8-bit Latin/Cyrillic
CL8MSWIN1251 = 171,
/// MS Windows Code Page 923 8-bit Estonian
ET8MSWIN923 = 172,
/// MS Windows Code Page 1253 8-bit Latin/Greek
EL8MSWIN1253 = 174,
/// MS Windows Code Page 1255 8-bit Latin/Hebrew
IW8MSWIN1255 = 175,
/// MS Windows Code Page 921 8-bit Lithuanian
LT8MSWIN921 = 176,
/// MS Windows Code Page 1254 8-bit Turkish
TR8MSWIN1254 = 177,
/// MS Windows Code Page 1252 8-bit West European
WE8MSWIN1252 = 178,
/// MS Windows Code Page 1257 8-bit Baltic
BLT8MSWIN1257 = 179,
/// Latvian Standard LVS8-92(1) Windows/Unix 8-bit Baltic
BLT8CP921 = 191,
/// RELCOM Internet Standard 8-bit Latin/Cyrillic
CL8KOI8R = 196,
/// IBM-PC Code Page 775 8-bit Baltic
BLT8PC775 = 197,
/// IBM-PC Code Page 737 8-bit Greek/Latin
EL8PC737 = 382,
/// ASMO Extended 708 8-bit Latin/Arabic
AR8ASMO8X = 500,
/// Arabic MS-DOS 720 Server 8-bit Latin/Arabic
AR8ADOS720 = 558,
/// MS Windows Code Page 1256 8-Bit Latin/Arabic
AR8MSWIN1256 = 560,
/// EUC 24-bit Japanese
JA16EUC = 830,
/// Shift-JIS 16-bit Japanese
JA16SJIS = 832,
/// Same as `JA16EUC` except for the way that the wave dash and the tilde are mapped to and from Unicode
JA16EUCTILDE = 837,
/// Same as `JA16SJIS` except for the way that the wave dash and the tilde are mapped to and from Unicode
JA16SJISTILDE = 838,
/// KSC5601 16-bit Korean
KO16KSC5601 = 840,
/// MS Windows Code Page 949 Korean
KO16MSWIN949 = 846,
/// CGB2312-80 16-bit Simplified Chinese
ZHS16CGB231280 = 850,
/// GBK 16-bit Simplified Chinese
ZHS16GBK = 852,
/// GB18030 32-bit Simplified Chinese
ZHS32GB18030 = 854,
/// EUC 32-bit Traditional Chinese
ZHT32EUC = 860,
/// BIG5 16-bit Traditional Chinese
ZHT16BIG5 = 865,
/// MS Windows Code Page 950 Traditional Chinese
ZHT16MSWIN950 = 867,
/// MS Windows Code Page 950 with Hong Kong Supplementary Character Set HKSCS-2001 (character set conversion to and from Unicode is based on Unicode 3.0)
ZHT16HKSCS = 868,
/// Unicode 3.0 UTF-8 Universal character set, CESU-8 compliant
UTF8 = 871,
/// Unicode 7.0 UTF-8 Universal character set
AL32UTF8 = 873,
/// Unicode 7.0 UTF-16 Universal character set
AL16UTF16 = 2000,
}
impl Default for Charset {
fn default() -> Self {
Charset::Default
}
}
// traits.rs
//! Architecture trait
use crate::analysis::{Disasm, Mappable, RequisiteSet, Result, Trace};
use crate::arch::ArchName;
use crate::ast::Literal;
use crate::cli::Nameable;
use crate::maths::{Numerical, Popcount};
use crate::memory::{Memory, Offset, Pointer, PtrNum};
use crate::reg::{Bitwise, State};
use num::Bounded;
use serde::{Deserialize, Serialize};
use std::convert::TryInto;
use std::fmt::{Debug, Display};
use std::str::FromStr;
/// Indicates a `Literal` that is specifically compatible with a given
/// architecture's formatting needs.
pub trait CompatibleLiteral<AR>:
Literal
+ From<AR::Word>
+ From<AR::Byte>
+ From<AR::Offset>
+ From<AR::PtrVal>
+ From<AR::SignedWord>
+ From<Pointer<AR::PtrVal>>
where
AR: Architecture,
{
}
impl<T, AR> CompatibleLiteral<AR> for T
where
AR: Architecture,
T: Literal
+ From<AR::Word>
+ From<AR::Byte>
+ From<AR::Offset>
+ From<AR::PtrVal>
+ From<AR::SignedWord>
+ From<Pointer<AR::PtrVal>>,
{
}
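// A hypothetical sketch (not from the original source): because of the blanket impl above, a
// literal type never implements `CompatibleLiteral` by hand; it only needs the listed `From`
// conversions, and generic code can then simply bound on the trait:
//
//     fn render<L, AR>(dis: &Disasm<L, AR::PtrVal, AR::Offset>)
//     where
//         AR: Architecture,
//         L: CompatibleLiteral<AR>,
//     { /* ... */ }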
/// Trait which represents all of the analysis methods an architecture
/// must provide in order to be supported.
pub trait Architecture
where
    Self: 'static
+ Copy
+ Debug
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Serialize
+ Send
+ Sync
+ Default,
Self::Register:
Mappable + Debug + Display + FromStr + Send + Sync + Serialize + for<'dw> Deserialize<'dw>,
Self::Word: Bitwise
+ Numerical
+ Popcount<Output = Self::Word>
+ TryInto<u64>
+ Mappable
+ Nameable
+ Ord
+ Bounded
+ Serialize
+ for<'dw> Deserialize<'dw>,
Self::Byte: Bitwise
+ Numerical
+ Popcount<Output = Self::Byte>
+ TryInto<u64>
+ Mappable
+ Nameable
+ Ord
+ Bounded
+ Serialize
+ for<'dw> Deserialize<'dw>,
Self::PtrVal:
PtrNum<Self::Offset> + Mappable + Nameable + Serialize + for<'dw> Deserialize<'dw>,
Self::Offset: Offset<Self::PtrVal>
+ Mappable
+ Nameable
+ Numerical
+ Serialize
+ for<'dw> Deserialize<'dw>,
{
/// The type which represents all possible register names in a given
/// architecture.
///
/// In some architectures, notably AArch32, the program counter is treated
/// as a normal register that can be operated upon. In such architectures,
/// you must either leave off that register from this type, or ensure that
/// it is always in synchronization with the contextualized program counter
/// when tracing code.
///
/// This type is customarily referred to as `RK` in other trait bounds.
type Register;
/// The type which represents a register value.
///
/// In the case that an architecture has multiple widths of registers, then
/// this type must either enumerate all possible register widths, or it
/// must use a representation wide enough to hold all of them and ensure
/// that any unused bits do not affect the results of tracing. It must also
/// ensure that register values intended for one type or width of register
/// do not get set on registers which cannot architecturally contain them
/// without being first converted.
///
/// This type is customarily referred to as `I` in other trait bounds.
type Word;
/// The type which represents a signed register value.
///
/// Assembler syntaxes that accept this particular architecture must
/// allow both signed and unsigned representations of the word type. It is
/// implied that the regular `Word` type is unsigned.
type SignedWord;
/// The type which represents a byte as addressed by memory.
///
/// In most modern architectures, bytes are 8 bits wide, and this should be
/// `u8`. Some exotic architectures are "word-addressed": incrementing an
/// address by one results in skipping more or less than eight bits in the
/// resulting memory. In that case, `Byte` would need to be wider or
/// narrower than 8 bits.
///
/// Note that most processors whose memory buses read or write more than
/// one byte at a time do *not* qualify as word-addressed; as reading the
/// next address still returns a byte even though the memory device it
/// comes from works in wider units of data.
///
/// This type is customarily referred to as `MV` in other trait bounds.
type Byte;
/// The type which represents this architecture's memory addresses.
///
/// An architecture is permitted to have non-numerical memory addresses,
/// such as architectures with separate I/O and memory address spaces. In
/// this case, you would use an enum type with an option for each separate
/// bus, and provide a separate `Offset` type which can be added to any
/// address to get a new one within the same bus.
///
/// This type is customarily referred to as `P` in other trait bounds.
type PtrVal;
/// The type which represents an offset from a given pointer value.
///
/// While architectures are allowed to provide multiple pointer value
/// representations, bundled together in an `enum`, every arm of the enum
/// must be able to support a numerical offset type that can be added to
/// any address to get a new one that many bytes further along.
///
/// This type is customarily referred to as `S` in other trait bounds.
type Offset;
/// Obtain this architecture's name.
fn name(&self) -> ArchName;
/// Inject architectural contexts from user-provided input intended to form
/// a valid contextual pointer.
///
    /// Each architecture is allowed to specify its own architectural
/// contexts, which are stored alongside platform contexts in the
/// `Pointer`. This function allows architectures to participate in context
/// parsing.
///
/// After parsing has completed, the context list given should be shortened
/// to exclude the contexts this function has processed, and those parsed
/// contexts should be provided to the `Pointer`. As a practical
/// convention, architectures should only parse contexts at the start or
/// end of a context list.
///
/// TODO: Why does this return `Option<()>`?!
fn parse_architectural_contexts(
contexts: &mut &[&str],
ptr: &mut Pointer<Self::PtrVal>,
) -> Option<()>;
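    // A hypothetical sketch (not from the original source): an architecture with a single bank
    // context might parse and strip a trailing context like this. The `set_arch_context` call is
    // an assumed, illustrative API, not one defined in this crate:
    //
    //     fn parse_architectural_contexts(contexts: &mut &[&str], ptr: &mut Pointer<u32>) -> Option<()> {
    //         if let Some((last, rest)) = contexts.split_last() {
    //             if let Ok(bank) = last.parse::<u64>() {
    //                 ptr.set_arch_context("bank", bank.into()); // hypothetical context API
    //                 *contexts = rest;
    //             }
    //         }
    //         Some(())
    //     }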
/// Statically disassemble instructions from a given address on a given
/// platform.
///
/// The `L` type parameter is the literal type of the given assembler to be
/// used when disassembling the program. The `IO` type parameter represents
/// an offset into a program image, which may be a wider type than `Offset`
    /// (e.g. if bank switching is in use). It is almost always `usize`.
fn disassemble<L>(
&self,
at: &Pointer<Self::PtrVal>,
bus: &Memory<Self>,
) -> Result<Disasm<L, Self::PtrVal, Self::Offset>, Self>
where
L: CompatibleLiteral<Self>;
/// Statically determine the input and output requisites of a given
/// instruction.
///
/// This method allows building a dependency graph of a given block by
/// matching output requisites of a given instruction to input requisites
/// of future instructions.
fn dataflow(
&self,
at: &Pointer<Self::PtrVal>,
bus: &Memory<Self>,
) -> Result<(RequisiteSet<Self>, RequisiteSet<Self>), Self>;
/// Determine what register values or memory addresses are required to be
/// resolved in order for symbolic execution to continue at a given PC.
///
/// This function returns a list of `Prerequisite`s, as well as a flag to
/// indicate if the prerequisite list is complete or not. If the list is
/// incomplete, then after resolving those prerequisites, you must
/// reanalyze the program at the same position with the new state in order
/// to find more prerequisites. Otherwise, you may continue tracing.
///
/// A prerequisite should only be listed if symbolic tracing cannot
/// continue otherwise. If every symbolic value is listed as a prerequisite,
/// then the state space of symbolic tracing will explode far faster than
/// if symbolic execution is occurring. When a value is listed as a
/// prerequisite, the state is said to have been forced into forking. It is
/// permissible to force a fork for the following reasons:
///
/// * The program counter or a context it needs is unresolved
/// * Instruction contents are unresolved in memory
/// * Memory addresses being read or written to are unresolved
/// * The target address of a jump or call is unresolved
/// * Flags or other information necessary to determine if a jump or call
/// is taken or not taken are unresolved
///
/// The `IO` type parameter represents an offset into a program image,
    /// which may be a wider type than `Offset` (e.g. if bank switching is in
/// use). It is almost always `usize`.
fn prerequisites(
&self,
at: Self::PtrVal,
bus: &Memory<Self>,
state: &State<Self>,
) -> Result<(RequisiteSet<Self>, bool), Self>;
/// Advance the state of program execution by one instruction, producing a
/// new state and program counter to continue from.
///
/// This function may error if the given state is ambiguous enough to
/// disallow further execution. In order to find out why, you need to ask
/// the `prerequisites` function to get what needs to be fixed about the
/// state. In fact, you should always call it before calling this one.
///
/// A state and program counter that produce an empty prerequisites list
/// for a given program must always cause `trace` to return a valid
/// continuation of the program.
///
/// TODO: There is currently no representation of states that halt the
/// program.
    fn trace(
        &self,
bus: &Memory<Self>,
state: State<Self>,
trace: &mut Trace<Self>,
) -> Result<(State<Self>, Self::PtrVal), Self>;
}
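// A hypothetical sketch (not from the original source) of the driver loop implied by the
// `prerequisites`/`trace` contract: resolve prerequisites until the list is complete and empty,
// then advance by one instruction:
//
//     loop {
//         let (reqs, complete) = arch.prerequisites(pc, &bus, &state)?;
//         if !reqs.is_empty() {
//             /* fork the state over the unresolved registers/memory */
//         } else if complete {
//             let (new_state, new_pc) = arch.trace(pc, &bus, state, &mut trace)?;
//             state = new_state;
//             pc = new_pc;
//         }
//     }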
/// Trait for type-erased structures that accept an architecture parameter and
/// are intended to be accessed with the `with_architecture!` macro.
pub trait AnyArch {
/// Name the architecture of this erased type.
fn arch(&self) -> ArchName;
}
// arabic.rs
//! Implementation of font shaping for Arabic scripts
//!
//! Code herein follows the specification at:
//! <https://github.com/n8willis/opentype-shaping-documents/blob/master/opentype-shaping-arabic-general.md>
use crate::error::{ParseError, ShapingError};
use crate::gsub::{self, FeatureMask, GlyphData, GlyphOrigin, RawGlyph};
use crate::layout::{GDEFTable, LayoutCache, LayoutTable, GSUB};
use crate::tag;
use crate::unicode::mcc::{
modified_combining_class, sort_by_modified_combining_class, ModifiedCombiningClass,
};
use std::convert::From;
use unicode_joining_type::{get_joining_type, JoiningType};
#[derive(Clone)]
struct ArabicData {
joining_type: JoiningType,
feature_tag: u32,
}
impl GlyphData for ArabicData {
fn merge(data1: ArabicData, _data2: ArabicData) -> ArabicData {
// TODO hold off for future Unicode normalisation changes
data1
}
}
// Arabic glyphs are represented as `RawGlyph` structs with `ArabicData` for its `extra_data`.
type ArabicGlyph = RawGlyph<ArabicData>;
impl ArabicGlyph {
fn is_transparent(&self) -> bool {
self.extra_data.joining_type == JoiningType::Transparent || self.multi_subst_dup
}
fn is_left_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::LeftJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn is_right_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::RightJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn feature_tag(&self) -> u32 {
self.extra_data.feature_tag
}
fn set_feature_tag(&mut self, feature_tag: u32) {
self.extra_data.feature_tag = feature_tag
}
}
impl From<&RawGlyph<()>> for ArabicGlyph {
fn from(raw_glyph: &RawGlyph<()>) -> ArabicGlyph {
        // Since there's no `Char` to work out the `ArabicGlyph`'s joining type when the glyph's
// `glyph_origin` is `GlyphOrigin::Direct`, we fallback to `JoiningType::NonJoining` as
// the safest approach
let joining_type = match raw_glyph.glyph_origin {
GlyphOrigin::Char(c) => get_joining_type(c),
GlyphOrigin::Direct => JoiningType::NonJoining,
};
ArabicGlyph {
unicodes: raw_glyph.unicodes.clone(),
glyph_index: raw_glyph.glyph_index,
liga_component_pos: raw_glyph.liga_component_pos,
glyph_origin: raw_glyph.glyph_origin,
small_caps: raw_glyph.small_caps,
multi_subst_dup: raw_glyph.multi_subst_dup,
is_vert_alt: raw_glyph.is_vert_alt,
fake_bold: raw_glyph.fake_bold,
fake_italic: raw_glyph.fake_italic,
variation: raw_glyph.variation,
extra_data: ArabicData {
joining_type,
// For convenience, we loosely follow the spec (`2. Computing letter joining
// states`) here by initialising all `ArabicGlyph`s to `tag::ISOL`
feature_tag: tag::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
            .position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() {
continue;
}
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
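    // Worked example (illustrative, not from the original source): for three consecutive
    // dual-joining letters [A, B, C], all initialised to ISOL, the loop above first marks
    // B=FINA and promotes A from ISOL to INIT; it then marks C=FINA and promotes B from
    // FINA to MEDI, giving [INIT, MEDI, FINA].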
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
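// Worked example (illustrative, not from the original source): for the input
// [base, Fatha U+064E (ccc 30), Shadda U+0651 (ccc 33)], the stable sort by modified combining
// class keeps the order [Fatha, Shadda]; step 2a then moves the Shadda to the front of the
// non-starter substring, yielding [base, Shadda, Fatha].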
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
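// Note on the comparator in `reorder_marks_shadda` above (illustrative): `sort_by` is a stable
// sort, so returning `Ordering::Less` only when the left element is a Shadda floats every Shadda
// to the front while preserving the relative order of all other characters.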
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
    // Get the start index of a possible sequence of characters with modified
    // combining class equal to `mcc`. (Assumes that `cs` is normalised to
    // NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
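        // Worked example (a sketch): with marks [A, B, M1, M2] where M1 and
        // M2 form the MCM run starting at `first` = 2, `count` = 2 and the
        // rotate turns [A, B, M1, M2] into [M1, M2, A, B], moving the MCM run
        // to the beginning of the substring.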
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA BELOW
| '\u{0658}' // ARABIC MARK NOON GHUNNA
| '\u{06DC}' // ARABIC SMALL HIGH SEEN
| '\u{06E3}' // ARABIC SMALL LOW SEEN
| '\u{06E7}' // ARABIC SMALL HIGH YEH
| '\u{06E8}' // ARABIC SMALL HIGH NOON
| '\u{08CA}' // ARABIC SMALL HIGH FARSI YEH
| '\u{08CB}' // ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW
| '\u{08CD}' // ARABIC SMALL HIGH ZAH
| '\u{08CE}' // ARABIC LARGE ROUND DOT ABOVE
| '\u{08CF}' // ARABIC LARGE ROUND DOT BELOW
| '\u{08D3}' // ARABIC SMALL LOW WAW
| '\u{08F3}' => true, // ARABIC SMALL HIGH WAW
_ => false,
    }
}
#[cfg(test)]
mod tests {
use super::*;
// https://www.unicode.org/reports/tr53/#Demonstrating_AMTRA.
mod reorder_marks {
use super::*;
#[test]
fn test_artificial() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0656}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}', '\u{0619}',
'\u{064F}', '\u{0650}', '\u{0656}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
// Variant of `test_artificial` where U+0656 is replaced with U+0655
// to test the reordering of MCM characters for the ccc = 220 group.
#[test]
fn test_artificial_custom() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0655}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0655}', '\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}',
'\u{0619}', '\u{064F}', '\u{0650}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
#[test]
fn test_example1() {
let cs1 = vec!['\u{0627}', '\u{064F}', '\u{0654}'];
let cs1_exp = vec!['\u{0627}', '\u{0654}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0627}', '\u{064F}', '\u{034F}', '\u{0654}'];
test_reorder_marks(&cs2, &cs2);
let cs3 = vec!['\u{0649}', '\u{0650}', '\u{0655}'];
let cs3_exp = vec!['\u{0649}', '\u{0655}', '\u{0650}'];
test_reorder_marks(&cs3, &cs3_exp);
let cs4 = vec!['\u{0649}', '\u{0650}', '\u{034F}', '\u{0655}'];
test_reorder_marks(&cs4, &cs4);
}
#[test]
fn test_example2a() {
let cs = vec!['\u{0635}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example2b() {
let cs1 = vec!['\u{0647}', '\u{0652}', '\u{06DC}'];
let cs1_exp = vec!['\u{0647}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0647}', '\u{0652}', '\u{034F}', '\u{06DC}'];
test_reorder_marks(&cs2, &cs2);
}
#[test]
fn test_example3() {
let cs1 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{06E7}'];
// The expected output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+06E7, U+0651]
//
// is incorrect, in that it fails to account for U+0651 Shadda moving to
// the front of U+0650 Kasra, per step 2a of AMTRA.
//
// U+06E7 Small High Yeh should then move to the front of Shadda per step
// 2b, resulting in:
let cs1_exp = vec!['\u{0640}', '\u{06E7}', '\u{0651}', '\u{0650}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{034F}', '\u{06E7}'];
// As above, Shadda should move to the front of Kasra, so the expected
// output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+0651, U+034F, U+06E7]
//
// (i.e. no changes) is also incorrect.
let cs2_exp = vec!['\u{0640}', '\u{0651}', '\u{0650}', '\u{034F}', '\u{06E7}'];
test_reorder_marks(&cs2, &cs2_exp);
}
#[test]
fn test_example4a() {
let cs = vec!['\u{0640}', '\u{0652}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example4b() {
let cs1 = vec!['\u{06C6}', '\u{064F}', '\u{06E8}'];
let cs1_exp = vec!['\u{06C6}', '\u{06E8}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{06C6}', '\u{064F}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs2, &cs2);
}
fn test_reorder_marks(cs: &Vec<char>, cs_exp: &Vec<char>) {
let mut cs_act = cs.clone();
reorder_marks(&mut cs_act);
assert_eq!(cs_exp, &cs_act);
}
}
}
kv.rs
// Imports reconstructed from usage below; the `crate::error` path is an
// assumption, since the KvsError/Result definitions live elsewhere in the crate.
use std::collections::{BTreeMap, HashMap};
use std::ffi::OsStr;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::ops::Range;
use std::path::{Path, PathBuf};
use serde::{Deserialize, Serialize};
use serde_json::Deserializer;
use crate::error::{KvsError, Result};
/// A log-structured key-value store.
///
/// When a key is set, kvs writes the set command to the log, then stores the
/// key and the command's log pointer in the in-memory index.
/// Similarly, when a key is removed, kvs writes the rm command to the log and
/// then removes the key from the in-memory index.
/// When a value is retrieved with the get command, kvs looks the key up in the
/// index and, if found, loads the command at the corresponding log pointer,
/// evaluates it, and returns the result.
///
/// On startup, kvs replays the commands from the logs in order from oldest to
/// newest, rebuilding the in-memory index as it goes.
///
/// When the amount of stale data reaches a given threshold, kvs compacts the
/// logs into a new log, dropping redundant entries to reclaim disk space.
///
/// Note that kvs is both a stateless command-line program and a library with a
/// stateful KVStore type: the CLI loads the index, executes one command, and
/// exits; as a library, KVStore loads the index once and then serves many
/// commands, maintaining the index state until it is dropped.
/// ref: https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
pub struct KVStore {
    path: PathBuf,
    // Map from log file number to its reader
    readers: HashMap<u64, BufReaderWithPos<File>>,
    // Writer for the log file currently being written
    writer: BufWriterWithPos<File>,
    // The in-memory index
    index: BTreeMap<String, CommandPos>,
    // inner: Arc<RwLock<IndexMap<Vec<u8>, Vec<u8>>>>,
    /// Number of the log file currently being written to
    current_gen: u64,
    /// Number of bytes of stale/invalid (removable) values
    uncompacted: u64,
}
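// Example usage of the store (a sketch; the path and error handling are for
// illustration only):
//
//     let mut store = KVStore::open("./data")?;
//     store.set("key1".to_string(), "value1".to_string())?;
//     assert_eq!(store.get("key1".to_string())?, Some("value1".to_string()));
//     store.delete("key1".to_string())?;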
#[derive(Debug)]
struct BufWriterWithPos<W: Write + Seek> {
writer: BufWriter<W>,
pos: u64,
}
impl<W: Write + Seek> BufWriterWithPos<W> {
fn new(mut inner: W) -> Result<Self> {
        let pos = inner.seek(SeekFrom::Current(0))?;
        Ok(BufWriterWithPos {
            writer: BufWriter::new(inner),
            pos,
})
}
}
impl<W: Write + Seek> Write for BufWriterWithPos<W> {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
let len = self.writer.write(buf)?;
self.pos += len as u64;
Ok(len)
}
fn flush(&mut self) -> std::io::Result<()> {
self.writer.flush()
}
}
struct BufReaderWithPos<R: Read + Seek> {
reader: BufReader<R>,
pos: u64,
}
impl<R: Read + Seek> BufReaderWithPos<R> {
fn new(mut inner: R) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0))?;
Ok(BufReaderWithPos {
reader: BufReader::new(inner),
pos,
})
}
}
// Sort the directory's log files by their numeric file names to get an ordered list of generations
fn sorted_gen_list(path: PathBuf) -> Result<Vec<u64>> {
let mut gen_list: Vec<u64> = std::fs::read_dir(&path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file() && path.extension() == Some("log".as_ref()))
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.trim_end_matches(".log"))
.map(str::parse::<u64>)
})
.flatten()
.collect();
gen_list.sort_unstable();
Ok(gen_list)
}
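// E.g. (file names for illustration) a data directory containing 3.log, 1.log
// and 2.log yields [1, 2, 3].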
fn log_path(dir: &Path, gen: u64) -> PathBuf {
dir.join(format!("{}.log", gen))
}
/// Read the commands from the log file numbered `gen` and load the
/// corresponding index entries into memory (the BTreeMap), returning the
/// number of reclaimable bytes
fn load(
gen: u64,
reader: &mut BufReaderWithPos<File>,
index: &mut BTreeMap<String, CommandPos>,
) -> Result<u64> {
    // Start reading from the beginning of the file
let mut pos = reader.seek(SeekFrom::Start(0))?;
let mut stream = Deserializer::from_reader(reader).into_iter::<Command>();
    // Number of bytes that a compaction could reclaim
let mut uncompacted = 0;
while let Some(cmd) = stream.next() {
        // Offset just past this command, i.e. where the next command starts
let new_pos = stream.byte_offset() as u64;
match cmd? {
Command::Set { key,.. } => {
if let Some(old_cmd) = index.insert(key, (gen, pos..new_pos).into()) {
uncompacted += old_cmd.len;
}
}
            // Removal
Command::Remove { key } => {
if let Some(old_cmd) = index.remove(&key) {
uncompacted += old_cmd.len;
}
                // The rm command itself is stale once replayed, so its own
                // length counts toward the reclaimable bytes as well
                uncompacted += new_pos - pos;
}
}
pos = new_pos;
}
Ok(uncompacted)
}
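// Worked example of the replay accounting (a sketch): replaying `set k=1`,
// `set k=2`, `rm k` leaves the index without "k", and `uncompacted` ends up
// equal to the combined byte length of all three entries, since each of them
// is now reclaimable.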
#[derive(Debug, Deserialize, Serialize)]
enum Command {
Set { key: String, value: String },
Remove { key: String },
}
/// The commands supported by the store; each serialized command is one log entry
impl Command {
fn set(key: String, value: String) -> Self {
Command::Set { key, value }
}
fn remove(key: String) -> Self {
Command::Remove { key }
}
}
/// Position of a command in the logs
#[derive(Debug)]
struct CommandPos {
    /// Log file number
    gen: u64,
    /// Byte offset of the entry within that file
    pos: u64,
    /// Length of the entry in bytes; one command counts as one log entry
len: u64,
}
impl From<(u64, Range<u64>)> for CommandPos {
fn from((gen, range): (u64, Range<u64>)) -> Self {
CommandPos {
gen,
pos: range.start,
len: range.end - range.start,
}
}
}
impl<R: Seek + Read> Seek for BufReaderWithPos<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
self.pos = self.reader.seek(pos)?;
Ok(self.pos)
}
}
impl<R: Seek + Read> Read for BufReaderWithPos<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let len = self.reader.read(buf)?;
self.pos += len as u64;
Ok(len)
}
}
impl KVStore {
    /// Open a KVStore instance rooted at the given path,
    /// creating the directory if it does not exist
    fn open(path: impl Into<PathBuf>) -> Result<Self> {
        // Scan the directory for log files and load them into the store
let using_path = path.into();
std::fs::create_dir_all(&using_path)?;
let mut readers = HashMap::new();
        // The index is kept in memory as a BTreeMap
let mut index: BTreeMap<String, CommandPos> = BTreeMap::new();
let gen_list = sorted_gen_list(using_path.clone())?;
let mut uncompacted = 0;
for &gen in &gen_list {
let mut reader = BufReaderWithPos::new(File::open(log_path(&using_path, gen))?)?;
uncompacted += load(gen, &mut reader, &mut index)?;
readers.insert(gen, reader);
}
let current_gen = gen_list.last().unwrap_or(&0) + 1;
let writer = new_log_file(&using_path, current_gen, &mut readers)?;
Ok(KVStore {
            path: using_path,
readers,
writer,
index,
current_gen,
uncompacted,
})
}
    /// Set a key to a value.
    /// 1. Serialize the command and flush it to the log file; 2. update the in-memory index
fn set(&mut self, k: String, v: String) -> Result<()> {
let cmd = Command::set(k, v);
let pos = self.writer.pos;
serde_json::to_writer(&mut self.writer, &cmd)?;
self.writer.flush()?;
        // Update the in-memory index
if let Command::Set { key,.. } = cmd {
if let Some(old_cmd) = self
.index
.insert(key, (self.current_gen, pos..self.writer.pos).into())
{
self.uncompacted += old_cmd.len;
}
}
Ok(())
}
    /// Get the value for a key.
    /// Returns the value if the key exists, or None if it does not
fn get(&mut self, k: String) -> Result<Option<String>> {
if let Some(cmd_pos) = self.index.get(&k) {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("Cannot find log reader");
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
let cmd_reader = reader.take(cmd_pos.len);
            if let Command::Set { value, .. } = serde_json::from_reader(cmd_reader)? {
                Ok(Some(value))
            } else {
                Err(KvsError::UnsupportCmdType)
            }
        } else {
            Ok(None)
        }
    }
    /// Check whether the key exists; if it does, append an rm command to the
    /// log, then remove the entry from the in-memory index
fn delete(&mut self, k: String) -> Result<()> {
if self.index.contains_key(&k) {
let rm_cmd = Command::remove(k.clone());
serde_json::to_writer(&mut self.writer, &rm_cmd)?;
self.writer.flush()?;
if let Command::Remove { key } = rm_cmd {
let old_cmd = self.index.remove(&key).expect("rm key error.");
self.uncompacted += old_cmd.len;
}
Ok(())
} else {
Err(KvsError::KeyNotFound)
}
}
    /// Compact the logs, dropping stale and unnecessary command entries
fn compact(&mut self) -> Result<()> {
let compaction_gen = self.current_gen + 1;
self.current_gen += 2;
self.writer = self.new_log_file(self.current_gen)?;
let mut compaction_writer = self.new_log_file(compaction_gen)?;
let mut new_pos = 0;
for cmd_pos in &mut self.index.values_mut() {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("cann't find log reader");
if reader.pos!= cmd_pos.pos {
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
}
let mut entry_reader = reader.take(cmd_pos.len);
let len = std::io::copy(&mut entry_reader, &mut compaction_writer)?;
*cmd_pos = (compaction_gen, new_pos..new_pos + len).into();
new_pos += len;
}
compaction_writer.flush()?;
        // Remove the stale log files
let stale_gens: Vec<_> = self
.readers
.keys()
.filter(|&&gen| gen < compaction_gen)
.cloned()
.collect();
for stale_gen in stale_gens {
self.readers.remove(&stale_gen);
std::fs::remove_file(log_path(&self.path, stale_gen))?;
}
self.uncompacted = 0;
Ok(())
}
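    // A sketch of how a caller might trigger compaction (nothing calls
    // `compact` yet; the threshold constant below is an assumption, not part
    // of this file):
    //
    //     const COMPACTION_THRESHOLD: u64 = 1024 * 1024;
    //     if self.uncompacted > COMPACTION_THRESHOLD {
    //         self.compact()?;
    //     }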
fn new_log_file(&mut self, gen: u64) -> Result<BufWriterWithPos<File>> {
new_log_file(&self.path, gen, &mut self.readers)
}
}
// List the files in a directory
fn read_dir(path: &str) -> Result<Vec<String>> {
    // Browse the directory entries with std::fs
let dirs: Vec<String> = std::fs::read_dir(path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file())
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.to_string())
})
.collect();
dbg!(&dirs);
Ok(dirs)
}
fn create_dir(path: &str) -> Result<bool> {
std::fs::create_dir_all(path)?;
Ok(true)
}
/// Create a new log file and register a reader for it
fn new_log_file(
path: &Path,
gen: u64,
readers: &mut HashMap<u64, BufReaderWithPos<File>>,
) -> Result<BufWriterWithPos<File>> {
let path = log_path(&path, gen);
let writer = BufWriterWithPos::new(
std::fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&path)?,
)?;
readers.insert(gen, BufReaderWithPos::new(File::open(&path)?)?);
Ok(writer)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_store1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".into();
        st.set(cache_key.clone(), "hello org".to_string()).unwrap();
assert_eq!(st.get(cache_key.to_string()).unwrap(), Some("hello org".to_string()));
}
#[test]
fn test_load1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".to_string();
dbg!(st.get(cache_key.to_string()).unwrap());
}
    // #[test]
// fn test_store_delete() {
// let mut st = KVStore::new();
// let cache_key: Vec<u8> = "org_1001_info".as_bytes().into();
// st.set(cache_key.clone(), "hello org".as_bytes().into());
// assert_eq!(st.delete(&cache_key), Some("hello org".as_bytes().into()));
// assert_eq!(st.get(&cache_key), None);
// }
#[test]
fn test_sorted_gen_list() {
let res = sorted_gen_list(PathBuf::from("./"));
dbg!(&res);
}
#[test]
fn test_serde() {
        // serde_json supports greedily deserializing objects from a stream
let data = b"[10] [1] [2]";
let de = serde_json::Deserializer::from_slice(data);
let mut stream = de.into_iter::<Vec<i32>>();
dbg!(stream.byte_offset()); // 0
dbg!(stream.next()); // Some([10])
dbg!(stream.byte_offset()); // 4
dbg!(stream.next()); // Some([1])
dbg!(stream.byte_offset()); // 8
dbg!(stream.next()); // Some([2])
dbg!(stream.byte_offset()); // 12
}
#[test]
fn test_read_dir() {
let res = read_dir("./");
assert!(res.is_ok());
}
#[test]
fn test_create_dir() {
        // At run time, `./` refers to the project root
let res = create_dir("./test-dir");
assert!(res.is_ok());
}
#[test]
fn test_new_log_file() {
let mut hs: HashMap<u64, BufReaderWithPos<File>> = HashMap::new();
let res = new_log_file(Path::new("./data"), 0, &mut hs);
dbg!(res);
}
#[test]
fn test_command_pos() {
        // Exercises the Into trait conversion for CommandPos
let c1: CommandPos = (1, 2..17).into();
dbg!(c1);
}
}
/*
>* Source: https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
### Part 1: Error handling
In this project, I/O errors can cause the code to fail. So before fully implementing the database, we still need to settle
one crucial thing: an error-handling strategy.
Rust's error handling is powerful, but using it well takes the right patterns and some boilerplate, and for this project the failure crate provides convenient error-handling tools.
The failure crate's guide describes several error-handling patterns.
Pick one of those strategies; your library can then define its own error type or import Errors from other crates. That error type will be used in the project's Result,
and the `?` operator can convert error types from other libraries into your own library's error type.
Then define a Result type alias that fixes the error type, so that instead of typing Result<T, YourErrorType> everywhere you can simply type Result. This is a very common Rust pattern.
Finally, import these types with use statements, and change the signature of main so that its return type is `Result<()>`.
Run `cargo check` to let the compiler find errors, then fix them. For now you can end `main` with `panic!()` to make it compile.
Settle on your error-handling strategy before moving on.
As in the previous project, you can create placeholder data structures and methods so the test cases compile. Defining an error type is simple. Then add panics wherever they are needed to compile the tests (`cargo test --no-run`).
Note: "error handling" in Rust is still evolving and improving. This course currently uses the [`failure`](https://docs.rs/failure/0.1.5/failure/) crate to make defining error types easier. While `failure` is well designed, its use is [not exactly best practice](https://github.com/rust-lang-nursery/rust-cookbook/issues/502#issue-387418261). Rust experts may yet develop better error-handling approaches.
Later courses may not keep using `failure`. In the meantime, it is a fine choice, and it can be used to learn how Rust error handling is evolving and being polished.
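As a concrete sketch of that strategy (the exact variants below are assumptions
inferred from the KvsError names used in the code above, not a prescribed design):

    use failure::Fail;

    #[derive(Fail, Debug)]
    pub enum KvsError {
        #[fail(display = "IO error: {}", _0)]
        Io(#[cause] std::io::Error),
        #[fail(display = "serde_json error: {}", _0)]
        Serde(#[cause] serde_json::Error),
        #[fail(display = "Key not found")]
        KeyNotFound,
        #[fail(display = "Unexpected command type")]
        UnsupportCmdType,
    }

    // From impls let `?` convert foreign errors into KvsError.
    impl From<std::io::Error> for KvsError {
        fn from(err: std::io::Error) -> KvsError {
            KvsError::Io(err)
        }
    }

    impl From<serde_json::Error> for KvsError {
        fn from(err: serde_json::Error) -> KvsError {
            KvsError::Serde(err)
        }
    }

    pub type Result<T> = std::result::Result<T, KvsError>;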
### Part 2: What the log does and how it works
Now it's finally time to read and write from disk and implement a real database. We'll use [serde](https://serde.rs/) to serialize the "set" and "rm" commands to strings, and the standard file I/O APIs to write them to disk.
This is the most basic logging behavior of `kvs`:
* "set"
  * The user invokes `kvs set mykey myvalue`
  * `kvs` creates the value for the set command, containing the key and the value
  * It then serializes the command to a `String`
  * It then appends the serialized command to the log file
  * On success, it exits silently with error code 0
  * On failure, it prints the error and exits with a non-zero error code
* "get"
  * The user invokes `kvs get mykey`
  * `kvs` reads one command at a time, recording each affected key and its file offset in an in-memory map, i.e. key -> log pointer
  * It then checks the map for the log pointer
  * On failure, it prints "Key not found" and exits with code 0
  * On success
    * It deserializes the command log to get the key and the value of the last record
    * It then prints the result to standard output and exits with code 0
* "rm"
  * The user invokes `kvs rm mykey`
  * Like the get command, kvs reads the whole log to build the in-memory index
  * It then checks whether the given key exists in the map
  * If it does not exist, it returns "Key not found"
  * On success, it creates the corresponding rm command, containing the key
  * It then serializes the command and appends it to the log
  * On success, it exits silently with error code 0
The log is the record of transactions committed to the database. By "replaying" the records in the log at startup, we can reproduce the database state at a particular point in time.
In this iteration you may store the values of the keys directly in memory (and therefore never read from the log on restart or rebuild). In a later iteration you only need to store "log pointers" (file offsets) in the index.
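A concrete sketch of the resulting on-disk format with serde_json and the
Command enum defined above (the key/value strings are for illustration):

    {"Set":{"key":"k","value":"v"}}{"Remove":{"key":"k"}}

Entries are appended back to back with no separator; the stream deserializer's
byte_offset() (demonstrated in test_serde above) recovers each entry's framing.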
### Part 3: Writing the log
We'll start with set. There will be quite a few steps, but most of them are straightforward, and you can validate your implementation by running the `cli_*` test cases.
`serde` is a large library with many options that supports multiple serialization formats. Basic serialization and deserialization only require annotating your structs appropriately and calling a function to write the serialized form into a `String` or a `Write` stream.
You need to pick a serialization format. Decide which properties you want: do you prioritize performance? Do you want to be able to read the log content as plain text? It's all in how you configure it, but remember to note your choice in a code comment.
There are other factors to consider: where does the system buffer, and where is buffering needed? What are the downstream effects of buffering? When should file handles be opened and closed? Which commands are supported? What is the lifetime of a `KVStore`?
Some of the APIs you call may fail and return a `Result` with an error type. Make sure your calling functions return a `Result` of your own error type and propagate errors upward with `?`.
Like the rm command, we want to check whether the key exists before writing the command to the log. Since the two cases need to be distinguished, we can unify all commands as variants of an enum. `serde` works perfectly with enums.
You can now implement the set and rm commands, focusing on their test cases, or you can read the next section on implementing the get command first. Keeping both commands in mind as you implement will help you. The choice is yours.
### Part 4: Reading the log
Now it's time to implement get. In this part you don't need to store log pointers in the index yet; that is left for the next section. Here you just read every command in the log at startup and execute them, keeping each key/value pair in memory, then read from memory as needed.
Should the whole log be read into memory at once and the data reproduced through a map type; should a single log entry be read at some point to reproduce a single map entry? Should the content be read from the file system into a buffer before serializing or deserializing? Think about how you use memory. Consider whether interacting with the kernel means reading data from an I/O stream.
Remember that "get" may not find a value; that case needs special handling. Here, our API returns `None` and the client prints a particular message and exits with a zero exit code.
Reading the log has one complication that you may already have anticipated while writing set: how do you distinguish the records in the log? That is, when do you stop reading one record and start reading the next? Do you need to at all? Maybe serde will deserialize a record directly from an I/O stream and stop reading when it's done, leaving the cursor in the right position to read subsequent records. Maybe serde reports an error when it sees two back-to-back records. Maybe you need to insert extra information to delimit the length of each record, or maybe there is some other way.
_Now implement "get"._
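A minimal sketch of the framing approach used by `load` above: serde_json's
StreamDeserializer reads one record at a time and reports where it stopped:

    let mut stream = Deserializer::from_reader(reader).into_iter::<Command>();
    while let Some(cmd) = stream.next() {
        let end = stream.byte_offset() as u64; // offset just past this record
        // ... index the command against its byte range ...
    }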
### Part 5: Storing log pointers in the index
At this point all tests except the compaction-related ones should pass. The next steps are performance and storage optimizations. As you implement them, think about what they actually buy you.
As described earlier, our database maintains an in-memory index of all the keys. That index maps to string pointers (where the value content lives) rather than to the key's own content.
This change requires being able to read the log from arbitrary offsets. Think about how that affects how we handle files.
If in the earlier steps you chose to store the string values directly in memory, you now need to change the code to store log pointers instead, loading values from disk as needed.
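A minimal sketch of reading from an arbitrary offset, mirroring what the get
implementation above already does with a CommandPos:

    reader.seek(SeekFrom::Start(cmd_pos.pos))?;
    let cmd_reader = reader.take(cmd_pos.len);
    let cmd: Command = serde_json::from_reader(cmd_reader)?;

serde_json stops at the end of the Take, so the stored length is all the
framing the reader needs.
*/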
kv.rs | 索引中。
/// 类似地,当删除一个键时,kvs 将 rm 命令写入日志,然后从内存索引中删除该键。
/// 当使用 get 命令检索键的值时,它检索索引,如果找到了,就从对应的日志指针上加载命令,执行命令并返回结果。
///
/// kvs 启动时,就会按从旧到新的顺序从日志中遍历并执行命令,内存索引也会对应的重建。
///
/// 当日志条数达到给定阈值时,kvs 会其压缩为一个新日志,删除冗余日志以回收磁盘空间。
///
/// 注意,kvs 项目既是一个无状态命令行程序,也是一个包含有状态 KVStore 类型的库:
/// 对于 CLI,使用 KVStore 类型将加载索引,执行命令,然后退出;对于库使用,它将加载索引,然后执行多个命令,维护索引状态,直到它被删除。
/// ref: https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
path: PathBuf,
// 数字到文件的映射
readers: HashMap<u64, BufReaderWithPos<File>>,
// 当前用于写的日志文件
writer: BufWriterWithPos<File>,
// 存在内存中的索引
index: BTreeMap<String, CommandPos>,
// inner: Arc<RwLock<IndexMap<Vec<u8>, Vec<u8>>>>,
/// 记录当前所写入的文件标号
current_gen: u64,
/// 记录过期/无效的(可被删除的)值的字节数量
uncompacted: u64,
}
#[derive(Debug)]
struct BufWriterWithPos<W: Write + Seek> {
writer: BufWriter<W>,
pos: u64,
}
impl<W: Write + Seek> BufWriterWithPos<W> {
fn new(mut inner: W) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0));
Ok(BufWriterWithPos {
writer: BufWriter::new(inner),
pos: 0,
})
}
}
impl<W: Write + Seek> Write for BufWriterWithPos<W> {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
let len = self.writer.write(buf)?;
self.pos += len as u64;
Ok(len)
}
fn flush(&mut self) -> std::io::Result<()> {
self.writer.flush()
}
}
struct BufReaderWithPos<R: Read + Seek> {
reader: BufReader<R>,
pos: u64,
}
impl<R: Read + Seek> BufReaderWithPos<R> {
fn new(mut inner: R) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0))?;
Ok(BufReaderWithPos {
reader: BufReader::new(inner),
pos,
})
}
}
// 将目录中的文件列表按名字进行排序,以便得到有序的日志文件列表
fn sorted_gen_list(path: PathBuf) -> Result<Vec<u64>> {
let mut gen_list: Vec<u64> = std::fs::read_dir(&path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file() && path.extension() == Some("log".as_ref()))
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.trim_end_matches(".log"))
.map(str::parse::<u64>)
})
.flatten()
.collect();
gen_list.sort_unstable();
Ok(gen_list)
}
fn log_path(dir: &Path, gen: u64) -> PathBuf {
dir.join(format!("{}.log", gen))
}
/// 通过文件序号,从对应的文件中读取指令并生成对应的索引加载到内存中(BTreeMap)
fn load(
gen: u64,
reader: &mut BufReaderWithPos<File>,
index: &mut BTreeMap<String, CommandPos>,
) -> Result<u64> {
// 确定从文件的某个位置开始读
let mut pos = reader.seek(SeekFrom::Start(0))?;
let mut stream = Deserializer::from_reader(reader).into_iter::<Command>();
// 通过压缩的手段可节省的字节数
let mut uncompacted = 0;
while let Some(cmd) = stream.next() {
// 匹配到下一条指令所对应的 offset
let new_pos = stream.byte_offset() as u64;
match cmd? {
Command::Set { key,.. } => {
if let Some(old_cmd) = index.insert(key, (gen, pos..new_pos).into()) {
uncompacted += old_cmd.len;
}
}
// 删除
Command::Remove { key } => {
if let Some(old_cmd) = index.remove(&key) {
uncompacted += old_cmd.len;
}
// rm 命令自身也是可回收的:key 被删除后,这条 rm 记录在压缩时
// 同样不需要保留,所以把它占用的字节数也计入 uncompacted
uncompacted += new_pos - pos;
}
}
pos = new_pos;
}
Ok(uncompacted)
}
#[derive(Debug, Deserialize, Serialize)]
enum Command {
Set { key: String, value: String },
Remove { key: String },
}
/// 定义支持的指令/日志
impl Command {
fn set(key: String, value: String) -> Self {
Command::Set { key, value }
}
fn remove(key: String) -> Self {
Command::Remove { key }
}
}
/// 命令位置
#[derive(Debug)]
struct CommandPos {
/// 日志文件序号
gen: u64,
/// 日志在一个文件中的偏移量
pos: u64,
/// 日志的长度。一个指令就算是一条日志
len: u64,
}
impl From<(u64, Range<u64>)> for CommandPos {
fn from((gen, range): (u64, Range<u64>)) -> Self {
CommandPos {
gen,
pos: range.start,
len: range.end - range.start,
}
}
}
impl<R: Seek + Read> Seek for BufReaderWithPos<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
self.pos = self.reader.seek(pos)?;
Ok(self.pos)
}
}
impl<R: Seek + Read> Read for BufReaderWithPos<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let len = self.reader.read(buf)?;
self.pos += len as u64;
Ok(len)
}
}
impl KVStore {
/// 基于一个路径启动一个 KvStore 实例。
/// 如果路径不存在,则创建
fn open(path: impl Into<PathBuf>) -> Result<Self> {
// 打开目录,查看目录中的日志文件列表,将其加载进 kvs
let using_path = path.into();
std::fs::create_dir_all(&using_path)?;
let mut readers = HashMap::new();
// 索引以 btree map 的形式存储在内存中
let mut index: BTreeMap<String, CommandPos> = BTreeMap::new();
let gen_list = sorted_gen_list(using_path.clone())?;
let mut uncompacted = 0;
for &gen in &gen_list {
let mut reader = BufReaderWithPos::new(File::open(log_path(&using_path, gen))?)?;
uncompacted += load(gen, &mut reader, &mut index)?;
readers.insert(gen, reader);
}
let current_gen = gen_list.last().unwrap_or(&0) + 1;
let writer = new_log_file(&using_path, current_gen, &mut readers)?;
Ok(KVStore {
path: using_path.clone(),
readers,
writer,
index,
current_gen,
uncompacted,
})
}
/// 设定键值对
/// 1.序列化指令,刷入文件中;2.索引写入内存
fn set(&mut self, k: String, v: String) -> Result<()> {
let cmd = Command::set(k, v);
let pos = self.writer.pos;
serde_json::to_writer(&mut self.writer, &cmd)?;
self.writer.flush()?;
// 更新内存索引;若覆盖了旧的 set 记录,则把旧命令的长度计入可回收字节数
if let Command::Set { key,.. } = cmd {
if let Some(old_cmd) = self
.index
.insert(key, (self.current_gen, pos..self.writer.pos).into())
{
self.uncompacted += old_cmd.len;
}
}
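// 按下方注释「部分 5」之后的设计,通常会在每次写入后检查可回收字节数,
// 超过阈值就触发压缩。这里给出一个最小接法作为示意;阈值大小(1 MiB)
// 是假设值,原项目把该常量放在文件顶部。
const COMPACTION_THRESHOLD: u64 = 1024 * 1024;
if self.uncompacted > COMPACTION_THRESHOLD {
self.compact()?;
}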
Ok(())
}
/// 读取值
/// 如果key存在则返回值,不存在,返回 None
fn get(&mut self, k: String) -> Result<Option<String>> {
if let Some(cmd_pos) = self.index.get(&k) {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("Cannot find log reader");
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
let cmd_reader = reader.take(cmd_pos.len);
if let Command::Set { value,.. } = serde_json::from_reader(cmd_reader)? {
Ok(Some(value))
} else {
Err(KvsError::UnsupportCmdType)
}
} else {
Ok(None)
}
}
/// 查询 key 是否存在:若存在,则把 rm 命令追加到日志,并从内存索引中删除该键(日志中的旧数据留待压缩时回收);若不存在,返回 KeyNotFound
fn delete(&mut self, k: String) -> Result<()> {
if self.index.contains_key(&k) {
let rm_cmd = Command::remove(k.clone());
serde_json::to_writer(&mut self.writer, &rm_cmd)?;
self.writer.flush()?;
if let Command::Remove { key } = rm_cmd {
let old_cmd = self.index.remove(&key).expect("rm key error.");
self.uncompacted += old_cmd.len;
}
Ok(())
} else {
Err(KvsError::KeyNotFound)
}
}
/// 压缩过期的不必要的数据指令
fn compact(&mut self) -> Result<()> {
let compaction_gen = self.current_gen + 1;
self.current_gen += 2;
self.writer = self.new_log_file(self.current_gen)?;
let mut compaction_writer = self.new_log_file(compaction_gen)?;
let mut new_pos = 0;
for cmd_pos in &mut self.index.values_mut() {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("can't find log reader");
if reader.pos != cmd_pos.pos {
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
}
let mut entry_reader = reader.take(cmd_pos.len);
let len = std::io::copy(&mut entry_reader, &mut compaction_writer)?;
*cmd_pos = (compaction_gen, new_pos..new_pos + len).into();
new_pos += len;
}
compaction_writer.flush()?;
// 删除过期的日志文件
let stale_gens: Vec<_> = self
.readers
.keys()
.filter(|&&gen| gen < compaction_gen)
.cloned()
.collect();
for stale_gen in stale_gens {
self.readers.remove(&stale_gen);
std::fs::remove_file(log_path(&self.path, stale_gen))?;
}
self.uncompacted = 0;
Ok(())
}
fn new_log_file(&mut self, gen: u64) -> Result<BufWriterWithPos<File>> {
new_log_file(&self.path, gen, &mut self.readers)
}
}
// 读取一个目录下的文件
fn read_dir(path: &str) -> Result<Vec<String>> {
// Rust 实现浏览文件
let dirs: Vec<String> = std::fs::read_dir(path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file())
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.to_string())
})
.collect();
dbg!(&dirs);
Ok(dirs)
}
fn create_dir(path: &str) -> Result<bool> {
std::fs::create_dir_all(path)?;
Ok(true)
}
/// 日志文件的创建
fn new_log_file(
path: &Path,
gen: u64,
readers: &mut HashMap<u64, BufReaderWithPos<File>>,
) -> Result<BufWriterWithPos<File>> {
let path = log_path(&path, gen);
let writer = BufWriterWithPos::new(
std::fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&path)?,
)?;
readers.insert(gen, BufReaderWithPos::new(File::open(&path)?)?);
Ok(writer)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_store1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".into();
st.set(cache_key.clone(), "hello org".to_string()).expect("set error.");
assert_eq!(st.get(cache_key.to_string()).unwrap(), Some("hello org".to_string()));
}
#[test]
fn test_load1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".to_string();
dbg!(st.get(cache_key.to_string()).unwrap());
}
// #[test]
// fn test_store_delete() {
// let mut st = KVStore::new();
// let cache_key: Vec<u8> = "org_1001_info".as_bytes().into();
// st.set(cache_key.clone(), "hello org".as_bytes().into());
// assert_eq!(st.delete(&cache_key), Some("hello org".as_bytes().into()));
// assert_eq!(st.get(&cache_key), None);
// }
#[test]
fn test_sorted_gen_list() {
let res = sorted_gen_list(PathBuf::from("./"));
dbg!(&res);
}
#[test]
fn test_serde() {
// serde_json 支持对 I/O 流做流式反序列化:每次只消费一条记录,游标停在该记录末尾
let data = b"[10] [1] [2]";
let de = serde_json::Deserializer::from_slice(data);
let mut stream = de.into_iter::<Vec<i32>>();
dbg!(stream.byte_offset()); // 0
dbg!(stream.next()); // Some([10])
dbg!(stream.byte_offset()); // 4
dbg!(stream.next()); // Some([1])
dbg!(stream.byte_offset()); // 8
dbg!(stream.next()); // Some([2])
dbg!(stream.byte_offset()); // 12
}
#[test]
fn test_read_dir() {
let res = read_dir("./");
assert!(res.is_ok());
}
#[test]
fn test_create_dir() {
// 执行时,`./` 指的是项目根目录
let res = create_dir("./test-dir");
assert!(res.is_ok());
}
#[test]
fn test_new_log_file() {
let mut hs: HashMap<u64, BufReaderWithPos<File>> = HashMap::new();
let res = new_log_file(Path::new("./data"), 0, &mut hs);
dbg!(res);
}
#[test]
fn test_command_pos() {
// Into trait 的使用和了解
let c1: CommandPos = (1, 2..17).into();
dbg!(c1);
}
}
/*
>* 资料来源:https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
### 部分 1:错误处理
在这个项目中,I/O 错误会导致代码执行失败。因此,在完全实现数据库之前,我们还需要确定一件
至关重要的事:错误处理策略。
Rust 的错误处理很强大,但用起来需要不少样板代码。对于这个项目,failure 库将提供便捷的错误处理工具。
failure 库的指南中描述了几种错误处理模式。
我们选择其中一种策略,然后在库中可以定义自己的错误类型,也可以导入其他 Error。这个策略对应的错误类型将会在项目中的 Result 中使用,
可以使用 `?` 操作符把其他库中的错误类型转换为自己库的错误类型。
这样,为 Result 定义一个含有错误类型的类型别名,编码时就不需要到处输入 Result<T, YourErrorType>,而可以简单地输入 Result。这是一种非常常见的 Rust 模式。
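例如:

    pub type Result<T> = std::result::Result<T, KvsError>;

(上文代码中的示意实现用的正是这个别名。)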
最后,使用 use 语句将这些类型导入到代码中,然后将 main 函数的签名的返回值部分修改为 `Result<()>`。
运行 `cargo check` 可以用编译器检查错误,然后修复这些错误。现在可以先使用 `panic!()` 来结束 `main` 函数,从而通过编译。
在前进之前,先确定好你的错误处理策略。
与之前的项目一样,你可以创建用于占位的数据结构和方法,以便跑通测试用例。现在你定义一个错误类型,这很简单。然后在所有需要编译测试用例的地方添加 panic(`cargo test --no-run`)。
注意:Rust 中的“错误处理”仍在发展和改进中。本课程目前使用 [`failure`](https://docs.rs/failure/0.1.5/failure/) 库,它让定义错误类型更容易。虽然 `failure` 设计不错,但它的使用[不是最佳实践](https://github.com/rust-lang-nursery/rust-cookbook/issues/502#issue-387418261)。Rust 专家可能会开发出更好的错误处理方式。
在后面的课程中有可能不会一直使用 `failure`。与此同时,它也是一个不错的选择,可以用来学习 Rust 错误处理的演进与取舍。
### 部分 2:log 的作用和原理
现在我们终于要开始从磁盘读写来实现一个真正的数据库。我们将使用 [serde](https://serde.rs/) 来把 "set" 和 "rm" 指令序列化为字符串,然后用标准的文件 I/O 接口来写到硬盘上。
下面这些是 `kvs` 最基本的日志行为:
* "set"
* 用户调用 `kvs set mykey myvalue`
* `kvs` 创建 set 指令包含的值,其中有 key 和 value
* 然后,程序将指令序列化为 `String`
* 然后,把序列化的指令追加到日志文件中
* 如果成功了,则以错误码 0 静默地退出
* 如果失败了,就打印错误,并以非 0 的错误码退出
* "get"
* 用户调用指令:`kvs get mykey`
* kvs 每次读取一条指令,将相应受影响的 key 和文件偏移量记录到内存的 map 中,即 key -> 日志指针
* 然后,检查 map 中的日志指针
* 如果失败,则打印“Key not found”,并以代码 0 退出
* 如果成功
* 它将指令日志反序列化得到最后的记录中的 key 和值
* 然后将结果打印到标准输出,并以代码 0 退出
* "rm"
* 用户调用指令 `kvs rm mykey`
* 和 get 指令一样,kvs 读取整条日志来在内存中构建索引
* 然后,它检查 map 中是否存在给定的 key
* 如果不存在,就返回“Key not found”
* 如果成功,将会创建对应的 rm 指令,其中包含了 key
* 然后将指令序列化后追加到日志中
* 如果成功,则以错误码 0 静默退出
日志是提交到数据库的事务记录。通过在启动时“重放”(replaying)日志中的记录,我们就可以重现数据库在某个时间点的特定状态。
在这个迭代中,你可以将键的值直接存储在内存中(因此在重启、重放时不会从日志中读取值的内容)。在后面的迭代中,只需在索引中存储日志指针(文件偏移量)。
### 部分 3:log 的写入
我们将从 set 开始。接下来将会有很多步骤。但大部分都比较容易实现,你可以通过运行 `cli_*` 相关测试用例来验证你的实现。
`serde` 是一个大型库,有许多功能选项,支持多种序列化格式。基本的序列化和反序列化只需要对结构体进行合适的注解,然后调用一个函数将序列化后的内容写入 `String` 或者 `Write` 流。
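例如,对于上面代码中的 Command 枚举,serde_json 默认的外部标签(externally tagged)表示会把 `Command::set("k1".into(), "v1".into())` 序列化为:

    {"Set":{"key":"k1","value":"v1"}}

而 `Command::remove("k1".into())` 则得到 {"Remove":{"key":"k1"}}。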
你需要选择一种序列化格式,并确定你看重的属性——是性能优先,还是希望能以纯文本形式直接阅读日志?这些都由你决定,但记得在代码注释里写明理由。
还有其他因素要考虑一下:系统在哪设置缓冲,以及哪些地方需要?缓冲后续的影响是什么?何时打开和关闭文件句柄?有哪些支持的命令?`KvStore` 的生命周期是什么?
你调用的一些 api 可能会失败,并返回错误类型的 `Result`。你需要确保调用函数会返回你自己设定的错误类型的 `Result`,并用 `?` 向上传递。
类似于 rm 命令,我们希望在把命令写入日志之前,还要检查 key 是否存在。因为两种场景需要区分开,所以可以使用 enum 类型的变体来统一所有命令。`serde` 可以完美地与枚举一起使用。
你可以现在就实现 set 和 rm 命令,重点放在对应的测试用例上;也可以先读下一节的 get 实现。两边对照着做会很有帮助。选择权在你。
### 部分 4:log 的读取
现在该实现 get 了。在这一部分中,你不需要把日志指针存储在索引中,而将其放到下一节进行实现。这一节我们只需在启动时,读取日志中的所有命令,执行它们将每个键值对保存在内存中。然后根据需要从内存中读取。
是应该一次性把日志全部读进内存、再用 map 重建数据,还是按需每次读取并重放一条日志?是否应该在反序列化之前先把内容从文件系统读进 buffer?想想你使用内存的方式,也想想你与内核交互的方式——你是在从 I/O 流中读取数据吗?
记住,"get" 可能获取不到值,这种情况下,需要特殊处理。这里,我们的 API 返回 `None`,然后客户端打印一个特定的消息,并以零代码退出。
读取日志有一个复杂点,你在编写 set 时,可能已经想到了:如何区分日志中的记录?也就是说,如何终止读取,何时开始读取下一条记录?需要这样实现吗?也许 serde 将直接从 I/O 流中序列化一条记录,并在操作完后停止读取,将游标停留在正确的位置,以便读取后续的记录。也许 serde 在检查到两条背靠背(back-to-back)的记录时会报错。也许你需要插入额外的信息来区分每个记录的长度,也有可能有其他方式。
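本文件 tests 模块中的 test_serde 正演示了其中一种答案:serde_json 的流式反序列化器读完一条记录后,会把游标正好停在该记录末尾(byte_offset 依次为 4、8、12),因此逐条读取时并不需要额外的长度前缀。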
_现在要实现 “get” 了_
### 部分 5:在索引中存储 log 的指针
此时,除压缩数据相关的测试以外,其他测试应该都是通过的。接下来的步骤是一些性能优化和存储优化。当你实现它们时,需要注意它们的意义是什么?
正如我们前面描述的那样,我们的数据库在内存中维护着所有 key 的索引。到目前为止,索引把 key 直接映射到值的内容;本节要把它改成映射到“日志指针”——值所在日志记录的位置,而不再保存值本身。
这个更改就需要我们可以从任意偏移量处读取日志。想一想,这将怎样影响我们对文件的处理。
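(在本文件中,这个日志指针就是 CommandPos:文件序号 gen、偏移量 pos 和长度 len。)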
如果在前面的步骤中,你选择将字符串直接存在内存中,那现在需要调整代码为存储日志指针的方式,并根据需要从磁盘中加载。
*/
// output.rs
//! Types and functions related to graphical outputs
//!
//! This module provides two main elements. The first is the
//! [`OutputHandler`](struct.OutputHandler.html) type, which is a
//! [`MultiGlobalHandler`](../environment/trait.MultiGlobalHandler.html) for
//! use with the [`init_environment!`](../macro.init_environment.html) macro. It is automatically
//! included if you use the [`new_default_environment!`](../macro.new_default_environment.html).
//!
//! The second is the [`with_output_info`](fn.with_output_info.html) function, which allows you
//! to access the information associated with this output, as an [`OutputInfo`](struct.OutputInfo.html).
use std::{
cell::RefCell,
rc::{self, Rc},
sync::{self, Arc, Mutex},
};
use wayland_client::{
protocol::{
wl_output::{self, Event, WlOutput},
wl_registry,
},
Attached, DispatchData, Main,
};
pub use wayland_client::protocol::wl_output::{Subpixel, Transform};
/// A possible mode for an output
#[derive(Copy, Clone, Debug)]
pub struct Mode {
/// Number of pixels of this mode in format `(width, height)`
///
/// for example `(1920, 1080)`
pub dimensions: (i32, i32),
/// Refresh rate for this mode, in mHz
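///
/// For example, a 60 Hz mode is reported as `60_000` mHz.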
pub refresh_rate: i32,
/// Whether this is the current mode for this output
pub is_current: bool,
/// Whether this is the preferred mode for this output
pub is_preferred: bool,
}
#[derive(Clone, Debug)]
/// Compiled information about an output
pub struct OutputInfo {
/// The ID of this output as a global
pub id: u32,
/// The model name of this output as advertised by the server
pub model: String,
/// The make name of this output as advertised by the server
pub make: String,
/// Location of the top-left corner of this output in compositor
/// space
///
/// Note that the compositor may decide to always report (0,0) if
/// it decides clients are not allowed to know this information.
pub location: (i32, i32),
/// Physical dimensions of this output, in unspecified units
pub physical_size: (i32, i32),
/// The subpixel layout for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_transform`
/// for better performance.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertised by the registry
///
/// If this is the case, it has become inert; you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) + 'static;
/// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically
/// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// For an old version 1, we just behave as if it had been sent at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i!= id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v,.. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb,.. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn merge_event(info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if !found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else {
false
}
});
}
fn notify_status_listeners(
output: &Attached<WlOutput>,
info: &OutputInfo,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
// Notify the callbacks listening for new outputs
listeners.borrow_mut().retain(|lst| {
if let Some(cb) = rc::Weak::upgrade(lst) {
(&mut *cb.borrow_mut())(output.detach(), info, ddata.reborrow());
true
} else {
false
}
})
}
/// Access the info associated with this output
///
/// The provided closure is given the [`OutputInfo`](struct.OutputInfo.html) as argument,
/// and its return value is returned from this function.
///
/// If the provided `WlOutput` has not yet been initialized or is not managed by SCTK, `None` is returned.
///
/// If the output has been removed by the compositor, the `obsolete` field of the `OutputInfo`
/// will be set to `true`. This handler will not automatically destroy the output by calling its
/// `release` method, to avoid interfering with your logic.
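///
/// A minimal usage sketch (assuming `output` is a `WlOutput` obtained from,
/// e.g., `Environment::get_all_outputs`):
///
/// ```ignore
/// let scale = with_output_info(&output, |info| info.scale_factor).unwrap_or(1);
/// ```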
pub fn with_output_info<T, F: FnOnce(&OutputInfo) -> T>(output: &WlOutput, f: F) -> Option<T> {
if let Some(ref udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Ready { ref info,.. } => Some(f(info)),
OutputData::Pending {.. } => None,
}
} else {
None
}
}
/// Add a listener to this output
///
/// The provided closure will be called whenever a property of the output changes,
/// including when it is removed by the compositor (in this case it'll be marked as
/// obsolete).
///
/// The returned [`OutputListener`](struct.OutputListener.html) keeps your callback alive,
/// dropping it will disable the callback and free the closure.
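///
/// A minimal usage sketch (keep the returned listener alive for as long as the
/// callback should fire):
///
/// ```ignore
/// let _listener = add_output_listener(&output, |_output, info, _ddata| {
///     println!("output {} updated, scale factor {}", info.id, info.scale_factor);
/// });
/// ```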
pub fn add_output_listener<F: Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>(
output: &WlOutput,
f: F,
) -> OutputListener {
let arc = Arc::new(f) as Arc<_>;
if let Some(udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let mut udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Pending { ref mut callbacks,.. } => {
callbacks.push(Arc::downgrade(&arc));
}
OutputData::Ready { ref mut callbacks,.. } => {
callbacks.push(Arc::downgrade(&arc));
}
}
}
OutputListener { _cb: arc }
}
/// A handle to an output listener callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputListener {
_cb: Arc<dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>,
}
/// A handle to an output status callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputStatusListener {
_cb: Rc<RefCell<OutputStatusCallback>>,
}
/// Trait representing the OutputHandler functions
///
/// Implementing this trait on your inner environment struct used with the
/// [`environment!`](../macro.environment.html) by delegating it to its
/// [`OutputHandler`](struct.OutputHandler.html) field will make available the output-associated
/// method on your [`Environment`](../environment/struct.Environment.html).
pub trait OutputHandling {
/// Insert a listener for output creation and removal events
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener;
}
impl OutputHandling for OutputHandler {
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener {
let rc = Rc::new(RefCell::new(f)) as Rc<_>;
self.status_listeners.borrow_mut().push(Rc::downgrade(&rc));
OutputStatusListener { _cb: rc }
}
}
impl<E: OutputHandling> crate::environment::Environment<E> {
/// Insert a new listener for outputs
///
/// The provided closure will be invoked whenever a `wl_output` is created or removed.
///
/// Note that if outputs already exist when this callback is setup, it'll not be invoked on them.
/// For you to be notified of them as well, you need to first process them manually by calling
/// `.get_all_outputs()`.
///
/// The returned [`OutputStatusListener`](../output/struct.OutputStatusListener.html) keeps your
/// callback alive, dropping it will disable it.
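///
/// For example (a sketch, with `env` being your `Environment`):
///
/// ```ignore
/// let _listener = env.listen_for_outputs(|output, info, _ddata| {
///     if info.obsolete {
///         // the compositor removed this output; release it
///         output.release();
///     } else {
///         println!("new output: {} {}", info.make, info.model);
///     }
/// });
/// ```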
#[must_use = "the returned OutputStatusListener keeps your callback alive, dropping it will disable it"]
pub fn listen_for_outputs<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&self,
f: F,
) -> OutputStatusListener {
self.with_inner(move |inner| OutputHandling::listen(inner, f))
}
}
impl<E: crate::environment::MultiGlobalHandler<WlOutput>> crate::environment::Environment<E> {
/// Shorthand method to retrieve the list of outputs
pub fn get_all_outputs(&self) -> Vec<WlOutput> {
self.get_all_globals::<WlOutput>().into_iter().map(|o| o.detach()).collect()
}
}
output.rs | //! Types and functions related to graphical outputs
//!
//! This modules provides two main elements. The first is the
//! [`OutputHandler`](struct.OutputHandler.html) type, which is a
//! [`MultiGlobalHandler`](../environment/trait.MultiGlobalHandler.html) for
//! use with the [`init_environment!`](../macro.init_environment.html) macro. It is automatically
//! included if you use the [`new_default_environment!`](../macro.new_default_environment.html).
//!
//! The second is the [`with_output_info`](fn.with_output_info.html) with allows you to
//! access the information associated to this output, as an [`OutputInfo`](struct.OutputInfo.html).
use std::{
cell::RefCell,
rc::{self, Rc},
sync::{self, Arc, Mutex},
};
use wayland_client::{
protocol::{
wl_output::{self, Event, WlOutput},
wl_registry,
},
Attached, DispatchData, Main,
};
pub use wayland_client::protocol::wl_output::{Subpixel, Transform};
/// A possible mode for an output
#[derive(Copy, Clone, Debug)]
pub struct Mode {
/// Number of pixels of this mode in format `(width, height)`
///
/// for example `(1920, 1080)`
pub dimensions: (i32, i32),
/// Refresh rate for this mode, in mHz
pub refresh_rate: i32,
/// Whether this is the current mode for this output
pub is_current: bool,
/// Whether this is the preferred mode for this output
pub is_preferred: bool,
}
#[derive(Clone, Debug)]
/// Compiled information about an output
pub struct OutputInfo {
/// The ID of this output as a global
pub id: u32,
/// The model name of this output as advertised by the server
pub model: String,
/// The make name of this output as advertised by the server
pub make: String,
/// Location of the top-left corner of this output in compositor
/// space
///
/// Note that the compositor may decide to always report (0,0) if
/// it decides clients are not allowed to know this information.
pub location: (i32, i32),
/// Physical dimensions of this output, in unspecified units
pub physical_size: (i32, i32),
/// The subpixel layout for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_tranform`
/// for better performances.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertized by the registry
///
/// If this is the case, it has become inert, you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) +'static;
| /// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// In case of an old version 1, we just behave as if it was send at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i!= id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v,.. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb,.. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn merge_event(info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if!found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else {
false
}
});
}
fn notify_status_listeners(
output: &Attached<WlOutput>,
info: &OutputInfo,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
// Notify the callbacks listening for new outputs
listeners.borrow_mut().retain(|lst| {
if let Some(cb) = rc::Weak::upgrade(lst) {
(&mut *cb.borrow_mut())(output.detach(), info, ddata.reborrow());
true
} else {
false
}
})
}
/// Access the info associated with this output
///
/// The provided closure is given the [`OutputInfo`](struct.OutputInfo.html) as argument,
/// and its return value is returned from this function.
///
/// If the provided `WlOutput` has not yet been initialized or is not managed by SCTK, `None` is returned.
///
/// If the output has been removed by the compositor, the `obsolete` field of the `OutputInfo`
/// will be set to `true`. This handler will not automatically detroy the output by calling its
/// `release` method, to avoid interfering with your logic.
pub fn with_output_info<T, F: FnOnce(&OutputInfo) -> T>(output: &WlOutput, f: F) -> Option<T> {
if let Some(ref udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Ready { ref info,.. } => Some(f(info)),
OutputData::Pending {.. } => None,
}
} else {
None
}
}
/// Add a listener to this output
///
/// The provided closure will be called whenever a property of the output changes,
/// including when it is removed by the compositor (in this case it'll be marked as
/// obsolete).
///
/// The returned [`OutputListener`](struct.OutputListener) keeps your callback alive,
/// dropping it will disable the callback and free the closure.
pub fn add_output_listener<F: Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync +'static>(
output: &WlOutput,
f: F,
) -> OutputListener {
let arc = Arc::new(f) as Arc<_>;
if let Some(udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let mut udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Pending { ref mut callbacks,.. } => {
callbacks.push(Arc::downgrade(&arc));
}
OutputData::Ready { ref mut callbacks,.. } => {
callbacks.push(Arc::downgrade(&arc));
}
}
}
OutputListener { _cb: arc }
}
/// A handle to an output listener callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputListener {
_cb: Arc<dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync +'static>,
}
/// A handle to an output status callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputStatusListener {
_cb: Rc<RefCell<OutputStatusCallback>>,
}
/// Trait representing the OutputHandler functions
///
/// Implementing this trait on your inner environment struct used with the
/// [`environment!`](../macro.environment.html) by delegating it to its
/// [`OutputHandler`](struct.OutputHandler.html) field will make available the output-associated
/// method on your [`Environment`](../environment/struct.Environment.html).
pub trait OutputHandling {
/// Insert a listener for output creation and removal events
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) +'static>(
&mut self,
f: F,
) -> OutputStatusListener;
}
impl OutputHandling for OutputHandler {
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) +'static>(
&mut self,
f: F,
) -> OutputStatusListener {
let rc = Rc::new(RefCell::new(f)) as Rc<_>;
self.status_listeners.borrow_mut().push(Rc::downgrade(&rc));
OutputStatusListener { _cb: rc }
}
}
impl<E: OutputHandling> crate::environment::Environment<E> {
/// Insert a new listener for outputs
///
/// The provided closure will be invoked whenever a `wl_output` is created or removed.
///
/// Note that if outputs already exist when this callback is setup, it'll not be invoked on them.
/// For you to be notified of them as well, you need to first process them manually by calling
/// `.get_all_outputs()`.
///
/// The returned [`OutputStatusListener`](../output/struct.OutputStatusListener.hmtl) keeps your
/// callback alive, dropping it will disable it.
#[must_use = "the returned OutputStatusListener keeps your callback alive, dropping it will disable it"]
pub fn listen_for_outputs<F: FnMut(WlOutput, &OutputInfo, DispatchData) +'static>(
&self,
f: F,
) -> OutputStatusListener {
self.with_inner(move |inner| OutputHandling::listen(inner, f))
}
}
impl<E: crate::environment::MultiGlobalHandler<WlOutput>> crate::environment::Environment<E> {
/// Shorthand method to retrieve the list of outputs
pub fn get_all_outputs(&self) -> Vec<WlOutput> {
self.get_all_globals::<WlOutput>().into_iter().map(|o| o.detach()).collect()
}
} | /// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically | random_line_split |
output.rs | //! Types and functions related to graphical outputs
//!
//! This modules provides two main elements. The first is the
//! [`OutputHandler`](struct.OutputHandler.html) type, which is a
//! [`MultiGlobalHandler`](../environment/trait.MultiGlobalHandler.html) for
//! use with the [`init_environment!`](../macro.init_environment.html) macro. It is automatically
//! included if you use the [`new_default_environment!`](../macro.new_default_environment.html).
//!
//! The second is the [`with_output_info`](fn.with_output_info.html) with allows you to
//! access the information associated to this output, as an [`OutputInfo`](struct.OutputInfo.html).
use std::{
cell::RefCell,
rc::{self, Rc},
sync::{self, Arc, Mutex},
};
use wayland_client::{
protocol::{
wl_output::{self, Event, WlOutput},
wl_registry,
},
Attached, DispatchData, Main,
};
pub use wayland_client::protocol::wl_output::{Subpixel, Transform};
/// A possible mode for an output
#[derive(Copy, Clone, Debug)]
pub struct Mode {
/// Number of pixels of this mode in format `(width, height)`
///
/// for example `(1920, 1080)`
pub dimensions: (i32, i32),
/// Refresh rate for this mode, in mHz
pub refresh_rate: i32,
/// Whether this is the current mode for this output
pub is_current: bool,
/// Whether this is the preferred mode for this output
pub is_preferred: bool,
}
#[derive(Clone, Debug)]
/// Compiled information about an output
pub struct OutputInfo {
/// The ID of this output as a global
pub id: u32,
/// The model name of this output as advertised by the server
pub model: String,
/// The make name of this output as advertised by the server
pub make: String,
/// Location of the top-left corner of this output in compositor
/// space
///
/// Note that the compositor may decide to always report (0,0) if
/// it decides clients are not allowed to know this information.
pub location: (i32, i32),
/// Physical dimensions of this output, in unspecified units
pub physical_size: (i32, i32),
/// The subpixel layout for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_tranform`
/// for better performances.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertized by the registry
///
/// If this is the case, it has become inert, you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) +'static;
/// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically
/// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// In case of an old version 1, we just behave as if it was send at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i!= id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v, .. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb, .. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn merge_event(info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if !found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else |
});
}
fn notify_status_listeners(
output: &Attached<WlOutput>,
info: &OutputInfo,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
// Notify the callbacks listening for new outputs
listeners.borrow_mut().retain(|lst| {
if let Some(cb) = rc::Weak::upgrade(lst) {
(&mut *cb.borrow_mut())(output.detach(), info, ddata.reborrow());
true
} else {
false
}
})
}
/// Access the info associated with this output
///
/// The provided closure is given the [`OutputInfo`](struct.OutputInfo.html) as argument,
/// and its return value is returned from this function.
///
/// If the provided `WlOutput` has not yet been initialized or is not managed by SCTK, `None` is returned.
///
/// If the output has been removed by the compositor, the `obsolete` field of the `OutputInfo`
/// will be set to `true`. This handler will not automatically destroy the output by calling its
/// `release` method, to avoid interfering with your logic.
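///
/// A minimal usage sketch (hedged: `output` here is assumed to be a `WlOutput`
/// managed by this handler, e.g. obtained from `get_all_outputs()`):
///
/// ```no_run
/// let description = with_output_info(&output, |info| {
///     format!("{} {} at {:?}", info.make, info.model, info.location)
/// });
/// ```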
pub fn with_output_info<T, F: FnOnce(&OutputInfo) -> T>(output: &WlOutput, f: F) -> Option<T> {
if let Some(ref udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Ready { ref info, .. } => Some(f(info)),
OutputData::Pending { .. } => None,
}
} else {
None
}
}
/// Add a listener to this output
///
/// The provided closure will be called whenever a property of the output changes,
/// including when it is removed by the compositor (in this case it'll be marked as
/// obsolete).
///
/// The returned [`OutputListener`](struct.OutputListener.html) keeps your callback alive,
/// dropping it will disable the callback and free the closure.
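///
/// Sketch (illustrative only; the closure must be `Fn + Send + Sync + 'static`,
/// and `output` is an assumed `WlOutput` handle):
///
/// ```no_run
/// let _listener = add_output_listener(&output, |_out, info, _ddata| {
///     if info.obsolete {
///         println!("output {} was removed by the compositor", info.id);
///     }
/// });
/// ```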
pub fn add_output_listener<F: Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>(
output: &WlOutput,
f: F,
) -> OutputListener {
let arc = Arc::new(f) as Arc<_>;
if let Some(udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let mut udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Pending { ref mut callbacks, .. } => {
callbacks.push(Arc::downgrade(&arc));
}
OutputData::Ready { ref mut callbacks, .. } => {
callbacks.push(Arc::downgrade(&arc));
}
}
}
OutputListener { _cb: arc }
}
/// A handle to an output listener callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputListener {
_cb: Arc<dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>,
}
/// A handle to an output status callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputStatusListener {
_cb: Rc<RefCell<OutputStatusCallback>>,
}
/// Trait representing the OutputHandler functions
///
/// Implementing this trait on your inner environment struct used with the
/// [`environment!`](../macro.environment.html) macro, by delegating it to its
/// [`OutputHandler`](struct.OutputHandler.html) field, will make the output-associated
/// method available on your [`Environment`](../environment/struct.Environment.html).
pub trait OutputHandling {
/// Insert a listener for output creation and removal events
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener;
}
impl OutputHandling for OutputHandler {
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener {
let rc = Rc::new(RefCell::new(f)) as Rc<_>;
self.status_listeners.borrow_mut().push(Rc::downgrade(&rc));
OutputStatusListener { _cb: rc }
}
}
impl<E: OutputHandling> crate::environment::Environment<E> {
/// Insert a new listener for outputs
///
/// The provided closure will be invoked whenever a `wl_output` is created or removed.
///
/// Note that if outputs already exist when this callback is set up, it will not be invoked on
/// them. To be notified of those outputs as well, you need to first process them manually by
/// calling `.get_all_outputs()`.
///
/// The returned [`OutputStatusListener`](../output/struct.OutputStatusListener.html) keeps your
/// callback alive, dropping it will disable it.
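///
/// Sketch (assuming `env` is your [`Environment`](../environment/struct.Environment.html)):
///
/// ```no_run
/// let _listener = env.listen_for_outputs(|_output, info, _ddata| {
///     println!("output {} changed (obsolete: {})", info.id, info.obsolete);
/// });
/// ```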
#[must_use = "the returned OutputStatusListener keeps your callback alive, dropping it will disable it"]
pub fn listen_for_outputs<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&self,
f: F,
) -> OutputStatusListener {
self.with_inner(move |inner| OutputHandling::listen(inner, f))
}
}
impl<E: crate::environment::MultiGlobalHandler<WlOutput>> crate::environment::Environment<E> {
/// Shorthand method to retrieve the list of outputs
pub fn get_all_outputs(&self) -> Vec<WlOutput> {
self.get_all_globals::<WlOutput>().into_iter().map(|o| o.detach()).collect()
}
}
| {
false
} | conditional_block |
command.rs | use std::fmt::Display;
use GameContext;
use data::Walkability;
use engine::keys::{Key, KeyCode};
use ecs::traits::*;
use graphics::cell::{CellFeature, StairDest, StairDir};
use logic::Action;
use logic::entity::EntityQuery;
use point::{Direction, Point};
use world::traits::*;
use world::{self, World};
use super::debug_command::*;
pub type CommandResult<T> = Result<T, CommandError>;
pub enum CommandError {
Bug(&'static str),
Invalid(&'static str),
Debug(String),
Cancel,
}
/// A bindable command that can be executed by the player.
pub enum Command {
Move(Direction),
UseStairs(StairDir),
Look,
Pickup,
Drop,
Inventory,
Wait,
Quit,
DebugMenu,
Teleport,
}
impl From<Key> for Command {
fn from(key: Key) -> Command {
match key {
Key { code: KeyCode::Escape, .. } => Command::Quit,
Key { code: KeyCode::Left, .. } |
Key { code: KeyCode::H, .. } |
Key { code: KeyCode::NumPad4, .. } => Command::Move(Direction::W),
Key { code: KeyCode::Right, .. } |
Key { code: KeyCode::L, .. } |
Key { code: KeyCode::NumPad6, .. } => Command::Move(Direction::E),
Key { code: KeyCode::Up, .. } |
Key { code: KeyCode::K, .. } |
Key { code: KeyCode::NumPad8, .. } => Command::Move(Direction::N),
Key { code: KeyCode::Down, .. } |
Key { code: KeyCode::J, .. } |
Key { code: KeyCode::NumPad2, .. } => Command::Move(Direction::S),
Key { code: KeyCode::B, .. } |
Key { code: KeyCode::NumPad1, .. } => Command::Move(Direction::SW),
Key { code: KeyCode::N, .. } |
Key { code: KeyCode::NumPad3, .. } => Command::Move(Direction::SE),
Key { code: KeyCode::Y, .. } |
Key { code: KeyCode::NumPad7, .. } => Command::Move(Direction::NW),
Key { code: KeyCode::U, .. } |
Key { code: KeyCode::NumPad9, .. } => Command::Move(Direction::NE),
Key { code: KeyCode::Period, .. } => Command::UseStairs(StairDir::Ascending),
Key { code: KeyCode::Comma, .. } => Command::UseStairs(StairDir::Descending),
Key { code: KeyCode::M, .. } => Command::Look,
Key { code: KeyCode::G, .. } => Command::Pickup,
Key { code: KeyCode::D, .. } => Command::Drop,
Key { code: KeyCode::I, .. } => Command::Inventory,
Key { code: KeyCode::E, .. } => Command::Teleport,
Key { code: KeyCode::F1, .. } => Command::DebugMenu,
_ => Command::Wait,
}
}
}
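// With the conversion above, a frontend input loop reduces to a sketch like
// this (`key` being a hypothetical `Key` delivered by the engine's input layer):
//
//     let command = Command::from(key); // or equivalently `key.into()`
//     process_player_command(context, command)?;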
pub fn process_player_command(context: &mut GameContext, command: Command) -> CommandResult<()> {
match command {
// TEMP: Commands can still be run even if there is no player?
Command::Quit => Err(CommandError::Invalid("Can't quit.")),
Command::Look => cmd_look(context),
Command::UseStairs(dir) => cmd_use_stairs(context, dir),
Command::Pickup => cmd_pickup(context),
Command::Drop => cmd_drop(context),
Command::Inventory => cmd_inventory(context),
Command::Move(dir) => cmd_player_move(context, dir),
Command::Wait => cmd_add_action(context, Action::Wait),
Command::DebugMenu => cmd_debug_menu(context),
Command::Teleport => cmd_teleport(context),
}
}
fn cmd_player_move(context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> |
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
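// `ref mut dest @ StairDest::Ungenerated` below both checks that the stairs are
// still ungenerated and binds a mutable reference to that field, so `*dest` can
// be overwritten in place once the destination map has been generated.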
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world) {
mes!(world, "You see here a {}.", a = mob.name(world));
}
}
}
}
fn draw_targeting_line(player_pos: Option<Point>, world: &mut World) {
let camera = world.flags().camera;
if let Some(player_pos) = player_pos {
draw_line(player_pos, camera, world);
}
}
fn draw_line(start: Point, end: Point, world: &mut World) {
world.marks.clear();
for pos in LineIter::new(start, end) {
world.marks.add(pos, Color::new(255, 255, 255));
}
world.marks.add(end, Color::new(255, 255, 255));
}
/// Allow the player to choose a tile.
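///
/// The callback receives the currently highlighted tile whenever the cursor
/// moves; Return confirms the selection and Escape aborts with
/// `CommandError::Cancel`. A hedged sketch, mirroring `cmd_teleport` above:
///
/// ```ignore
/// let pos = select_tile(context, |_, _| ())?;
/// ```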
pub fn select_tile<F>(context: &mut GameContext, callback: F) -> CommandResult<Point>
where
F: Fn(Point, &mut World),
{
let mut selected = false;
let mut result = context.state.world.flags().camera;
let player_pos = context.state
.world
.player()
.map(|p| context.state.world.position(p))
.unwrap_or(None);
renderer::with_mut(|rc| {
draw_targeting_line(player_pos, &mut context.state.world);
rc.update(context);
rc.start_loop(|renderer, event| {
match event {
glutin::Event::KeyboardInput(ElementState::Pressed, _, Some(code)) => {
println!("Key: {:?}", code);
{
let world = &mut context.state.world;
match code {
VirtualKeyCode::Up => world.flags_mut().camera.y -= 1,
VirtualKeyCode::Down => world.flags_mut().camera.y += 1,
VirtualKeyCode::Left => world.flags_mut().camera.x -= 1,
VirtualKeyCode::Right => world.flags_mut().camera.x += 1,
VirtualKeyCode::Escape => return Some(renderer::Action::Stop),
VirtualKeyCode::Return => {
selected = true;
return Some(renderer::Action::Stop);
},
_ => (),
}
let camera = world.flags().camera;
result = camera;
callback(camera, world);
draw_targeting_line(player_pos, world);
}
renderer.update(context);
},
_ => (),
}
None
});
});
context.state.world.marks.clear();
if selected {
Ok(result)
} else {
Err(CommandError::Cancel)
}
}
use renderer::ui::layers::ChoiceLayer;
pub fn menu_choice(context: &mut GameContext, choices: Vec<String>) -> Option<usize> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut ChoiceLayer::new(choices))
})
}
pub fn menu_choice_indexed<T: Display + Clone>(
context: &mut GameContext,
mut choices: Vec<T>,
) -> CommandResult<T> {
let strings = choices.iter().cloned().map(|t| t.to_string()).collect();
let idx = menu_choice(context, strings).ok_or(CommandError::Cancel)?;
Ok(choices.remove(idx))
}
use renderer::ui::layers::InputLayer;
pub fn player_input(context: &mut GameContext, prompt: &str) -> Option<String> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut InputLayer::new(prompt))
})
}
| {
select_tile(context, maybe_examine_tile).map(|_| ())
} | identifier_body |
command.rs | use std::fmt::Display;
use GameContext;
use data::Walkability;
use engine::keys::{Key, KeyCode};
use ecs::traits::*;
use graphics::cell::{CellFeature, StairDest, StairDir};
use logic::Action;
use logic::entity::EntityQuery;
use point::{Direction, Point};
use world::traits::*;
use world::{self, World};
use super::debug_command::*;
pub type CommandResult<T> = Result<T, CommandError>;
pub enum CommandError {
Bug(&'static str),
Invalid(&'static str),
Debug(String),
Cancel,
}
/// A bindable command that can be executed by the player.
pub enum Command {
Move(Direction),
UseStairs(StairDir),
Look,
Pickup,
Drop,
Inventory,
Wait,
Quit,
DebugMenu,
Teleport,
}
impl From<Key> for Command {
fn from(key: Key) -> Command {
match key {
Key { code: KeyCode::Escape, .. } => Command::Quit,
Key { code: KeyCode::Left, .. } |
Key { code: KeyCode::H, .. } |
Key { code: KeyCode::NumPad4, .. } => Command::Move(Direction::W),
Key { code: KeyCode::Right, .. } |
Key { code: KeyCode::L, .. } |
Key { code: KeyCode::NumPad6, .. } => Command::Move(Direction::E),
Key { code: KeyCode::Up, .. } |
Key { code: KeyCode::K, .. } |
Key { code: KeyCode::NumPad8, .. } => Command::Move(Direction::N),
Key { code: KeyCode::Down, .. } |
Key { code: KeyCode::J, .. } |
Key { code: KeyCode::NumPad2, .. } => Command::Move(Direction::S),
Key { code: KeyCode::B, .. } |
Key { code: KeyCode::NumPad1, .. } => Command::Move(Direction::SW),
Key { code: KeyCode::N, .. } |
Key { code: KeyCode::NumPad3, .. } => Command::Move(Direction::SE),
Key { code: KeyCode::Y, .. } |
Key { code: KeyCode::NumPad7, .. } => Command::Move(Direction::NW),
Key { code: KeyCode::U, .. } |
Key { code: KeyCode::NumPad9, .. } => Command::Move(Direction::NE),
Key { code: KeyCode::Period, .. } => Command::UseStairs(StairDir::Ascending),
Key { code: KeyCode::Comma, .. } => Command::UseStairs(StairDir::Descending),
Key { code: KeyCode::M, .. } => Command::Look,
Key { code: KeyCode::G, .. } => Command::Pickup,
Key { code: KeyCode::D, .. } => Command::Drop,
Key { code: KeyCode::I, .. } => Command::Inventory,
Key { code: KeyCode::E, .. } => Command::Teleport,
Key { code: KeyCode::F1, .. } => Command::DebugMenu,
_ => Command::Wait,
}
}
}
pub fn process_player_command(context: &mut GameContext, command: Command) -> CommandResult<()> {
match command {
// TEMP: Commands can still be run even if there is no player?
Command::Quit => Err(CommandError::Invalid("Can't quit.")),
Command::Look => cmd_look(context),
Command::UseStairs(dir) => cmd_use_stairs(context, dir),
Command::Pickup => cmd_pickup(context),
Command::Drop => cmd_drop(context),
Command::Inventory => cmd_inventory(context),
Command::Move(dir) => cmd_player_move(context, dir),
Command::Wait => cmd_add_action(context, Action::Wait),
Command::DebugMenu => cmd_debug_menu(context),
Command::Teleport => cmd_teleport(context),
}
}
fn cmd_player_move(context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> {
select_tile(context, maybe_examine_tile).map(|_| ())
}
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world) {
mes!(world, "You see here a {}.", a = mob.name(world));
}
}
}
}
fn draw_targeting_line(player_pos: Option<Point>, world: &mut World) {
let camera = world.flags().camera;
if let Some(player_pos) = player_pos {
draw_line(player_pos, camera, world);
}
}
fn draw_line(start: Point, end: Point, world: &mut World) {
world.marks.clear();
for pos in LineIter::new(start, end) {
world.marks.add(pos, Color::new(255, 255, 255));
}
world.marks.add(end, Color::new(255, 255, 255));
}
/// Allow the player to choose a tile.
pub fn select_tile<F>(context: &mut GameContext, callback: F) -> CommandResult<Point>
where
F: Fn(Point, &mut World),
{
let mut selected = false;
let mut result = context.state.world.flags().camera;
let player_pos = context.state
.world
.player()
.map(|p| context.state.world.position(p))
.unwrap_or(None);
renderer::with_mut(|rc| {
draw_targeting_line(player_pos, &mut context.state.world);
rc.update(context);
rc.start_loop(|renderer, event| {
match event {
glutin::Event::KeyboardInput(ElementState::Pressed, _, Some(code)) => {
println!("Key: {:?}", code);
{
let world = &mut context.state.world;
match code {
VirtualKeyCode::Up => world.flags_mut().camera.y -= 1,
VirtualKeyCode::Down => world.flags_mut().camera.y += 1,
VirtualKeyCode::Left => world.flags_mut().camera.x -= 1,
VirtualKeyCode::Right => world.flags_mut().camera.x += 1,
VirtualKeyCode::Escape => return Some(renderer::Action::Stop),
VirtualKeyCode::Return => {
selected = true;
return Some(renderer::Action::Stop);
},
_ => (),
}
let camera = world.flags().camera;
result = camera;
callback(camera, world);
draw_targeting_line(player_pos, world);
}
renderer.update(context);
},
_ => (),
}
None
});
});
context.state.world.marks.clear();
if selected {
Ok(result)
} else {
Err(CommandError::Cancel)
}
} |
pub fn menu_choice(context: &mut GameContext, choices: Vec<String>) -> Option<usize> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut ChoiceLayer::new(choices))
})
}
pub fn menu_choice_indexed<T: Display + Clone>(
context: &mut GameContext,
mut choices: Vec<T>,
) -> CommandResult<T> {
let strings = choices.iter().cloned().map(|t| t.to_string()).collect();
let idx = menu_choice(context, strings).ok_or(CommandError::Cancel)?;
Ok(choices.remove(idx))
}
use renderer::ui::layers::InputLayer;
pub fn player_input(context: &mut GameContext, prompt: &str) -> Option<String> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut InputLayer::new(prompt))
})
} |
use renderer::ui::layers::ChoiceLayer; | random_line_split |
command.rs | use std::fmt::Display;
use GameContext;
use data::Walkability;
use engine::keys::{Key, KeyCode};
use ecs::traits::*;
use graphics::cell::{CellFeature, StairDest, StairDir};
use logic::Action;
use logic::entity::EntityQuery;
use point::{Direction, Point};
use world::traits::*;
use world::{self, World};
use super::debug_command::*;
pub type CommandResult<T> = Result<T, CommandError>;
pub enum CommandError {
Bug(&'static str),
Invalid(&'static str),
Debug(String),
Cancel,
}
/// A bindable command that can be executed by the player.
pub enum Command {
Move(Direction),
UseStairs(StairDir),
Look,
Pickup,
Drop,
Inventory,
Wait,
Quit,
DebugMenu,
Teleport,
}
impl From<Key> for Command {
fn from(key: Key) -> Command {
match key {
Key { code: KeyCode::Escape, .. } => Command::Quit,
Key { code: KeyCode::Left, .. } |
Key { code: KeyCode::H, .. } |
Key { code: KeyCode::NumPad4, .. } => Command::Move(Direction::W),
Key { code: KeyCode::Right, .. } |
Key { code: KeyCode::L, .. } |
Key { code: KeyCode::NumPad6, .. } => Command::Move(Direction::E),
Key { code: KeyCode::Up, .. } |
Key { code: KeyCode::K, .. } |
Key { code: KeyCode::NumPad8, .. } => Command::Move(Direction::N),
Key { code: KeyCode::Down, .. } |
Key { code: KeyCode::J, .. } |
Key { code: KeyCode::NumPad2, .. } => Command::Move(Direction::S),
Key { code: KeyCode::B, .. } |
Key { code: KeyCode::NumPad1, .. } => Command::Move(Direction::SW),
Key { code: KeyCode::N, .. } |
Key { code: KeyCode::NumPad3, .. } => Command::Move(Direction::SE),
Key { code: KeyCode::Y, .. } |
Key { code: KeyCode::NumPad7, .. } => Command::Move(Direction::NW),
Key { code: KeyCode::U, .. } |
Key { code: KeyCode::NumPad9, .. } => Command::Move(Direction::NE),
Key { code: KeyCode::Period, .. } => Command::UseStairs(StairDir::Ascending),
Key { code: KeyCode::Comma, .. } => Command::UseStairs(StairDir::Descending),
Key { code: KeyCode::M, .. } => Command::Look,
Key { code: KeyCode::G, .. } => Command::Pickup,
Key { code: KeyCode::D, .. } => Command::Drop,
Key { code: KeyCode::I, .. } => Command::Inventory,
Key { code: KeyCode::E, .. } => Command::Teleport,
Key { code: KeyCode::F1, .. } => Command::DebugMenu,
_ => Command::Wait,
}
}
}
pub fn process_player_command(context: &mut GameContext, command: Command) -> CommandResult<()> {
match command {
// TEMP: Commands can still be run even if there is no player?
Command::Quit => Err(CommandError::Invalid("Can't quit.")),
Command::Look => cmd_look(context),
Command::UseStairs(dir) => cmd_use_stairs(context, dir),
Command::Pickup => cmd_pickup(context),
Command::Drop => cmd_drop(context),
Command::Inventory => cmd_inventory(context),
Command::Move(dir) => cmd_player_move(context, dir),
Command::Wait => cmd_add_action(context, Action::Wait),
Command::DebugMenu => cmd_debug_menu(context),
Command::Teleport => cmd_teleport(context),
}
}
fn cmd_player_move(context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> {
select_tile(context, maybe_examine_tile).map(|_| ())
}
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world) {
mes!(world, "You see here a {}.", a = mob.name(world));
}
}
}
}
fn draw_targeting_line(player_pos: Option<Point>, world: &mut World) {
let camera = world.flags().camera;
if let Some(player_pos) = player_pos {
draw_line(player_pos, camera, world);
}
}
fn | (start: Point, end: Point, world: &mut World) {
world.marks.clear();
for pos in LineIter::new(start, end) {
world.marks.add(pos, Color::new(255, 255, 255));
}
world.marks.add(end, Color::new(255, 255, 255));
}
/// Allow the player to choose a tile.
pub fn select_tile<F>(context: &mut GameContext, callback: F) -> CommandResult<Point>
where
F: Fn(Point, &mut World),
{
let mut selected = false;
let mut result = context.state.world.flags().camera;
let player_pos = context.state
.world
.player()
.map(|p| context.state.world.position(p))
.unwrap_or(None);
renderer::with_mut(|rc| {
draw_targeting_line(player_pos, &mut context.state.world);
rc.update(context);
rc.start_loop(|renderer, event| {
match event {
glutin::Event::KeyboardInput(ElementState::Pressed, _, Some(code)) => {
println!("Key: {:?}", code);
{
let world = &mut context.state.world;
match code {
VirtualKeyCode::Up => world.flags_mut().camera.y -= 1,
VirtualKeyCode::Down => world.flags_mut().camera.y += 1,
VirtualKeyCode::Left => world.flags_mut().camera.x -= 1,
VirtualKeyCode::Right => world.flags_mut().camera.x += 1,
VirtualKeyCode::Escape => return Some(renderer::Action::Stop),
VirtualKeyCode::Return => {
selected = true;
return Some(renderer::Action::Stop);
},
_ => (),
}
let camera = world.flags().camera;
result = camera;
callback(camera, world);
draw_targeting_line(player_pos, world);
}
renderer.update(context);
},
_ => (),
}
None
});
});
context.state.world.marks.clear();
if selected {
Ok(result)
} else {
Err(CommandError::Cancel)
}
}
use renderer::ui::layers::ChoiceLayer;
pub fn menu_choice(context: &mut GameContext, choices: Vec<String>) -> Option<usize> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut ChoiceLayer::new(choices))
})
}
pub fn menu_choice_indexed<T: Display + Clone>(
context: &mut GameContext,
mut choices: Vec<T>,
) -> CommandResult<T> {
let strings = choices.iter().cloned().map(|t| t.to_string()).collect();
let idx = menu_choice(context, strings).ok_or(CommandError::Cancel)?;
Ok(choices.remove(idx))
}
use renderer::ui::layers::InputLayer;
pub fn player_input(context: &mut GameContext, prompt: &str) -> Option<String> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut InputLayer::new(prompt))
})
}
| draw_line | identifier_name |
main.rs | 32 => '$',
_ => return None,
};
return Some(res)
}
}
}
make_encode_decode!{
0 => 'a';
1 => 'e';
2 => 'i';
3 => 'o';
4 => 'r';
5 => 'n';
6 => 'l';
7 => 's';
8 => 't';
9 => 'u';
10 => 'p';
11 => 'c';
12 => 'd';
13 => 'k';
14 => 'y';
15 => 'g';
16 => 'h';
17 => 'b';
18 => 'v';
19 => 'f';
20 => 'w';
21 => 'z';
22 => 'j';
23 => 'x';
24 => '\'';
25 => '-';
26 => 'è';
27 => 'ê';
28 => 'ñ';
29 => 'é';
30 => 'm';
31 => 'q';
}
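// Illustrative round trip through the generated table (assuming the macro
// emits `encode(char) -> Option<u8>` and `decode(u8) -> Option<char>`, as the
// call sites below rely on):
//
//     assert_eq!(encode('a'), Some(0));
//     assert_eq!(decode(7), Some('s'));
//     assert_eq!(encode('?'), None); // characters outside the table are rejected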
#[derive(Debug,Clone,Copy,PartialEq,Eq)]
struct CharSet {
pub internal:u32
}
impl CharSet {
fn new(internal:u32) -> CharSet {
return CharSet{internal}
}
fn add(&mut self, val:u8) {
if val > 31 {panic!("Invalid val {}", val)}
self.internal |= 2u32.pow(val as u32)
}
fn and(&self, other:&Self) -> Self {
Self{ internal: self.internal & other.internal }
}
fn has(&self, val:u8) -> bool {
if val > 31 {
panic!("Invalid val {}", val)
} else {
return (self.internal & 2u32.pow(val as u32)) > 0
}
}
}
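// Sanity sketch of the bitset semantics above (illustrative only):
//
//     let mut s = CharSet::default();
//     s.add(0);                                // mark code 0 ('a')
//     s.add(4);                                // mark code 4 ('r')
//     assert!(s.has(4) && !s.has(1));
//     assert!(s.and(&CharSet::new(1)).has(0)); // intersection keeps only bit 0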
impl Default for CharSet {
fn default() -> Self {
CharSet::new(0)
}
}
// NOTE: can only go up to 15. 16 would break everything
//const WORD_SQUARE_ORDER:usize = 6;
// const WORD_SQUARE_WIDTH:usize = 8;
// const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "width-2")]
const WORD_SQUARE_WIDTH:usize = 2;
#[cfg(feature = "width-3")]
const WORD_SQUARE_WIDTH:usize = 3;
#[cfg(feature = "width-4")]
const WORD_SQUARE_WIDTH:usize = 4;
#[cfg(feature = "width-5")]
const WORD_SQUARE_WIDTH:usize = 5;
#[cfg(feature = "width-6")]
const WORD_SQUARE_WIDTH:usize = 6;
#[cfg(feature = "width-7")]
const WORD_SQUARE_WIDTH:usize = 7;
#[cfg(feature = "width-8")]
const WORD_SQUARE_WIDTH:usize = 8;
#[cfg(feature = "width-9")]
const WORD_SQUARE_WIDTH:usize = 9;
#[cfg(feature = "width-10")]
const WORD_SQUARE_WIDTH:usize = 10;
#[cfg(feature = "width-11")]
const WORD_SQUARE_WIDTH:usize = 11;
#[cfg(feature = "width-12")]
const WORD_SQUARE_WIDTH:usize = 12;
#[cfg(feature = "width-13")]
const WORD_SQUARE_WIDTH:usize = 13;
#[cfg(feature = "width-14")]
const WORD_SQUARE_WIDTH:usize = 14;
#[cfg(feature = "width-15")]
const WORD_SQUARE_WIDTH:usize = 15;
#[cfg(feature = "height-2")]
const WORD_SQUARE_HEIGHT:usize = 2;
#[cfg(feature = "height-3")]
const WORD_SQUARE_HEIGHT:usize = 3;
#[cfg(feature = "height-4")]
const WORD_SQUARE_HEIGHT:usize = 4;
#[cfg(feature = "height-5")]
const WORD_SQUARE_HEIGHT:usize = 5;
#[cfg(feature = "height-6")]
const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "height-7")]
const WORD_SQUARE_HEIGHT:usize = 7;
#[cfg(feature = "height-8")]
const WORD_SQUARE_HEIGHT:usize = 8;
#[cfg(feature = "height-9")]
const WORD_SQUARE_HEIGHT:usize = 9;
#[cfg(feature = "height-10")]
const WORD_SQUARE_HEIGHT:usize = 10;
#[cfg(feature = "height-11")]
const WORD_SQUARE_HEIGHT:usize = 11;
#[cfg(feature = "height-12")]
const WORD_SQUARE_HEIGHT:usize = 12;
#[cfg(feature = "height-13")]
const WORD_SQUARE_HEIGHT:usize = 13;
#[cfg(feature = "height-14")]
const WORD_SQUARE_HEIGHT:usize = 14;
#[cfg(feature = "height-15")]
const WORD_SQUARE_HEIGHT:usize = 15;
//const WORD_ORDER_U8:u8 = WORD_SQUARE_ORDER as u8;
const WORD_SQUARE_SIZE:usize = WORD_SQUARE_WIDTH * WORD_SQUARE_HEIGHT;
type WideWord = [u8; WORD_SQUARE_WIDTH];
type TallWord = [u8; WORD_SQUARE_HEIGHT];
type WordSquare = [u8; WORD_SQUARE_SIZE];
#[derive(Debug,Default)]
struct WordIndex {
inner_rows: FnvHashMap<WideWord,CharSet>,
#[cfg(not(feature = "square"))]
inner_cols: FnvHashMap<TallWord,CharSet>,
}
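// Key convention (derived from `make_words_index` below): each key is a word
// whose known prefix is filled in and whose remaining cells hold the 255
// sentinel; the CharSet value collects every letter that can legally occupy
// the first open cell after that prefix.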
impl WordIndex {
fn rows(&self) -> &FnvHashMap<WideWord,CharSet> {
&self.inner_rows
}
fn cols(&self) -> &FnvHashMap<TallWord,CharSet> {
#[cfg(not(feature = "square"))]
return &self.inner_cols;
#[cfg(feature = "square")]
return self.rows();
}
fn rows_mut(&mut self) -> &mut FnvHashMap<WideWord,CharSet> {
&mut self.inner_rows
}
#[cfg(not(feature = "square"))]
fn cols_mut(&mut self) -> &mut FnvHashMap<TallWord,CharSet> {
&mut self.inner_cols
}
}
fn print_word_square(sq:WordSquare){
let mut first = true;
for i in 0..WORD_SQUARE_HEIGHT {
let mut chars = Vec::new();
for j in 0..WORD_SQUARE_WIDTH {
chars.push(decode(sq[i*WORD_SQUARE_WIDTH + j]).unwrap());
}
let word = chars.iter().collect::<String>();
if !first {
print!("-");
}
print!("{}", word);
first = false;
}
println!();
}
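// E.g. with the width-2/height-2 features, the square [0, 1, 2, 3] prints as
// "ae-io": each row is decoded and the rows are joined with '-'.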
fn main() -> io::Result<()> {
| .arg(Arg::with_name("wordlist")
.required(true)
.help("the wordlist file path, a plain-text UTF-8 file with each word separated by a newline")
)
.arg(Arg::with_name("ignore-empty-wordlist")
.long("ignore-empty-wordlist")
.help("Don't complain if there are no words of the necessary length in the given wordlist")
)
.arg(Arg::with_name("ignore-unencodeable")
.long("ignore-unencodeable")
.help("Don't show a warning when a word is dropped because it contains unencodeable characters.")
)
.arg(Arg::with_name("quiet")
.long("quiet")
.short("q")
.help("Don't show any status messages; STDERR will be empty if no errors occured.")
)
)
.subcommand(SubCommand::with_name("wordlist-preprocess")
.about("Takes in a wordlist (of various formats) and converts characters to a consistent set, for example 'а' (U+0430 CYRILLIC SMALL LETTER A) becomes 'a' (U+0061 LATIN SMALL LETTER A). Any words that would be ignored by the compute function are also filtered out.")
.arg(Arg::with_name("wiktionary-list-format")
.long("wiktionary-format")
.short("w")
.long_help("Input wordlist is in wiktionary \"all-titles\" format.")
.group("format")
)
.arg(Arg::with_name("plain-list-format")
.long("plain-format")
.short("p")
.long_help("Input wordlist is a plaintext UTF-8 newline-separated list of words")
.group("format")
)
.arg(Arg::with_name("input-filename")
.required(true)
.help("The path to the wordlist to read from, or \"-\" for stdin")
)
.arg(Arg::with_name("output-filename")
.required(true)
.help("The path to the wordlist to write to, or \"-\" for stdout")
)
).get_matches();
//println!("{:?}", matches.is_present("wordlist-preprocess"));
return match matches.subcommand() {
("compute", Some(m)) => compute_command(m),
("wordlist-preprocess", Some(m)) => wordlist_preprocess(m),
_ => panic!("This shouldn't happen"),
}
/*let mut args:Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Must have at least one argument (which sub-thing to run)");
return Ok(());
}
eprintln!("{:?}", args);
args.remove(0);
eprintln!("{:?}", args);
let name:&str = &(args.remove(0));
eprintln!("{:?}", args);
match name {
"wordlist-preprocess" => return wordlist_preprocess(args),
"compute" => return compute_command(args),
unfound_command => eprintln!("unrecognized command {}", unfound_command),
}*/
}
fn filter_word(word:&str) -> Option<String> {
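// A word survives only if every character either encodes directly, or folds via
// its single-character Unicode confusable skeleton (e.g. Cyrillic 'а' -> Latin
// 'a') to a character that encodes.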
let mut success = true;
let res = Some(word.chars().map(|c| {
match encode(c) {
Some(_) => c,
None => {
let chars:Vec<char> = c.to_string().skeleton_chars().collect();
if chars.len() != 1 {
success = false;
'a'
} else {
match encode(chars[0]) {
Some(_) => chars[0],
None => {success = false; 'a'},
}
}
},
}
}).collect::<String>());
if success {
return res
} else {
return None
}
}
fn wordlist_preprocess(args:&ArgMatches) -> io::Result<()> {
let in_file = File::open( args.value_of("input-filename" ).unwrap())?;
let out_file = File::create(args.value_of("output-filename").unwrap())?;
let wik_format = args.is_present("wiktionary-list-format");
let f = BufReader::new(in_file);
let mut fo = BufWriter::new(out_file);
let mut lines = f.lines();
if wik_format {
//Skip the first line
lines.next().unwrap()?;
}
for line_result in lines {
let line = line_result?;
let word;
if wik_format {
let mut split = line.split('\t');
split.next().unwrap(); // skip before tab
word = split.next().unwrap();
match split.next() {
Some(_) => panic!("Only one tab expected per line"),
None => (),
}
} else {
word = &line
}
match filter_word(word) {
Some(word) => writeln!(&mut fo, "{}", word)?,
None => (),
}
}
fo.flush()?;
return Ok(());
}
fn make_words_index(
f_in: impl BufRead,
ignore_unencodeable: bool,
) -> io::Result<(u32, u32, WordIndex)> {
let mut index = WordIndex::default();
let mut count_row_words = 0;
#[cfg(not(feature = "square"))]
let mut count_col_words = 0;
let lines = f_in.lines();
for line_result in lines {
let word = line_result?;
let chars:Vec<char> = word.chars().collect();
if chars.len() != WORD_SQUARE_WIDTH && chars.len() != WORD_SQUARE_HEIGHT { continue }
let mut codes = Vec::new();
let mut all_encoded = true;
for c in chars.clone() {
match encode(c) {
Some(code) => codes.push(code),
None => {
all_encoded = false;
continue
},
}
}
if !all_encoded {
if!ignore_unencodeable {
eprintln!("Skipping {:?}, not all could be encoded",chars);
}
continue
}
if codes.len() == WORD_SQUARE_WIDTH {
count_row_words += 1;
let words_index = index.rows_mut();
let mut word = WideWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_WIDTH {
let i = (WORD_SQUARE_WIDTH - 1) - j;
// for i in WORD_SQUARE_ORDER..0 including 0, excluding WORD_SQUARE_ORDER
let code = word[i];
word[i] = 255u8;
if !words_index.contains_key(&word) {
//println!("Inserting {:?}", word);
words_index.insert(word, CharSet::default());
}
words_index.get_mut(&word).unwrap().add(code);
}
}
#[cfg(not(feature = "square"))]
if codes.len() == WORD_SQUARE_HEIGHT {
count_col_words += 1;
let words_index = index.cols_mut();
let mut word = TallWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_HEIGHT {
let i = (WORD_SQUARE_HEIGHT - 1) - j;
// for i in WORD_SQUARE_ORDER..0 including 0, excluding WORD_SQUARE_ORDER
let code = word[i];
word[i] = 255u8;
if !words_index.contains_key(&word) {
//println!("Inserting {:?}", word);
words_index.insert(word, CharSet::default());
}
words_index.get_mut(&word).unwrap().add(code);
}
}
}
#[cfg(feature = "square")]
let count_col_words = count_row_words;
return Ok((count_row_words, count_col_words, index));
}
fn compute_command(args:&ArgMatches) -> io::Result<()> {
let loud = !args.is_present("quiet");
let ignore_empty_wordlist = args.is_present("ignore-empty-wordlist");
let ignore_unencodeable = args.is_present("ignore-unencodeable");
if loud {
eprintln!("Word square order is {}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT);
eprintln!("Start: creating index.");
}
let num_threads:u32 = args.value_of("threads").unwrap().parse().unwrap();
let plain_f = File::open(args.value_of("wordlist").unwrap())?;
let f = BufReader::new(plain_f);
let (count_row_words, count_col_words, index) = make_words_index(f, ignore_unencodeable)?;
if !ignore_empty_wordlist && (index.rows().is_empty() || index.cols().is_empty()) {
panic!("No words in wordlist!");
}
if loud {
eprintln!("Finished creating index, {} words x {} words.", count_row_words, count_col_words);
}
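// Pipeline shape: this thread seeds one-row-deep partial squares into the spmc
// channel; each worker extends them into full squares and streams results over
// the bounded mpsc channel to the dedicated printing thread below.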
let (m2w_tx, m2w_rx) = spmc::channel::<(WordSquare,u8)>();
let (w2m_tx, w2m_rx) = std::sync::mpsc::sync_channel(16);
let mut worker_handles = Vec::new();
if loud {
eprintln!("Creating {} worker threads.", num_threads);
}
let index_arc = std::sync::Arc::new(index);
for _ in 0..num_threads {
let rxc = m2w_rx.clone();
let txc = w2m_tx.clone();
let my_index = std::sync::Arc::clone(&index_arc);
worker_handles.push(
thread::spawn( move || {
while let Ok(msg) = rxc.recv() {
compute(
&my_index,
msg.0,
msg.1,
WORD_SQUARE_SIZE as u8,
|a,b| txc.send((a,b)).unwrap()
);
}
})
);
}
drop(w2m_tx);
let printing_thread = thread::spawn(move || {
while let Ok(msg) = w2m_rx.recv() {
print_word_square(msg.0);
}
});
let code_array = [255u8; WORD_SQUARE_SIZE];
if loud {
eprintln!("Starting.");
}
compute(
index_arc.as_ref(),
code_array,
0u8,
WORD_SQUARE_WIDTH as u8,
|ca, idx| m2w_tx.send((ca,idx)).unwrap()
);
drop(m2w_tx);
//println!("Dropped");
for h in worker_handles {
h.join().unwrap();
//println!("Worker finished");
}
printing_thread.join().unwrap();
//println!("printing thread finished");
/*let mut char_counts:Vec<(char,u64)> = unused_chars.drain().collect();
char_counts.sort_unstable_by_key(|t| t.1);
for (k,v) in char_counts.iter() {
println!("Char {:?} had {} instances", k, v);
}*/
Ok(())
}
const DEBUG_MODE:bool = false;
fn compute<T:FnMut(WordSquare,u8)>(
words_index_arg:&WordIndex,
mut code_array:WordSquare,
start_idx:u8,
target_idx:u8,
mut on_result:T,
) {
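// Backtracking search: `code_array` is the candidate square (255 = empty cell)
// and `charset_array` caches, per cell, the set of letters still consistent
// with the row prefix and the column prefix that meet at that cell.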
let mut at_idx = start_idx;
let mut charset_array = [CharSet::new(std::u32::MAX); WORD_SQUARE_SIZE];
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
let row_start = row_idx*(WORD_SQUARE_WIDTH as u8);
let mut row_word = [255u8; WORD_SQUARE_WIDTH];
for i in 0..col_idx {
row_word[i as usize] = code_array[ (row_start+i) as usize ];
}
let row_wordset = words_index_arg.rows()[&row_word];
let mut col_word = [255u8; WORD_SQUARE_HEIGHT];
for i in 0..row_idx {
col_word[i as usize] = code_array[ (col_idx + i*(WORD_SQUARE_WIDTH as u8)) as usize ];
}
let col_wordset = words_index_arg.cols()[&col_word];
charset_array[at_idx as usize] = col_wordset.and(&row_wordset);
// wrap to go from 0 to 255
let end_idx = start_idx.wrapping_sub(1);
while at_idx != end_idx {
// wrap to go from 255 (initial) to 0
if DEBUG_MODE {
println!();
println!(
"idx {} before wrapping add is {}",
at_idx,
code_array[at_idx as usize]
);
}
code_array[at_idx as usize] = code_array[at_idx as usize].wrapping_add(1);
if DEBUG_MODE {
| let matches = App::new(format!("Rust Word Rectangle Finder o{}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT))
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.setting(clap::AppSettings::SubcommandRequired)
.subcommand(SubCommand::with_name("compute")
.about("Does the actual computation.")
.arg(Arg::with_name("threads")
.default_value("4")
.takes_value(true)
.validator(|arg| {
match arg.parse::<u32>() {
Ok(_) => Ok(()),
Err(e) => Err(String::from(format!("Must provide a valid integer. {:?}", e))),
}
})
.help("Number of threads to use.")
.long("threads")
.short("t")
) | identifier_body |
main.rs | 32 => '$',
_ => return None,
};
return Some(res)
}
}
}
make_encode_decode!{
0 => 'a';
1 => 'e';
2 => 'i';
3 => 'o';
4 => 'r';
5 => 'n';
6 => 'l';
7 => 's';
8 => 't';
9 => 'u';
10 => 'p';
11 => 'c';
12 => 'd';
13 => 'k';
14 => 'y';
15 => 'g';
16 => 'h';
17 => 'b';
18 => 'v';
19 => 'f';
20 => 'w';
21 => 'z';
22 => 'j';
23 => 'x';
24 => '\'';
25 => '-';
26 => 'è';
27 => 'ê';
28 => 'ñ';
29 => 'é';
30 => 'm';
31 => 'q';
}
#[derive(Debug,Clone,Copy,PartialEq,Eq)]
struct CharSet {
pub internal:u32
}
impl CharSet {
fn new(internal:u32) -> CharSet {
return CharSet{internal}
}
fn add(&mut self, val:u8) {
if val > 31 {panic!("Invalid val {}", val)}
self.internal |= 2u32.pow(val as u32)
}
fn and(&self, other:&Self) -> Self {
Self{ internal: self.internal & other.internal }
}
fn has(&self, val:u8) -> bool {
if val > 31 {
panic!("Invalid val {}", val)
} else {
return (self.internal & 2u32.pow(val as u32)) > 0
}
}
}
impl Default for CharSet {
fn default() -> Self {
CharSet::new(0)
}
}
// NOTE: can only go up to 15. 16 would break everything
//const WORD_SQUARE_ORDER:usize = 6;
// const WORD_SQUARE_WIDTH:usize = 8;
// const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "width-2")]
const WORD_SQUARE_WIDTH:usize = 2;
#[cfg(feature = "width-3")]
const WORD_SQUARE_WIDTH:usize = 3;
#[cfg(feature = "width-4")]
const WORD_SQUARE_WIDTH:usize = 4;
#[cfg(feature = "width-5")]
const WORD_SQUARE_WIDTH:usize = 5;
#[cfg(feature = "width-6")]
const WORD_SQUARE_WIDTH:usize = 6;
#[cfg(feature = "width-7")]
const WORD_SQUARE_WIDTH:usize = 7;
#[cfg(feature = "width-8")]
const WORD_SQUARE_WIDTH:usize = 8;
#[cfg(feature = "width-9")]
const WORD_SQUARE_WIDTH:usize = 9;
#[cfg(feature = "width-10")]
const WORD_SQUARE_WIDTH:usize = 10;
#[cfg(feature = "width-11")]
const WORD_SQUARE_WIDTH:usize = 11;
#[cfg(feature = "width-12")]
const WORD_SQUARE_WIDTH:usize = 12;
#[cfg(feature = "width-13")]
const WORD_SQUARE_WIDTH:usize = 13;
#[cfg(feature = "width-14")]
const WORD_SQUARE_WIDTH:usize = 14;
#[cfg(feature = "width-15")]
const WORD_SQUARE_WIDTH:usize = 15;
#[cfg(feature = "height-2")]
const WORD_SQUARE_HEIGHT:usize = 2;
#[cfg(feature = "height-3")]
const WORD_SQUARE_HEIGHT:usize = 3;
#[cfg(feature = "height-4")]
const WORD_SQUARE_HEIGHT:usize = 4;
#[cfg(feature = "height-5")]
const WORD_SQUARE_HEIGHT:usize = 5;
#[cfg(feature = "height-6")]
const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "height-7")]
const WORD_SQUARE_HEIGHT:usize = 7;
#[cfg(feature = "height-8")]
const WORD_SQUARE_HEIGHT:usize = 8;
#[cfg(feature = "height-9")]
const WORD_SQUARE_HEIGHT:usize = 9;
#[cfg(feature = "height-10")]
const WORD_SQUARE_HEIGHT:usize = 10;
#[cfg(feature = "height-11")]
const WORD_SQUARE_HEIGHT:usize = 11;
#[cfg(feature = "height-12")]
const WORD_SQUARE_HEIGHT:usize = 12;
#[cfg(feature = "height-13")]
const WORD_SQUARE_HEIGHT:usize = 13;
#[cfg(feature = "height-14")]
const WORD_SQUARE_HEIGHT:usize = 14;
#[cfg(feature = "height-15")]
const WORD_SQUARE_HEIGHT:usize = 15;
//const WORD_ORDER_U8:u8 = WORD_SQUARE_ORDER as u8;
const WORD_SQUARE_SIZE:usize = WORD_SQUARE_WIDTH * WORD_SQUARE_HEIGHT;
type WideWord = [u8; WORD_SQUARE_WIDTH];
type TallWord = [u8; WORD_SQUARE_HEIGHT];
type WordSquare = [u8; WORD_SQUARE_SIZE];
#[derive(Debug,Default)]
struct WordIndex {
inner_rows: FnvHashMap<WideWord,CharSet>,
#[cfg(not(feature = "square"))]
inner_cols: FnvHashMap<TallWord,CharSet>,
}
impl WordIndex {
fn rows(&self) -> &FnvHashMap<WideWord,CharSet> {
&self.inner_rows
}
fn cols(&self) -> &FnvHashMap<TallWord,CharSet> {
#[cfg(not(feature = "square"))]
return &self.inner_cols;
#[cfg(feature = "square")]
return self.rows();
}
fn rows_mut(&mut self) -> &mut FnvHashMap<WideWord,CharSet> {
&mut self.inner_rows
}
#[cfg(not(feature = "square"))]
fn cols_mut(&mut self) -> &mut FnvHashMap<TallWord,CharSet> {
&mut self.inner_cols
}
}
fn print_word_square(sq:WordSquare){
let mut first = true;
for i in 0..WORD_SQUARE_HEIGHT {
let mut chars = Vec::new();
for j in 0..WORD_SQUARE_WIDTH {
chars.push(decode(sq[i*WORD_SQUARE_WIDTH + j]).unwrap());
}
let word = chars.iter().collect::<String>();
if !first {
print!("-");
}
print!("{}", word);
first = false;
}
println!();
}
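// Added example: for a 2x2 square whose rows decode to "at" and "to", the
// function prints the single line "at-to" to stdout.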
fn main() -> io::Result<()> {
let matches = App::new(format!("Rust Word Rectangle Finder o{}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT))
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.setting(clap::AppSettings::SubcommandRequired)
.subcommand(SubCommand::with_name("compute")
.about("Does the actual computation.")
.arg(Arg::with_name("threads")
.default_value("4")
.takes_value(true)
.validator(|arg| {
match arg.parse::<u32>() {
Ok(_) => Ok(()),
Err(e) => Err(format!("Must provide a valid integer. {:?}", e)),
}
})
.help("Number of threads to use.")
.long("threads")
.short("t")
)
.arg(Arg::with_name("wordlist")
.required(true)
.help("the wordlist file path, a plain-text UTF-8 file with each word separated by a newline")
)
.arg(Arg::with_name("ignore-empty-wordlist")
.long("ignore-empty-wordlist")
.help("Don't complain if there are no words of the necessary length in the given wordlist")
)
.arg(Arg::with_name("ignore-unencodeable")
.long("ignore-unencodeable")
.help("Don't show a warning when a word is dropped because it contains unencodeable characters.")
)
.arg(Arg::with_name("quiet")
.long("quiet")
.short("q")
.help("Don't show any status messages; STDERR will be empty if no errors occured.")
)
)
.subcommand(SubCommand::with_name("wordlist-preprocess")
.about("Takes in a wordlist (of various formats) and converts characters to a consistent set, for example 'а' (U+0430 CYRILLIC SMALL LETTER A) becomes 'a' (U+0061 LATIN SMALL LETTER A). Any words that would be ignored by the compute function are also filtered out.")
.arg(Arg::with_name("wiktionary-list-format")
.long("wiktionary-format")
.short("w")
.long_help("Input wordlist is in wiktionary \"all-titles\" format.")
.group("format")
)
.arg(Arg::with_name("plain-list-format")
.long("plain-format")
.short("p")
.long_help("Input wordlist is a plaintext UTF-8 newline-separated list of words")
.group("format")
)
.arg(Arg::with_name("input-filename")
.required(true)
.help("The path to the wordlist to read from, or \"-\" for stdin")
)
.arg(Arg::with_name("output-filename")
.required(true)
.help("The path to the wordlist to write to, or \"-\" for stdout")
)
).get_matches();
//println!("{:?}", matches.is_present("wordlist-preprocess"));
return match matches.subcommand() {
("compute", Some(m)) => compute_command(m),
("wordlist-preprocess", Some(m)) => wordlist_preprocess(m),
_ => panic!("This shouldn't happen"),
}
/*let mut args:Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Must have at least one argument (which sub-thing to run)");
return Ok(());
}
eprintln!("{:?}", args);
args.remove(0);
eprintln!("{:?}", args);
let name:&str = &(args.remove(0));
eprintln!("{:?}", args);
match name {
"wordlist-preprocess" => return wordlist_preprocess(args),
"compute" => return compute_command(args),
unfound_command => eprintln!("unrecognized command {}", unfound_command),
}*/
}
fn filter_word(word:&str) -> Option<String> {
let mut success = true;
let res = Some(word.chars().map(|c| {
match encode(c) {
Some(_) => c,
None => {
let chars:Vec<char> = c.to_string().skeleton_chars().collect();
if chars.len() != 1 {
success = false;
'a'
} else {
match encode(chars[0]) {
Some(_) => chars[0],
None => {success = false; 'a'},
}
}
},
}
}).collect::<String>());
if success {
return res
} else {
return None
}
}
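// Added note: `skeleton_chars` is assumed to come from the unicode_skeleton
// crate; it folds visually confusable characters (e.g. U+0430 CYRILLIC SMALL
// LETTER A) to a canonical skeleton, so a word is kept only if every
// character either encodes directly or has a single-character skeleton that
// does.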
fn wordlist_preprocess(args:&ArgMatches) -> io::Result<()> {
let in_file = File::open( args.value_of("input-filename" ).unwrap())?;
let out_file = File::create(args.value_of("output-filename").unwrap())?;
let wik_format = args.is_present("wiktionary-list-format");
let f = BufReader::new(in_file);
let mut fo = BufWriter::new(out_file);
let mut lines = f.lines();
if wik_format {
//Skip the first line
lines.next().unwrap()?;
}
for line_result in lines {
let line = line_result?;
let word;
if wik_format {
let mut split = line.split('\t');
split.next().unwrap(); // skip before tab
word = split.next().unwrap();
match split.next() {
Some(_) => panic!("Only one tab expected per line"),
None => (),
}
} else {
word = &line
}
match filter_word(word) {
Some(word) => writeln!(&mut fo, "{}", word)?,
None => (),
}
}
fo.flush()?;
return Ok(());
}
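// Added usage sketch (the binary and file names are hypothetical):
// word-rects wordlist-preprocess -w enwiki-all-titles.tsv words.txt
// word-rects compute --threads 8 words.txt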
fn make_words_index(
f_in: impl BufRead,
ignore_unencodeable: bool,
) -> io::Result<(u32, u32, WordIndex)> {
let mut index = WordIndex::default();
let mut count_row_words = 0;
#[cfg(not(feature = "square"))]
let mut count_col_words = 0;
let lines = f_in.lines();
for line_result in lines {
let word = line_result?;
let chars:Vec<char> = word.chars().collect();
if chars.len() != WORD_SQUARE_WIDTH && chars.len() != WORD_SQUARE_HEIGHT { continue }
let mut codes = Vec::new();
let mut all_encoded = true;
for c in chars.clone() {
match encode(c) {
Some(code) => codes.push(code),
None => {
all_encoded = false;
continue
},
}
}
if !all_encoded {
if !ignore_unencodeable {
eprintln!("Skipping {:?}, not all could be encoded",chars);
}
continue
}
if codes.len() == WORD_SQUARE_WIDTH {
count_row_words += 1;
let words_index = index.rows_mut();
let mut word = WideWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_WIDTH {
let i = (WORD_SQUARE_WIDTH - 1) - j;
// i runs from WORD_SQUARE_WIDTH - 1 down to 0, inclusive
let code = word[i];
word[i] = 255u8;
words_index.entry(word).or_default().add(code);
}
}
#[cfg(not(feature = "square"))]
if codes.len() == WORD_SQUARE_HEIGHT {
count_col_words += 1;
let words_index = index.cols_mut();
let mut word = TallWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_HEIGHT {
let i = (WORD_SQUARE_HEIGHT - 1) - j;
// i runs from WORD_SQUARE_HEIGHT - 1 down to 0, inclusive
let code = word[i];
word[i] = 255u8;
words_index.entry(word).or_default().add(code);
}
}
}
#[cfg(feature = "square")]
let count_col_words = count_row_words;
return Ok((count_row_words, count_col_words, index));
}
fn compute_command(args:&ArgMatches) -> io::Result<()> {
let loud = !args.is_present("quiet");
let ignore_empty_wordlist = args.is_present("ignore-empty-wordlist");
let ignore_unencodeable = args.is_present("ignore-unencodeable");
if loud {
eprintln!("Word square order is {}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT);
eprintln!("Start: creating index.");
}
let num_threads:u32 = args.value_of("threads").unwrap().parse().unwrap();
let plain_f = File::open(args.value_of("wordlist").unwrap())?;
let f = BufReader::new(plain_f);
let (count_row_words, count_col_words, index) = make_words_index(f, ignore_unencodeable)?;
if !ignore_empty_wordlist && (index.rows().is_empty() || index.cols().is_empty()) {
panic!("No words in wordlist!");
}
if loud {
eprintln!("Finished creating index, {} words x {} words.", count_row_words, count_col_words);
}
let (m2w_tx, m2w_rx) = spmc::channel::<(WordSquare,u8)>();
let (w2m_tx, w2m_rx) = std::sync::mpsc::sync_channel(16);
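// Added note: work items fan out to the workers over a single-producer/
// multi-consumer channel, while results funnel back through a bounded
// (capacity 16) mpsc channel so the printing thread applies backpressure.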
let mut worker_handles = Vec::new();
if loud {
eprintln!("Creating {} worker threads.", num_threads);
}
let index_arc = std::sync::Arc::new(index);
for _ in 0..num_threads {
let rxc = m2w_rx.clone();
let txc = w2m_tx.clone();
let my_index = std::sync::Arc::clone(&index_arc);
worker_handles.push(
thread::spawn( move || {
while let Ok(msg) = rxc.recv() {
compute(
&my_index,
msg.0,
msg.1,
WORD_SQUARE_SIZE as u8,
|a,b| txc.send((a,b)).unwrap()
);
}
})
);
}
drop(w2m_tx);
let printing_thread = thread::spawn(move || {
while let Ok(msg) = w2m_rx.recv() {
print_word_square(msg.0);
}
});
let code_array = [255u8; WORD_SQUARE_SIZE];
if loud {
eprintln!("Starting.");
}
compute(
index_arc.as_ref(),
code_array,
0u8,
WORD_SQUARE_WIDTH as u8,
|ca, idx| m2w_tx.send((ca,idx)).unwrap()
);
drop(m2w_tx);
//println!("Dropped");
for h in worker_handles {
h.join().unwrap();
//println!("Worker finished");
}
printing_thread.join().unwrap();
//println!("printing thread finished");
/*let mut char_counts:Vec<(char,u64)> = unused_chars.drain().collect();
char_counts.sort_unstable_by_key(|t| t.1);
for (k,v) in char_counts.iter() {
println!("Char {:?} had {} instances", k, v);
}*/
Ok(())
}
const DEBUG_MODE:bool = false;
fn compute<T:FnMut(WordSquare,u8)>(
words_index_arg:&WordIndex,
mut code_array:WordSquare,
start_idx:u8,
target_idx:u8,
mut on_result:T,
) {
let mut at_idx = start_idx;
let mut charset_array = [CharSet::new(std::u32::MAX); WORD_SQUARE_SIZE];
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
let row_start = row_idx*(WORD_SQUARE_WIDTH as u8);
let mut row_word = [255u8; WORD_SQUARE_WIDTH];
for i in 0..col_idx {
row_word[i as usize] = code_array[ (row_start+i) as usize ];
}
let row_wordset = words_index_arg.rows()[&row_word];
let mut col_word = [255u8; WORD_SQUARE_HEIGHT];
for i in 0..row_idx {
col_word[i as usize] = code_array[ (col_idx + i*(WORD_SQUARE_WIDTH as u8)) as usize ];
}
let col_wordset = words_index_arg.cols()[&col_word];
charset_array[at_idx as usize] = col_wordset.and(&row_wordset);
// wrap to go from 0 to 255
let end_idx = start_idx.wrapping_sub(1);
while at_idx != end_idx {
// wrap to go from 255 (initial) to 0
if DEBUG_MODE {
println!();
println!(
"idx {} before wrapping add is {}",
at_idx,
code_array[at_idx as usize]
);
}
code_array[at_idx as usize] = code_array[at_idx as usize].wrapping_add(1);
if DEBUG_MODE {
// app.rs
use crate::connection;
use crate::debug_adapter_protocol as dap;
use crate::hsp_ext;
use crate::hsprt;
use crate::hspsdk;
use std;
use std::path::PathBuf;
use std::sync::mpsc;
use std::thread;
const MAIN_THREAD_ID: i64 = 1;
const MAIN_THREAD_NAME: &'static str = "main";
fn threads() -> Vec<dap::Thread> {
vec![dap::Thread {
id: MAIN_THREAD_ID,
name: MAIN_THREAD_NAME.to_owned(),
}]
}
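// Added note (assumption): HSP scripts execute on a single interpreter
// thread, so the adapter always reports exactly one DAP thread with a
// fixed id.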
/// Variables reference id for the scope that holds the global variables.
const GLOBAL_SCOPE_REF: i64 = 1;
/// Identifies an HSP variable, an element of a variable, or something that groups variables (such as a module).
#[derive(Clone, Debug)]
pub(crate) enum VarPath {
Globals,
Static(usize),
}
/// Variables reference: the integer value VSCode uses to identify a variable or a variable element.
pub(crate) type VarRef = i64;
impl VarPath {
pub fn to_var_ref(&self) -> VarRef {
match *self {
VarPath::Globals => 1,
VarPath::Static(i) => 2 + i as i64,
}
}
pub fn from_var_ref(r: VarRef) -> Option<Self> {
match r {
1 => Some(VarPath::Globals),
i if i >= 2 => Some(VarPath::Static((i - 2) as usize)),
_ => None,
}
}
}
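// Added sketch (not in the original): the VarRef encoding reserves 1 for the
// globals scope and 2 + i for the i-th static variable, so the mapping must
// round-trip exactly for VSCode's variable requests to resolve.
#[cfg(test)]
mod var_path_tests {
use super::VarPath;
#[test]
fn var_ref_round_trip() {
assert!(matches!(
VarPath::from_var_ref(VarPath::Globals.to_var_ref()),
Some(VarPath::Globals)
));
assert!(matches!(
VarPath::from_var_ref(VarPath::Static(5).to_var_ref()),
Some(VarPath::Static(5))
));
// 0 is not a valid variables reference in this scheme.
assert!(matches!(VarPath::from_var_ref(0), None));
}
}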
#[derive(Clone, Debug)]
pub(crate) struct RuntimeState {
file_name: Option<String>,
file_path: Option<String>,
line: i32,
stopped: bool,
}
/// Operations the `Worker` can handle.
#[derive(Clone, Debug)]
pub(crate) enum Action {
/// The connection with VSCode has been established.
AfterConnected,
/// A request has arrived from VSCode.
AfterRequestReceived(dap::Msg),
/// Execution stopped on an assert.
AfterStopped(String, i32),
/// Just before the HSP runtime terminates.
BeforeTerminating,
AfterDebugInfoLoaded(hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>),
AfterGetVar {
seq: i64,
variables: Vec<dap::Variable>,
},
}
/// Handle for sending work to the `Worker`.
#[derive(Clone, Debug)]
pub(crate) struct Sender {
sender: mpsc::Sender<Action>,
}
impl Sender {
pub(crate) fn send(&self, action: Action) {
self.sender.send(action).map_err(|e| error!("{:?}", e)).ok();
}
}
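// Added note: send() deliberately swallows a closed-channel error; during
// shutdown the receiving worker may already be gone, and logging is the only
// useful recovery for a debug adapter.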
/// Mediates between the HSP runtime and VSCode.
pub(crate) struct Worker {
request_receiver: mpsc::Receiver<Action>,
connection_sender: Option<connection::Sender>,
hsprt_sender: Option<hsprt::Sender>,
is_connected: bool,
args: Option<dap::LaunchRequestArgs>,
state: RuntimeState,
debug_info: Option<hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>>,
source_map: Option<hsp_ext::source_map::SourceMap>,
#[allow(unused)]
join_handle: Option<thread::JoinHandle<()>>,
}
impl Worker {
pub fn new(hsprt_sender: hsprt::Sender) -> (Self, Sender) {
let (sender, request_receiver) = mpsc::channel::<Action>();
let app_sender = Sender { sender };
let (connection_worker, connection_sender) = connection::Worker::new(app_sender.clone());
let join_handle = thread::Builder::new()
.name("connection_worker".into())
.spawn(move || connection_worker.run())
.unwrap();
let worker = Worker {
request_receiver,
connection_sender: Some(connection_sender),
hsprt_sender: Some(hsprt_sender),
is_connected: false,
args: None,
state: RuntimeState {
file_path: None,
file_name: None,
line: 1,
stopped: false,
},
debug_info: None,
source_map: None,
join_handle: Some(join_handle),
};
(worker, app_sender)
}
fn is_launch_response_sent(&self) -> bool {
self.args.is_some()
}
pub fn run(mut self) {
self.connection_sender
.as_ref()
.unwrap()
.send(connection::Action::Connect);
loop {
match self.request_receiver.recv() {
Ok(action @ Action::BeforeTerminating) => {
self.handle(action);
break;
}
Ok(action) => {
self.handle(action);
continue;
}
Err(err) => {
error!("[app] {:?}", err);
break;
}
}
}
info!("[app] 終了");
}
/// Schedules an action to run the next time the HSP runtime is suspended.
/// If the runtime is already stopped, sends a message so the action runs immediately.
fn send_to_hsprt(&self, action: hsprt::Action) {
if let Some(sender) = self.hsprt_sender.as_ref() {
sender.send(action, self.state.stopped);
}
}
fn send_response(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: true,
e: response,
}));
}
}
fn send_response_failure(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: false,
e: response,
}));
}
}
fn send_event(&self, event: dap::Event) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Event { e: event }));
}
}
fn send_initialized_event(&self) {
if self.is_connected && self.is_launch_response_sent() {
self.send_event(dap::Event::Initialized);
}
}
fn send_pause_event(&self) {
if self.state.stopped && self.is_launch_response_sent() {
self.send_event(dap::Event::Stopped {
reason: "pause".to_owned(),
thread_id: MAIN_THREAD_ID,
});
}
}
fn on_request(&mut self, seq: i64, request: dap::Request) {
match request {
dap::Request::Launch { args } => {
self.args = Some(args);
self.load_source_map();
self.send_response(seq, dap::Response::Launch);
self.send_initialized_event();
}
dap::Request::SetExceptionBreakpoints { .. } => {
self.send_response(seq, dap::Response::SetExceptionBreakpoints);
self.send_pause_event();
}
dap::Request::ConfigurationDone => {
self.send_response(seq, dap::Response::ConfigurationDone);
}
dap::Request::Threads => {
self.send_response(seq, dap::Response::Threads { threads: threads() })
}
dap::Request::Source { source } => {
match source.and_then(|source| Some(std::fs::read_to_string(source.path?).ok()?)) {
Some(content) => self.send_response(seq, dap::Response::Source { content }),
None => self.send_response_failure(
seq,
dap::Response::Source {
content: "".to_owned(),
},
),
}
}
dap::Request::StackTrace { .. } => {
if self.state.file_path.is_none() {
let file_path = self
.state
.file_name
.as_ref()
.and_then(|file_name| self.resolve_file_path(file_name));
self.state.file_path = file_path;
}
let stack_frames = vec![dap::StackFrame {
id: 1,
name: "main".to_owned(),
line: std::cmp::max(1, self.state.line) as usize,
source: dap::Source {
name: "main".to_owned(),
path: self.state.file_path.to_owned(),
},
}];
self.send_response(seq, dap::Response::StackTrace { stack_frames });
}
dap::Request::Scopes { .. } => {
let scopes = vec![dap::Scope {
name: "グローバル".to_owned(),
variables_reference: GLOBAL_SCOPE_REF,
expensive: true,
}];
self.send_response(seq, dap::Response::Scopes { scopes });
}
dap::Request::Variables {
variables_reference,
} => {
if let Some(var_path) = VarPath::from_var_ref(variables_reference) {
self.send_to_hsprt(hsprt::Action::GetVar { seq, var_path });
}
}
dap::Request::Pause { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STOP as hspsdk::DebugMode,
));
self.send_response(
seq,
dap::Response::Pause {
thread_id: MAIN_THREAD_ID,
},
);
}
dap::Request::Continue { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_RUN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Continue);
self.send_event(dap::Event::Continued {
all_threads_continued: true,
});
self.state.stopped = false;
}
dap::Request::Next { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Next);
}
dap::Request::StepIn { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepIn);
}
dap::Request::StepOut { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepOut);
}
dap::Request::Disconnect { .. } => {
self.send_to_hsprt(hsprt::Action::Disconnect);
}
}
}
fn load_source_map(&mut self) {
if self.source_map.is_some() {
return;
}
let debug_info = match self.debug_info {
None => return,
Some(ref debug_info) => debug_info,
};
let args = match self.args {
None => return,
Some(ref args) => args,
};
let root = PathBuf::from(&args.root);
let mut source_map = hsp_ext::source_map::SourceMap::new(&root);
let file_names = debug_info.file_names();
source_map.add_search_path(PathBuf::from(&args.program).parent());
source_map.add_file_names(
&file_names
.iter()
.map(|name| name.as_str())
.collect::<Vec<&str>>(),
);
self.source_map = Some(source_map);
}
/// Resolves a file name to an absolute path.
/// FIXME: support files under `common` and unqualified include paths.
fn resolve_file_path(&self, file_name: &String) -> Option<String> {
if file_name == "???" {
return None;
}
let source_map = self.source_map.as_ref()?;
let full_path = source_map.resolve_file_name(file_name)?;
Some(full_path.to_str()?.to_owned())
}
fn handle(&mut self, action: Action) {
debug!("[app] {:?}", action);
match action {
Action::AfterRequestReceived(dap::Msg::Request { seq, e }) => {
self.on_request(seq, e);
}
Action::AfterRequestReceived(_) => {
warn!("[app] リクエストではない DAP メッセージを無視");
}
Action::AfterStopped(file_name, line) => {
let file_path = self.resolve_file_path(&file_name);
self.state = RuntimeState {
file_path,
file_name: Some(file_name),
line,
stopped: true,
};
self.send_pause_event();
}
Action::AfterConnected => {
self.is_connected = true;
self.send_initialized_event();
}
Action::BeforeTerminating => {
self.send_event(dap::Event::Terminated { restart: false });
// Discard the sub-workers.
self.hsprt_sender.take();
self.connection_sender.take();
if let Some(_) = self.join_handle.take() {
// NOTE: for some reason this never terminates, so we don't join.
// join_handle.join().unwrap();
}
}
Action::AfterDebugInfoLoaded(debug_info) => {
self.debug_info = Some(debug_info);
self.load_source_map();
}
Action::AfterGetVar { seq, variables } => {
self.send_response(seq, dap::Response::Variables { variables });
}
}
}
}
// stack.rs
// Copied from:
// rust/src/librustrt/stack.rs
// git: 70cef9474a3307ec763efc01fe6969e542083823
// stack_exhausted() function deleted, no other changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Rust stack-limit management
//!
//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
//! overflow for rust tasks. In this scheme, the prologue of all functions are
//! preceded with a check to see whether the current stack limits are being
//! exceeded.
//!
//! This module provides the functionality necessary in order to manage these
//! stack limits (which are stored in platform-specific locations). The
//! functions here are used at the borders of the task lifetime in order to
//! manage these limits.
//!
//! This function is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stack
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// ranges 80..89 and 110..119, especially considering the fact Garbage Collection
// never supposed to work on iOS. But as everybody knows it - there is a chance
// that those slots will be re-used, like it happened with key 95 (moved from
// JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected patch to LLVM which generated
// corresponding prolog, decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inner functions are flagged as
/// "inline(always)" because they're messing around with the stack limits. It
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were actual function calls).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn | (limit: uint) {
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl $$0x48+90*4, %eax
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
use libc::c_void;
return get_sp_limit() as uint;
extern {
fn get_sp_limit() -> *const c_void;
}
}
// iOS doesn't support segmented stacks yet. This function might be
// called by the runtime, though, so it is unsafe to mark it as
// unreachable; let's return a fixed constant.
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
1024
}
}
| target_record_sp_limit | identifier_name |
stack.rs | // Copied from:
// rust/src/librustrt/stack.rs
// git: 70cef9474a3307ec763efc01fe6969e542083823
// stack_exhausted() function deleted, no other changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Rust stack-limit management
//!
//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
//! overflow for Rust tasks. In this scheme, the prologue of every function is
//! preceded by a check to see whether the current stack limits are being
//! exceeded.
//!
//! This module provides the functionality necessary in order to manage these
//! stack limits (which are stored in platform-specific locations). The
//! functions here are used at the borders of the task lifetime in order to
//! manage these limits.
//!
//! This is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
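//
// A sketch of that prologue check (an illustration of the usual __morestack
// protocol; the exact codegen is up to LLVM):
//
//     if %rsp < stack_limit_in_tls {   // the limit recorded by record_sp_limit
//         call __morestack             // grow the stack, or abort the task
//     }
//     // ... normal function body ...
//
// which is why keeping the recorded limit accurate matters so much.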
// iOS related notes
//
// It is possible to implement it using the idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stacks.
// There is a pool of reserved slots for Apple internal use (0..119)
// The first dynamically allocated pthread key starts at 257 (on iOS 7),
// so using slot 149 should be pretty safe, ASSUMING space is reserved
// for every key below the first dynamic key.
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// (ranges 80..89 and 110..119), especially considering the fact that Garbage
// Collection was never supposed to work on iOS. But as everybody knows, there
// is a chance that those slots will be re-used, as happened with key 95
// (moved from JavaScriptCore to CoreText).
//
// Unfortunately, Apple rejected the patch to LLVM which generated the
// corresponding prologue, so the decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) | asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning, any further function calls may
/// trigger the morestack logic if you're not careful.
///
/// Also note that this and all of the inner functions are flagged as
/// "inline(always)" because they're messing around with the stack limits. It
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were actual function calls).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl $$0x48+90*4, %eax
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
use libc::c_void;
return get_sp_limit() as uint;
extern {
fn get_sp_limit() -> *const c_void;
}
}
// iOS doesn't support segmented stacks yet. This function might be
// called by the runtime, though, so it is unsafe to mark it as
// unreachable; let's return a fixed constant.
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
1024
}
}
| {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile"); | identifier_body |
stack.rs | // Copied from:
// rust/src/librustrt/stack.rs
// git: 70cef9474a3307ec763efc01fe6969e542083823
// stack_exhausted() function deleted, no other changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Rust stack-limit management
//!
//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
//! overflow for Rust tasks. In this scheme, the prologue of every function is
//! preceded by a check to see whether the current stack limits are being
//! exceeded.
//!
//! This module provides the functionality necessary in order to manage these
//! stack limits (which are stored in platform-specific locations). The
//! functions here are used at the borders of the task lifetime in order to
//! manage these limits.
//!
//! This is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using the idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stacks.
// There is a pool of reserved slots for Apple internal use (0..119)
// The first dynamically allocated pthread key starts at 257 (on iOS 7),
// so using slot 149 should be pretty safe, ASSUMING space is reserved
// for every key below the first dynamic key.
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// (ranges 80..89 and 110..119), especially considering the fact that Garbage
// Collection was never supposed to work on iOS. But as everybody knows, there
// is a chance that those slots will be re-used, as happened with key 95
// (moved from JavaScriptCore to CoreText).
//
// Unfortunately, Apple rejected the patch to LLVM which generated the
// corresponding prologue, so the decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
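//
// For reference, the TIB offsets written below (our summary, assuming the
// conventional TIB layout):
//
//     x86:    %fs:0x04 = stack top (hi),  %fs:0x08 = stack bottom (lo)
//     x86-64: %gs:0x08 = stack top (hi),  %gs:0x10 = stack bottom (lo)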
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
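// For example (sketch): with RED_ZONE = 20 * 1024 = 0x5000, a stack whose
// low end is stack_lo = 0x10000 gets its limit recorded as 0x15000.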
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning, any further function calls may
/// trigger the morestack logic if you're not careful.
///
/// Also note that this and all of the inner functions are flagged as
/// "inline(always)" because they're messing around with the stack limits. It
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were actual function calls).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $$0x48+90*4, %eax | unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl $$0x48+90*4, %eax
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
use libc::c_void;
return get_sp_limit() as uint;
extern {
fn get_sp_limit() -> *const c_void;
}
}
// iOS doesn't support segmented stacks yet. This function might be
// called by the runtime, though, so it is unsafe to mark it as
// unreachable; let's return a fixed constant.
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
1024
}
} | movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)] | random_line_split |
lib.rs | //! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! use futures::Future;
//! use ruma_client::Client;
//!
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let work = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .and_then(|session| {
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync` method:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Arc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> |
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build(connector),
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
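// Usage sketch (hypothetical credentials; printing `user_id` assumes the
// usual Display impl from ruma-identifiers):
//
//     let work = client
//         .register_user(Some("alice".to_string()), "secret".to_string())
//         .map(|session| println!("registered as {}", session.user_id));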
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occurred in the whole lifetime of
/// the logged-in user's account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>,
since: Option<String>,
set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
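// Usage sketch (assumes a futures 0.1 executor such as tokio, which is an
// assumption of this illustration, not a dependency shown here):
//
//     let work = client.sync(None, None, true).for_each(|response| {
//         // inspect response.rooms, response.presence, ...
//         Ok(())
//     });
//     // then hand `work` to the runtime, e.g. tokio::run(work.map_err(|_| ()));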
/// Makes a request to a Matrix API endpoint.
pub(crate) fn request<E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
}
}
Uri::from_str(url.as_ref())
.map(move |uri| (uri, hyper_request))
.map_err(Error::from)
})
.and_then(move |(uri, mut hyper_request)| {
*hyper_request.uri_mut() = uri;
data2.hyper.request(hyper_request).map_err(Error::from)
})
.and_then(|hyper_response| {
E::Response::future_from(hyper_response).map_err(Error::from)
})
}
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
| {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
} | identifier_body |
lib.rs | //! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! use futures::Future;
//! use ruma_client::Client;
//!
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let work = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .and_then(|session| {
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync` method:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Arc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
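// Persistence sketch (assumes `Session` implements serde's
// Serialize/Deserialize, which this file does not show):
//
//     if let Some(session) = client.session() {
//         std::fs::write("session.json", serde_json::to_string(&session)?)?;
//     }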
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build(connector),
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occurred in the whole lifetime of
/// the logged-in user's account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>, | set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn request<E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
}
}
Uri::from_str(url.as_ref())
.map(move |uri| (uri, hyper_request))
.map_err(Error::from)
})
.and_then(move |(uri, mut hyper_request)| {
*hyper_request.uri_mut() = uri;
data2.hyper.request(hyper_request).map_err(Error::from)
})
.and_then(|hyper_response| {
E::Response::future_from(hyper_response).map_err(Error::from)
})
}
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
} | since: Option<String>, | random_line_split |
lib.rs | //! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! use futures::Future;
//! use ruma_client::Client;
//!
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let work = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .and_then(|session| {
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync` method:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Arc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build(connector),
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occurred in the whole lifetime of
/// the logged-in user's account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>,
since: Option<String>,
set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn | <E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
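// Descriptive summary of the flow below: convert the typed request into a
// hyper request, graft its path and query onto the configured homeserver
// URL (appending `access_token` as a query pair when the endpoint requires
// authentication), send it through the shared hyper client, and finally
// convert the raw HTTP response back into the endpoint's typed `Response`.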
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
}
}
Uri::from_str(url.as_ref())
.map(move |uri| (uri, hyper_request))
.map_err(Error::from)
})
.and_then(move |(uri, mut hyper_request)| {
*hyper_request.uri_mut() = uri;
data2.hyper.request(hyper_request).map_err(Error::from)
})
.and_then(|hyper_response| {
E::Response::future_from(hyper_response).map_err(Error::from)
})
}
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
| request | identifier_name |
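// Editor's sketch (an addition, not part of the dataset row above): how the
// registration and sync conveniences shown above chain together under futures 0.1.
// Assumes a `client` built via `Client::https` and a futures runtime such as
// `tokio`; everything else mirrors the crate-level docs.
fn sketch_guest_sync<C: Connect + 'static>(client: Client<C>) -> impl Future<Item = (), Error = Error> {
    let sync_client = client.clone();
    client.register_guest().and_then(move |_session| {
        sync_client
            .sync(None, None, true) // filter, since, set_presence
            .take(1) // stop after the first sync response
            .for_each(|_response| Ok(()))
    })
}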
lib.rs | //! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! use futures::Future;
//! use ruma_client::Client;
//!
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let work = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .and_then(|session| {
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
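//!
//! A minimal sketch of restoring a session (assuming you persisted the value returned by
//! `Client::session` earlier; how you serialize it is up to you):
//!
//! ```no_run
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! // Grab the session after logging in, persist it somewhere...
//! let session = client.session();
//!
//! // ...and later build a client that starts out logged in:
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! let restored = Client::https(homeserver_url, session).unwrap();
//! ```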
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync` method:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Arc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occurred in the whole lifetime of
/// the logged-in user's account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>,
since: Option<String>,
set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn request<E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication |
}
Uri::from_str(url.as_ref())
.map(move |uri| (uri, hyper_request))
.map_err(Error::from)
})
.and_then(move |(uri, mut hyper_request)| {
*hyper_request.uri_mut() = uri;
data2.hyper.request(hyper_request).map_err(Error::from)
})
.and_then(|hyper_response| {
E::Response::future_from(hyper_response).map_err(Error::from)
})
}
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
| {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
} | conditional_block |
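// Editor's sketch (an addition, not part of the dataset row above): the `url` crate
// calls that `request` uses to splice an access token into the endpoint URL,
// isolated so it can run on its own. Only the `url` crate is assumed.
fn sketch_build_request_url() {
    let mut url = url::Url::parse("https://example.com").unwrap();
    // `request` copies the path and query over from the hyper request...
    url.set_path("/_matrix/client/r0/sync");
    url.set_query(Some("timeout=30000"));
    // ...then appends the token for endpoints that require authentication:
    url.query_pairs_mut().append_pair("access_token", "TOKEN");
    assert_eq!(
        url.as_str(),
        "https://example.com/_matrix/client/r0/sync?timeout=30000&access_token=TOKEN"
    );
}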
main.rs | // #![feature(alloc_system)]
// extern crate alloc_system;
extern crate regex;
extern crate argparse;
use regex::Regex;
use std::fs::File;
use argparse::{ArgumentParser, Store};
use std::collections::HashSet;
use std::collections::BTreeMap;
use std::io::{BufReader, BufRead, BufWriter, Write};
// print $seqgene "Gene\tCount\tdesigns-present\n";
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// open ($stats, ">", $logfile) or die $!;
// print $stats "Total\tMatched\n";
// print $stats $counttotal. "\t". $countmatched. "\n";
// close($stats);
fn main() {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if *v > 0 {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fasta file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening sam file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
// only count the mapped read if the 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a multiple alignment (FLAG = 0)
if al_arr[1] != "0" {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandatory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches into the string if needed
if found_mismatch |
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) {
Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
}
}
// --------- end of basic algorithm ---
}
// (mapped_geneids, count_total)
(count_total, count_matched)
}
| {
for pos in mm_positions {
// note: `String::insert_str` (stable since Rust 1.16) inserts an "X" marker at the mismatch offset
match_string.insert_str(pos, "X");
}
} | conditional_block |
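// Editor's sketch (an addition, not part of the dataset row above): the CIGAR
// expansion step of `process_sam`, isolated into a runnable check. Only the
// `regex` crate is assumed; the expected strings follow the comment in the code.
fn sketch_expand_cigar() {
    let match_string_re = regex::Regex::new(r"([0-9]+)([MIDSH])").unwrap();
    let expand = |cigar: &str| {
        let mut out = String::new();
        for caps in match_string_re.captures_iter(cigar) {
            let n: usize = caps[1].parse().unwrap();
            for _ in 0..n {
                out.push_str(&caps[2]); // repeat the op letter n times
            }
        }
        out
    };
    assert_eq!(expand("20M"), "MMMMMMMMMMMMMMMMMMMM");
    assert_eq!(expand("10M1I5D"), "MMMMMMMMMMIDDDDD");
}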
main.rs | // #![feature(alloc_system)]
// extern crate alloc_system;
extern crate regex;
extern crate argparse;
use regex::Regex;
use std::fs::File;
use argparse::{ArgumentParser, Store};
use std::collections::HashSet;
use std::collections::BTreeMap;
use std::io::{BufReader, BufRead, BufWriter, Write};
// print $seqgene "Gene\tCount\tdesigns-present\n";
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// open ($stats, ">", $logfile) or die $!;
// print $stats "Total\tMatched\n";
// print $stats $counttotal. "\t". $countmatched. "\n";
// close($stats);
fn main() {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if *v > 0 {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fasta file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) | // alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
// only count the mapped read if the 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a multiple alignment (FLAG = 0)
if al_arr[1] != "0" {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandatory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches into the string if needed
if found_mismatch {
for pos in mm_positions {
// note: `String::insert_str` (stable since Rust 1.16) inserts an "X" marker at the mismatch offset
match_string.insert_str(pos, "X");
}
}
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) {
Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
}
}
// --------- end of basic algorithm ---
}
// (mapped_geneids, count_total)
(count_total, count_matched)
}
| {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening sam file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the | identifier_body |
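// Editor's sketch (an addition, not part of the dataset row above): what the
// MD:Z mismatch regex above extracts from an optional SAM field. The record
// line is made up for illustration; only the `regex` crate is assumed.
fn sketch_md_tag() {
    let sam_mismatch_re = regex::Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").unwrap();
    let line = "read1\t0\tGENE_1\t1\t42\t20M\t*\t0\t0\tACGT\tIIII\tMD:Z:12A7";
    for caps in sam_mismatch_re.captures_iter(line) {
        // 12 matching bases precede the mismatched reference base "A"
        assert_eq!(caps[1].parse::<usize>().unwrap(), 12);
        assert_eq!(&caps[2], "A");
    }
}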
main.rs | // #![feature(alloc_system)]
// extern crate alloc_system;
extern crate regex;
extern crate argparse;
use regex::Regex;
use std::fs::File;
use argparse::{ArgumentParser, Store};
use std::collections::HashSet;
use std::collections::BTreeMap;
use std::io::{BufReader, BufRead, BufWriter, Write};
// print $seqgene "Gene\tCount\tdesigns-present\n";
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// open ($stats, ">", $logfile) or die $!;
// print $stats "Total\tMatched\n";
// print $stats $counttotal. "\t". $countmatched. "\n";
// close($stats);
fn main() {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if *v > 0 {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fasta file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening sam file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
// only count the mapped read if the 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a multiple alignment (FLAG = 0)
if al_arr[1] != "0" {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandatory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches into the string if needed
if found_mismatch {
for pos in mm_positions {
// note: `String::insert_str` (stable since Rust 1.16) inserts an "X" marker at the mismatch offset
match_string.insert_str(pos, "X");
}
}
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) { | }
}
// --------- end of basic algorithm ---
}
// (mapped_geneids, count_total)
(count_total, count_matched)
} | Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
} | random_line_split |
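// Editor's sketch (an addition, not part of the dataset row above): the per-gene
// tallying pattern used when writing the designs file, reduced to the entry API.
// Standard library only; the design ids are made up for illustration.
fn sketch_gene_tally() {
    use std::collections::BTreeMap;
    let design_ids = ["TP53_1", "TP53_2", "BRCA1_1"];
    let mut targets_matched = BTreeMap::<String, u32>::new();
    for id in &design_ids {
        // The gene id is everything before the first '_', as in the mapper above.
        let gid = id.split('_').next().unwrap().to_string();
        *targets_matched.entry(gid).or_insert(0) += 1;
    }
    assert_eq!(targets_matched.get("TP53"), Some(&2));
}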
main.rs | // #![feature(alloc_system)]
// extern crate alloc_system;
extern crate regex;
extern crate argparse;
use regex::Regex;
use std::fs::File;
use argparse::{ArgumentParser, Store};
use std::collections::HashSet;
use std::collections::BTreeMap;
use std::io::{BufReader, BufRead, BufWriter, Write};
// print $seqgene "Gene\tCount\tdesigns-present\n";
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// open ($stats, ">", $logfile) or die $!;
// print $stats "Total\tMatched\n";
// print $stats $counttotal. "\t". $countmatched. "\n";
// close($stats);
fn main() {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if *v > 0 {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn | (fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fasta file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening sam file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
// only count the mapped read if the 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a multiple alignment (FLAG = 0)
if al_arr[1] != "0" {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandatory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches into the string if needed
if found_mismatch {
for pos in mm_positions {
// note: `String::insert_str` (stable since Rust 1.16) inserts an "X" marker at the mismatch offset
match_string.insert_str(pos, "X");
}
}
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) {
Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
}
}
// --------- end of basic algorithm ---
}
// (mapped_geneids, count_total)
(count_total, count_matched)
}
| process_fasta | identifier_name |
main.rs | extern crate dotenv;
extern crate iron;
extern crate handlebars;
extern crate handlebars_iron as hbs;
#[macro_use]
extern crate router;
#[cfg(not(feature = "serde_type"))]
extern crate rustc_serialize;
extern crate mount;
extern crate staticfile;
extern crate reqwest;
extern crate serde_json;
extern crate iron_sessionstorage;
extern crate urlencoded;
use iron::prelude::*;
use iron::headers::ContentType;
use iron::modifiers::Redirect;
use iron::{Url, status};
use hbs::{Template, HandlebarsEngine, DirectorySource};
use rustc_serialize::json::{Json};
use staticfile::Static;
use mount::Mount;
use serde_json::Value;
use iron_sessionstorage::traits::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use urlencoded::UrlEncodedQuery;
use dotenv::dotenv;
use std::env;
use std::io::Read;
use std::collections::BTreeMap;
use std::path::Path;
use std::collections::HashMap;
static INSTAGRAM_OAUTH_URI: &'static str = "https://api.instagram.com/oauth/authorize/";
static GRANT_TYPE: &'static str = "authorization_code";
fn value_to_json(x: Value) -> Json {
match x {
Value::Number(ref x) if x.is_i64() => Json::I64(x.as_i64().unwrap()),
Value::Number(ref x) if x.is_u64() => Json::U64(x.as_u64().unwrap()),
Value::Number(ref x) if x.is_f64() => Json::F64(x.as_f64().unwrap()),
Value::String(x) => Json::String(x),
Value::Array(x) => Json::Array(x
.into_iter()
.map(|x| value_to_json(x))
.collect::<Vec<Json>>()
),
Value::Object(x) => {
let mut buf = BTreeMap::<String, Json>::new();
for (key, value) in x.into_iter() {
buf.insert(key, value_to_json(value));
}
Json::Object(buf)
},
Value::Bool(x) => Json::Boolean(x),
_ => Json::Null,
}
}
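// Editor's sketch (an addition, not part of the original source): a quick check of
// `value_to_json` on a small document, using the serde_json and rustc_serialize
// types the file already imports.
#[cfg(test)]
mod value_to_json_tests {
    use super::*;

    #[test]
    fn converts_nested_values() {
        let v: Value = ::serde_json::from_str(r#"{"ok": true, "tags": ["a"]}"#).unwrap();
        let j = value_to_json(v);
        assert_eq!(j.find("ok"), Some(&Json::Boolean(true)));
    }
}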
#[derive(Debug)]
struct AccessToken(String);
impl iron_sessionstorage::Value for AccessToken {
fn get_key() -> &'static str { "access_token" }
fn into_raw(self) -> String { self.0 }
fn from_raw(value: String) -> Option<Self> {
Some(AccessToken(value))
}
}
fn main() {
dotenv().ok();
let port = match env::var("PORT") {
Ok(p) => p,
Err(_) => "3000".to_string(),
};
let redirect_url = env::var("REDIRECT_URL").expect("lack of redirect url.");
let client_id = env::var("INSTAGRAM_CLIENT_ID").expect("lack of instagram client id.");
let client_secret = env::var("INSTAGRAM_CLIENT_SECRET").expect("lack of instagram client secret.");
let authorization_uri = format!("{}?client_id={}&redirect_uri={}&response_type=code&scope={}",
INSTAGRAM_OAUTH_URI,
client_id,
redirect_url,
"public_content".to_string());
let router = router!(
index: get "/" => move |req: &mut Request| {
match req.url.clone().query() {
Some(query) => {
let code = query.split("=").last().expect("query parsing failed").to_string();
let params = [
("client_id", client_id.clone()),
("client_secret", client_secret.clone()),
("grant_type", GRANT_TYPE.clone().to_string()),
("redirect_uri", redirect_url.clone()),
("code", code.to_string())
];
let http_client = reqwest::Client::new().expect("creating HTTP client failed");
let mut result = http_client.post("https://api.instagram.com/oauth/access_token")
.form(¶ms)
.send()
.expect("send Request failed");
let result_json = result.json::<HashMap<String, Value>>().expect("Parse JSON failed");
let data = match result_json.get("access_token") {
Some(at) => {
let access_token = at.as_str().unwrap();
req.session().set(AccessToken(access_token.to_string())).unwrap();
let url = format!("https://api.instagram.com/v1/tags/nofilter/media/recent?access_token={}", access_token);
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.fold(HashMap::<String, Json>::new(), |mut acc, (key, value)| {
acc.insert(key, value_to_json(value));
acc | let mut resp = Response::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(Response::with((status::Found, Redirect(
Url::parse(redirect_url.as_str()).expect("parse url failed")
))))
},
None => {
let mut resp = Response::new();
let data = BTreeMap::<String, Json>::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(resp)
},
}
},
oauth: get "/oauth" => move |_: &mut Request| {
Ok(Response::with((status::Found, Redirect(
Url::parse(authorization_uri.as_str()).expect(format!("authorization_uri is invalid => {}", authorization_uri).as_str())
))))
},
api_username: get "/api/username" => move |req: &mut Request| {
let username = match req.url.clone().query() {
Some(query) => query.split("=").last().expect("query parsing failed"),
_ => ""
}.to_string();
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
if access_token.len() == 0 {
return Ok(Response::with((ContentType::json().0, status::Ok, "{}")))
};
let url = format!("https://api.instagram.com/v1/users/search?q={}&access_token={}", username, access_token.to_string());
let http_client = reqwest::Client::new().expect("creating HTTP client failed");
let mut buffer = String::new();
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.read_to_string(&mut buffer)
.expect("read JSON string failed")
;
Ok(Response::with((ContentType::json().0, status::Ok, buffer)))
},
api_hashtag: get "/api/hashtag" => move |req: &mut Request| {
fn get_query(x: Option<&Vec<String>>) -> &str {
match x {
Some(y) => match y.first() {
Some(z) => z.as_str(),
None => "",
},
None => "",
}
}
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
let (user_id, hashtag) = match req.get_ref::<UrlEncodedQuery>() {
Ok(queries) => (get_query(queries.get("user_id")), get_query(queries.get("hashtag"))),
_ => ("", "")
};
let url = format!(
"https://api.instagram.com/v1/users/{}/media/recent/?access_token={}",
user_id.to_string(),
access_token.to_string()
);
let http_client = reqwest::Client::new().expect("creating HTTP client failed");
let response = http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.filter(|x| { (&x.0).as_str() == "data" })
.map(|x| {
match x.1 {
Value::Array(ys) => {
ys
.into_iter()
.filter(|media| {
if let &Value::Object(ref m) = media {
if let &Value::Array(ref tags) = m.get("tags").unwrap() {
tags.contains(&Value::String(hashtag.to_string()))
} else { false }
} else { false }
})
.map(value_to_json)
.collect::<Vec<Json>>()
},
_ => vec![],
}
})
.fold(vec![], |mut acc, mut xs| {
acc.append(&mut xs);
acc
})
;
Ok(Response::with((ContentType::json().0, status::Ok, Json::Array(response).to_string())))
}
);
let mut hbse = HandlebarsEngine::new();
hbse.add(Box::new(DirectorySource::new("./templates/", ".hbs")));
hbse.reload().expect("template can't reload correctly.");
let mut mount = Mount::new();
mount
.mount("/css", Static::new(Path::new("assets/css")))
.mount("/js", Static::new(Path::new("assets/js")))
.mount("/", router);
let mut chain = Chain::new(mount);
let session = SessionStorage::new(SignedCookieBackend::new(b"my_cookie_secret".to_vec()));
chain.link_around(session);
chain.link_after(hbse);
println!("Server start on {}", port);
Iron::new(chain).http(format!("0.0.0.0:{}", port)).expect("server failed to start.");
} | })
},
None => HashMap::<String, Json>::new(),
};
| random_line_split |
main.rs | extern crate dotenv;
extern crate iron;
extern crate handlebars;
extern crate handlebars_iron as hbs;
#[macro_use]
extern crate router;
#[cfg(not(feature = "serde_type"))]
extern crate rustc_serialize;
extern crate mount;
extern crate staticfile;
extern crate reqwest;
extern crate serde_json;
extern crate iron_sessionstorage;
extern crate urlencoded;
use iron::prelude::*;
use iron::headers::ContentType;
use iron::modifiers::Redirect;
use iron::{Url, status};
use hbs::{Template, HandlebarsEngine, DirectorySource};
use rustc_serialize::json::{Json};
use staticfile::Static;
use mount::Mount;
use serde_json::Value;
use iron_sessionstorage::traits::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use urlencoded::UrlEncodedQuery;
use dotenv::dotenv;
use std::env;
use std::io::Read;
use std::collections::BTreeMap;
use std::path::Path;
use std::collections::HashMap;
static INSTAGRAM_OAUTH_URI: &'static str = "https://api.instagram.com/oauth/authorize/";
static GRANT_TYPE: &'static str = "authorization_code";
fn value_to_json(x: Value) -> Json | }
}
#[derive(Debug)]
struct AccessToken(String);
impl iron_sessionstorage::Value for AccessToken {
fn get_key() -> &'static str { "access_token" }
fn into_raw(self) -> String { self.0 }
fn from_raw(value: String) -> Option<Self> {
Some(AccessToken(value))
}
}
fn main() {
dotenv().ok();
let port = match env::var("PORT") {
Ok(p) => p,
Err(_) => "3000".to_string(),
};
let redirect_url = env::var("REDIRECT_URL").expect("lack of redirect url.");
let client_id = env::var("INSTAGRAM_CLIENT_ID").expect("lack of instagram client id.");
let client_secret = env::var("INSTAGRAM_CLIENT_SECRET").expect("lack of instagram client secret.");
let authorization_uri = format!("{}?client_id={}&redirect_uri={}&response_type=code&scope={}",
INSTAGRAM_OAUTH_URI,
client_id,
redirect_url,
"public_content".to_string());
let router = router!(
index: get "/" => move |req: &mut Request| {
match req.url.clone().query() {
Some(query) => {
let code = query.split("=").last().expect("query parsing failed").to_string();
let params = [
("client_id", client_id.clone()),
("client_secret", client_secret.clone()),
("grant_type", GRANT_TYPE.clone().to_string()),
("redirect_uri", redirect_url.clone()),
("code", code.to_string())
];
let http_client = reqwest::Client::new().expect("creating HTTP client failed");
let mut result = http_client.post("https://api.instagram.com/oauth/access_token")
.form(¶ms)
.send()
.expect("send Request failed");
let result_json = result.json::<HashMap<String, Value>>().expect("Parse JSON failed");
let data = match result_json.get("access_token") {
Some(at) => {
let access_token = at.as_str().unwrap();
req.session().set(AccessToken(access_token.to_string())).unwrap();
let url = format!("https://api.instagram.com/v1/tags/nofilter/media/recent?access_token={}", access_token);
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.fold(HashMap::<String, Json>::new(), |mut acc, (key, value)| {
acc.insert(key, value_to_json(value));
acc
})
},
None => HashMap::<String, Json>::new(),
};
let mut resp = Response::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
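// Note: `resp` above is constructed but never returned; the handler
// redirects instead, so the rendered template is currently discarded.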
Ok(Response::with((status::Found, Redirect(
Url::parse(redirect_url.as_str()).expect("parse url failed")
))))
},
None => {
let mut resp = Response::new();
let data = BTreeMap::<String, Json>::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(resp)
},
}
},
oauth: get "/oauth" => move |_: &mut Request| {
Ok(Response::with((status::Found, Redirect(
Url::parse(authorization_uri.as_str()).expect(format!("authorization_uri is invalid => {}", authorization_uri).as_str())
))))
},
api_username: get "/api/username" => move |req: &mut Request| {
let username = match req.url.clone().query() {
Some(query) => query.split("=").last().expect("query parsing is failed"),
_ => ""
}.to_string();
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
if access_token.len() == 0 {
return Ok(Response::with((ContentType::json().0, status::Ok, "{}")))
};
let url = format!("https://api.instagram.com/v1/users/search?q={}&access_token={}", username, access_token.to_string());
let http_client = reqwest::Client::new().expect("failed to create HTTP client");
let mut buffer = String::new();
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.read_to_string(&mut buffer)
.expect("read JSON string failed")
;
Ok(Response::with((ContentType::json().0, status::Ok, buffer)))
},
api_hashtag: get "/api/hashtag" => move |req: &mut Request| {
fn get_query(x: Option<&Vec<String>>) -> &str {
match x {
Some(y) => match y.first() {
Some(z) => z.as_str(),
None => "",
},
None => "",
}
}
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
let (user_id, hashtag) = match req.get_ref::<UrlEncodedQuery>() {
Ok(queries) => (get_query(queries.get("user_id")), get_query(queries.get("hashtag"))),
_ => ("", "")
};
let url = format!(
"https://api.instagram.com/v1/users/{}/media/recent/?access_token={}",
user_id.to_string(),
access_token.to_string()
);
let http_client = reqwest::Client::new().expect("failed to create HTTP client");
let response = http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.filter(|x| { (&x.0).as_str() == "data" })
.map(|x| {
match x.1 {
Value::Array(ys) => {
ys
.into_iter()
.filter(|media| {
if let &Value::Object(ref m) = media {
if let &Value::Array(ref tags) = m.get("tags").unwrap() {
tags.contains(&Value::String(hashtag.to_string()))
} else { false }
} else { false }
})
.map(value_to_json)
.collect::<Vec<Json>>()
},
_ => vec![],
}
})
.fold(vec![], |mut acc, mut xs| {
acc.append(&mut xs);
acc
})
;
Ok(Response::with((ContentType::json().0, status::Ok, Json::Array(response).to_string())))
}
);
let mut hbse = HandlebarsEngine::new();
hbse.add(Box::new(DirectorySource::new("./templates/", ".hbs")));
hbse.reload().expect("templates failed to reload correctly.");
let mut mount = Mount::new();
mount
.mount("/css", Static::new(Path::new("assets/css")))
.mount("/js", Static::new(Path::new("assets/js")))
.mount("/", router);
let mut chain = Chain::new(mount);
let session = SessionStorage::new(SignedCookieBackend::new(b"my_cookie_secret".to_vec()));
chain.link_around(session);
chain.link_after(hbse);
println!("Server start on {}", port);
Iron::new(chain).http(format!("0.0.0.0:{}", port)).expect("Server start process is failed.");
}
// endpoint.rs
use std::fmt;
use std::fs;
use std::io::{
self,
Read,
Write,
};
use std::num::ParseIntError;
use std::str;
use super::Driver;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SlotFunction(pub u8);
impl SlotFunction {
pub fn slot(&self) -> u8 {
self.0 >> 3
}
pub fn function(&self) -> u8 {
self.0 & 0x7
}
}
impl fmt::Debug for SlotFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SlotFunction")
.field("slot", &(self.0 >> 3))
.field("function", &(self.0 & 0x7))
.finish()
}
}
impl fmt::Display for SlotFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:02x}.{}", self.0 >> 3, self.0 & 0x7)
}
}
impl str::FromStr for SlotFunction {
type Err = ::failure::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let r = s.as_bytes();
ensure!(r.len() <= 4, "String too long for PCI device.function: {:?}", s);
// short: 0.0, long: 1f.7
let (dev_s, fun_s) = if r.len() == 3 && r[1] == b'.' {
(&s[0..1], &s[2..3])
} else if r.len() == 4 && r[2] == b'.' {
(&s[0..2], &s[3..4])
} else {
bail!("Couldn't find '.' in valid place for PCI device.function: {:?}", s);
};
let dev = with_context!(("invalid PCI device: {}", dev_s),
u8::from_str_radix(dev_s, 16).map_err(|e| e.into())
)?;
let fun = with_context!(("invalid PCI function: {}", fun_s),
Ok(u8::from_str_radix(fun_s, 8)?)
)?;
ensure!(dev < 0x20, "invalid PCI device: {} (too big)", dev);
ensure!(fun < 0x08, "invalid PCI function: {} (too big)", fun);
Ok(SlotFunction(dev << 3 | fun))
}
}
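// Illustrative round trip, using values from the unit tests below:
// let df: SlotFunction = "1f.3".parse().unwrap();
// assert_eq!((df.slot(), df.function()), (0x1f, 3));
// assert_eq!(df.to_string(), "1f.3");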
fn read_trimmed_info_file(ep: PciEndpoint, name: &str) -> crate::AResult<String> {
with_context!(("couldn't read info file {} for PCI device {}", name, ep), {
let mut f = fs::File::open(ep.device_file(name))?;
let mut result = String::new();
f.read_to_string(&mut result)?;
Ok(result.trim().into())
})
}
fn read_hex_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
ensure!(value.starts_with("0x"), "info {} for PCI device {} doesn't start with '0x': {:?}", name, ep, value);
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value[2..], 16)?)
})
}
fn read_decimal_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value, 10)?)
})
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciBus {
pub domain: u16,
pub bus: u8,
}
impl fmt::Display for PciBus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04x}:{:02x}", self.domain, self.bus)
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciEndpoint {
pub bus: PciBus,
pub slot_function: SlotFunction,
}
impl PciEndpoint {
fn device_file(&self, name: &str) -> String {
format!("/sys/bus/pci/devices/{}/{}", *self, name)
}
pub fn is_enabled(&self) -> crate::AResult<bool> {
match read_trimmed_info_file(*self, "enable")?.as_str() {
"0" => Ok(false),
"1" => Ok(true),
e => bail!("Invalid 'enable' value {:?} for PCI device {}", e, self),
}
}
pub fn scoped_enable(&self) -> crate::AResult<ScopedEnable> {
if !self.is_enabled()? {
let scoped_enable = ScopedEnable { ep: Some(*self) };
self.enable()?;
Ok(scoped_enable)
} else {
Ok(ScopedEnable { ep: None })
}
}
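// Hedged usage sketch (the endpoint string is hypothetical): the guard
// re-disables the device on drop only if this call was the one that enabled it.
// let ep: PciEndpoint = "0000:03:00.0".parse()?;
// let _guard = ep.scoped_enable()?;
// // ... read sysfs attributes while the device is enabled ...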
pub fn enable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: enable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"1")?;
Ok(())
})
}
pub fn disable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: disable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"0")?;
Ok(())
})
}
pub fn vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "vendor", u16::from_str_radix).map(VendorId)
}
pub fn device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "device", u16::from_str_radix).map(DeviceID)
}
pub fn subsystem_vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "subsystem_vendor", u16::from_str_radix).map(VendorId)
}
pub fn subsystem_device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "subsystem_device", u16::from_str_radix).map(DeviceID)
}
pub fn class(&self) -> crate::AResult<Class> {
let v = read_hex_info_file::<u32>(*self, "class", u32::from_str_radix)?;
let class_code = ClassCode((v >> 16) as u8);
let subclass_code = SubClassCode((v >> 8) as u8);
let programming_interface = ProgrammingInterface(v as u8);
Ok(Class{class_code, subclass_code, programming_interface})
}
/// Bridges have a secondary bus (the bus directly connected devices on the other side are on)
pub fn secondary_bus(&self) -> crate::AResult<PciBus> {
let bus = read_decimal_info_file::<u8>(*self, "secondary_bus_number", u8::from_str_radix)?;
Ok(PciBus {
domain: self.bus.domain,
bus,
})
}
pub fn driver(&self) -> crate::AResult<Option<Driver>> {
let link = self.device_file("driver");
match fs::symlink_metadata(&link) {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(e) => bail!("Couldn't locate driver for PCI device {}: {}", self, e),
Ok(attr) => if !attr.file_type().is_symlink() {
bail!("driver for PCI device {} not a symlink", self);
},
}
let path = with_context!(("Couldn't follow driver symlink for PCI device {}", self),
Ok(fs::canonicalize(link)?)
)?;
Ok(Some(Driver{path}))
}
}
impl fmt::Display for PciEndpoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}", self.bus, self.slot_function)
}
}
impl str::FromStr for PciEndpoint {
type Err = ::failure::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// max len: 0000:00:00.0
// short: 0:0.0
ensure!(s.len() <= 12, "PCI endpoint too long: {:?}", s);
let (domain, bus_s, devfun_s) = {
let mut parts = s.split(':');
let p1 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
let p2 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
match parts.next() {
None => (0, p1, p2),
Some(p3) => {
ensure!(parts.next().is_none(), "At most two ':' in PCI endpoint: {:?}", s);
let domain = with_context!(("invalid PCI domain: {}", p1),
Ok(u16::from_str_radix(p1, 16)?)
)?;
(domain, p2, p3)
}
}
};
let bus = with_context!(("invalid PCI bus: {}", bus_s),
Ok(u8::from_str_radix(bus_s, 16)?)
)?;
let slot_function = devfun_s.parse::<SlotFunction>()?;
let bus = PciBus {
domain,
bus,
};
Ok(PciEndpoint {
bus,
slot_function,
})
}
}
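// Illustrative parses (addresses are hypothetical); the domain part is optional:
// "3:1f.7".parse::<PciEndpoint>() // domain 0000, bus 03, slot 1f, function 7
// "0000:03:00.0".parse::<PciEndpoint>() // same bus, slot 00, function 0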
#[derive(Debug)]
pub struct ScopedEnable {
ep: Option<PciEndpoint>, // is none if already "closed" or was already enabled before
}
impl ScopedEnable {
pub fn close(mut self) -> crate::AResult<()> {
if let Some(ep) = self.ep.take() {
ep.disable()?;
}
Ok(())
}
}
impl Drop for ScopedEnable {
fn drop(&mut self) {
if let Some(ep) = self.ep.take() {
if let Err(e) = ep.disable() {
error!("PCI {}: Failed to disable temporarily enabled device: {}", ep, e);
}
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct VendorId(pub u16);
impl fmt::Display for VendorId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "0x{:04x}", self.0)
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DeviceID(pub u16);
impl fmt::Display for DeviceID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "0x{:04x}", self.0)
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct ClassCode(pub u8);
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct SubClassCode(pub u8);
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct ProgrammingInterface(pub u8);
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Class {
pub class_code: ClassCode,
pub subclass_code: SubClassCode,
pub programming_interface: ProgrammingInterface,
}
impl fmt::Display for Class {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"0x{:02x}{:02x}{:02x}",
self.class_code.0,
self.subclass_code.0,
self.programming_interface.0,
)
}
}
#[cfg(test)]
mod test {
use super::SlotFunction;
fn check_dev_fun(dev: u8, fun: u8, repr: &str) {
assert!(dev < 0x20);
assert!(fun < 0x08);
match repr.parse::<SlotFunction>() {
Err(e) => panic!("{} failed to parse as SlotFunction: {}", repr, e),
Ok(df) => assert_eq!(SlotFunction(dev << 3 | fun), df, "failed validating parsed {}", repr),
}
}
fn check_dev_fun_canonical(dev: u8, fun: u8, repr: &str) {
check_dev_fun(dev, fun, repr);
assert_eq!(SlotFunction(dev << 3 | fun).to_string(), repr, "failed stringifying dev 0x{:02x} function {}", dev, fun);
}
fn check_invalid_dev_fun(repr: &str) {
assert!(repr.parse::<SlotFunction>().is_err(), "{:?} must not be a valid DEV.FUN", repr);
}
#[test]
fn parse_dev_function() {
check_dev_fun(0b0_0000, 0b000, "0.0");
check_dev_fun_canonical(0b0_0000, 0b000, "00.0");
check_dev_fun_canonical(0b0_0000, 0b001, "00.1");
check_dev_fun_canonical(0b0_0000, 0b111, "00.7");
check_dev_fun_canonical(0b0_0001, 0b000, "01.0");
check_dev_fun_canonical(0b0_0001, 0b001, "01.1");
check_dev_fun_canonical(0b0_0001, 0b111, "01.7");
check_dev_fun_canonical(0b1_0000, 0b000, "10.0");
check_dev_fun_canonical(0b1_0000, 0b111, "10.7");
check_dev_fun_canonical(0b1_1111, 0b011, "1f.3");
check_dev_fun_canonical(0b1_1111, 0b111, "1f.7");
check_invalid_dev_fun("");
check_invalid_dev_fun(".");
check_invalid_dev_fun("0.");
check_invalid_dev_fun("00.");
check_invalid_dev_fun("000.");
check_invalid_dev_fun(".0");
check_invalid_dev_fun(".00");
check_invalid_dev_fun(".000");
check_invalid_dev_fun("0");
check_invalid_dev_fun("00");
check_invalid_dev_fun("000");
check_invalid_dev_fun("0000");
}
}
// common.rs
// Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Portions of this file were originally copyrighted (c) 2018 The Grin Developers, issued under the Apache License,
// Version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0.
use std::convert::TryInto;
use digest::Digest;
use tari_common::DomainDigest;
use crate::{error::MerkleMountainRangeError, Hash};
const ALL_ONES: usize = std::usize::MAX;
#[derive(Copy, Clone)]
pub struct LeafIndex(pub usize);
/// Returns the MMR node index derived from the leaf index.
pub fn node_index(leaf_index: LeafIndex) -> usize {
if leaf_index.0 == 0 {
return 0;
}
2 * leaf_index.0 - leaf_index.0.count_ones() as usize
}
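// e.g. leaves 0, 1, 2, 3 map to node indices 0, 1, 3, 4 (skipping the interior
// nodes 2 and 5), matching the `leaf_to_node_indices` test below.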
/// Returns the leaf index derived from the MMR node index.
pub fn leaf_index(node_index: u32) -> u32 {
let n = checked_n_leaves(node_index as usize)
.expect("checked_n_leaves can only overflow for `usize::MAX` and that is not possible");
// Conversion is safe because n < node_index
n.try_into().unwrap()
}
/// Is this position a leaf in the MMR?
/// We know the positions of all leaves based on the postorder height of an MMR of any size (somewhat unintuitively
/// but this is how the PMMR is "append only").
pub fn is_leaf(pos: usize) -> bool {
bintree_height(pos) == 0
}
/// Gets the postorder traversal index of all peaks in a MMR given its size.
/// Starts with the top peak, which is always on the left side of the range, and navigates toward lower siblings
/// toward the right of the range.
pub fn find_peaks(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size != 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0 {
// This happens whenever the MMR is not valid, that is, when not all nodes
// are fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
Some(peaks)
}
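// e.g. find_peaks(11) == Some(vec![6, 9, 10]): an 11-node MMR has peaks of
// heights 2, 1 and 0. find_peaks(5) == None, since a 5-node MMR is not valid.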
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
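// e.g. family_branch(0, 6) == [(2, 1), (6, 5)]: from leaf 0 the proof path
// climbs to parent 2 (sibling 1), then to the peak 6 (sibling 5).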
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// return (peak_map, pos_height) of given 0-based node pos prior to its addition
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
}
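// e.g. peak_map_height(9) == (0b101, 1): before node 9 is added, peaks of
// heights 2 and 0 are present, and node 9 itself sits at height 1.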
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
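// Minimal sketch, assuming some digest type `D` implementing both Digest and
// DomainDigest (a concrete domain-separated hasher is defined elsewhere):
// let parent: Hash = hash_together::<D>(&left_child_hash, &right_child_hash);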
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
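// e.g. checked_n_leaves(5) == Some(4) (the diagram above) and
// checked_n_leaves(10) == Some(6), matching the `n_leaf_nodes` test below.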
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn peak_map_heights() {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 4 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 4 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root
assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
// A tree with over a million nodes in it find the "family path" back up the tree from a leaf node at 0.
// Note: the first two entries in the branch are consistent with a small 7 node tree.
// Note: each sibling is on the left branch, this is an example of the largest possible list of peaks
// before we start combining them into larger peaks.
assert_eq!(family_branch(0, 1_048_999), [
(2, 1),
(6, 5),
(14, 13),
(30, 29),
(62, 61),
(126, 125),
(254, 253),
(510, 509),
(1022, 1021),
(2046, 2045),
(4094, 4093),
(8190, 8189),
(16382, 16381),
(32766, 32765),
(65534, 65533),
(131_070, 131_069),
(262_142, 262_141),
(524_286, 524_285),
(1_048_574, 1_048_573),
]);
}
#[test]
fn find_peaks_when_num_left_gt_zero() {
assert!(find_peaks(0).unwrap().is_empty());
assert_eq!(find_peaks(1).unwrap(), vec![0]);
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3).unwrap(), vec![2]);
assert_eq!(find_peaks(usize::MAX).unwrap(), [18446744073709551614].to_vec());
assert_eq!(find_peaks(usize::MAX - 1), None);
}
}
| {
// This happens, whenever the MMR is not valid, that is, all nodes are not
// fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
} | conditional_block |
common.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Portions of this file were originally copyrighted (c) 2018 The Grin Developers, issued under the Apache License,
// Version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0.
use std::convert::TryInto;
use digest::Digest;
use tari_common::DomainDigest;
use crate::{error::MerkleMountainRangeError, Hash};
const ALL_ONES: usize = std::usize::MAX;
#[derive(Copy, Clone)]
pub struct LeafIndex(pub usize);
/// Returns the MMR node index derived from the leaf index.
pub fn node_index(leaf_index: LeafIndex) -> usize {
if leaf_index.0 == 0 {
return 0;
}
2 * leaf_index.0 - leaf_index.0.count_ones() as usize
}
/// Returns the leaf index derived from the MMR node index.
pub fn leaf_index(node_index: u32) -> u32 {
let n = checked_n_leaves(node_index as usize)
.expect("checked_n_leaves can only overflow for `usize::MAX` and that is not possible");
// Conversion is safe because n < node_index
n.try_into().unwrap()
}
/// Is this position a leaf in the MMR?
/// We know the positions of all leaves based on the postorder height of an MMR of any size (somewhat unintuitively
/// but this is how the PMMR is "append only").
pub fn is_leaf(pos: usize) -> bool {
bintree_height(pos) == 0
}
/// Gets the postorder traversal index of all peaks in a MMR given its size.
/// Starts with the top peak, which is always on the left side of the range, and navigates toward lower siblings
/// toward the right of the range.
pub fn find_peaks(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size!= 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0 {
// This happens, whenever the MMR is not valid, that is, all nodes are not
// fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
Some(peaks)
}
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// return (peak_map, pos_height) of given 0-based node pos prior to its addition
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size!= 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
}
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size!= 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn | () {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 4 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 4 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root
assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
// A tree with over a million nodes in it find the "family path" back up the tree from a leaf node at 0.
// Note: the first two entries in the branch are consistent with a small 7 node tree.
// Note: each sibling is on the left branch, this is an example of the largest possible list of peaks
// before we start combining them into larger peaks.
assert_eq!(family_branch(0, 1_048_999), [
(2, 1),
(6, 5),
(14, 13),
(30, 29),
(62, 61),
(126, 125),
(254, 253),
(510, 509),
(1022, 1021),
(2046, 2045),
(4094, 4093),
(8190, 8189),
(16382, 16381),
(32766, 32765),
(65534, 65533),
(131_070, 131_069),
(262_142, 262_141),
(524_286, 524_285),
(1_048_574, 1_048_573),
]);
}
#[test]
fn find_peaks_when_num_left_gt_zero() {
assert!(find_peaks(0).unwrap().is_empty());
assert_eq!(find_peaks(1).unwrap(), vec![0]);
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3).unwrap(), vec![2]);
assert_eq!(find_peaks(usize::MAX).unwrap(), [18446744073709551614].to_vec());
assert_eq!(find_peaks(usize::MAX - 1), None);
}
}
| peak_map_heights | identifier_name |
common.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Portions of this file were originally copyrighted (c) 2018 The Grin Developers, issued under the Apache License,
// Version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0.
use std::convert::TryInto;
use digest::Digest;
use tari_common::DomainDigest;
use crate::{error::MerkleMountainRangeError, Hash};
const ALL_ONES: usize = std::usize::MAX;
#[derive(Copy, Clone)]
pub struct LeafIndex(pub usize);
/// Returns the MMR node index derived from the leaf index.
pub fn node_index(leaf_index: LeafIndex) -> usize {
if leaf_index.0 == 0 {
return 0;
}
2 * leaf_index.0 - leaf_index.0.count_ones() as usize
}
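// Illustrative sketch (editor's addition): an MMR holding n leaves has
// 2n - popcount(n) nodes (one peak per set bit of n), so leaf n lands at
// exactly that node position.
#[allow(dead_code)]
fn node_index_demo() {
    // n = 5 = 0b101 has two set bits: 2 * 5 - 2 = 8.
    assert_eq!(node_index(LeafIndex(5)), 8);
}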
/// Returns the leaf index derived from the MMR node index.
pub fn leaf_index(node_index: u32) -> u32 {
let n = checked_n_leaves(node_index as usize)
.expect("checked_n_leaves can only overflow for `usize::MAX` and that is not possible");
// Conversion is safe because n < node_index
n.try_into().unwrap()
}
/// Is this position a leaf in the MMR?
/// We know the positions of all leaves based on the postorder height of an MMR of any size (somewhat
/// unintuitively, but this is how the PMMR is "append only").
pub fn is_leaf(pos: usize) -> bool {
bintree_height(pos) == 0
}
/// Gets the postorder traversal index of all peaks in a MMR given its size.
/// Starts with the top peak, which is always on the left side of the range, and navigates toward lower siblings
/// toward the right of the range.
pub fn find_peaks(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size != 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0 {
// This happens whenever the MMR is not valid, that is, when not all nodes
// are fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
Some(peaks)
}
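// Illustrative sketch (editor's addition): the peak decomposition mirrors
// the binary representation of the leaf count. A size-11 MMR holds 7 leaves
// (0b111): perfect trees of 4, 2 and 1 leaves whose roots sit at 6, 9 and 10.
#[allow(dead_code)]
fn find_peaks_demo() {
    assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
    // 12 nodes leave one node over, so 12 is not a valid MMR size.
    assert_eq!(find_peaks(12), None);
}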
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
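// Illustrative sketch (editor's addition): `family` is symmetric on
// siblings -- both children of a parent report the same parent and each
// other as sibling.
#[allow(dead_code)]
fn family_demo() {
    // Node 6 (a left child) and node 13 (its right sibling) share parent 14.
    assert_eq!(family(6).unwrap(), (14, 13));
    assert_eq!(family(13).unwrap(), (14, 6));
}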
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
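// Illustrative sketch (editor's addition, hypothetical proof builder): the
// sibling column of `family_branch` is exactly the authentication path of a
// Merkle proof; `hashes` below is a stand-in for the MMR's backing store.
//
//     let proof: Vec<Hash> = family_branch(pos, last_pos)
//         .iter()
//         .map(|&(_parent, sibling)| hashes[sibling].clone())
//         .collect();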
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// Returns (peak_map, pos_height) of the given 0-based node pos prior to its addition.
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
}
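// Illustrative sketch (editor's addition): before node 9 is appended, the
// bitmap 0b101 flags peaks of height 2 and 0 along the path, and node 9
// itself is created at height 1 (the parent of leaves 7 and 8).
#[allow(dead_code)]
fn peak_map_height_demo() {
    assert_eq!(peak_map_height(9), (0b101, 1));
}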
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
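// Illustrative sketch (editor's addition): node counts decompose the same
// way the peaks do.
#[allow(dead_code)]
fn checked_n_leaves_demo() {
    // 10 nodes = a 4-leaf perfect tree (7 nodes) + a 2-leaf tree (3 nodes).
    assert_eq!(checked_n_leaves(10), Some(6));
}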
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn peak_map_heights() {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 3 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 3 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 3 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root | // For a tree with over a million nodes, find the "family path" back up the tree from the leaf node at 0.
// Note: the first two entries in the branch are consistent with a small 7 node tree.
// Note: each sibling is on the left branch; this is an example of the largest possible list of peaks
// before we start combining them into larger peaks.
assert_eq!(family_branch(0, 1_048_999), [
(2, 1),
(6, 5),
(14, 13),
(30, 29),
(62, 61),
(126, 125),
(254, 253),
(510, 509),
(1022, 1021),
(2046, 2045),
(4094, 4093),
(8190, 8189),
(16382, 16381),
(32766, 32765),
(65534, 65533),
(131_070, 131_069),
(262_142, 262_141),
(524_286, 524_285),
(1_048_574, 1_048_573),
]);
}
#[test]
fn find_peaks_when_num_left_gt_zero() {
assert!(find_peaks(0).unwrap().is_empty());
assert_eq!(find_peaks(1).unwrap(), vec![0]);
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3).unwrap(), vec![2]);
assert_eq!(find_peaks(usize::MAX).unwrap(), [18446744073709551614].to_vec());
assert_eq!(find_peaks(usize::MAX - 1), None);
}
} | assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
| random_line_split |
common.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Portions of this file were originally copyrighted (c) 2018 The Grin Developers, issued under the Apache License,
// Version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0.
use std::convert::TryInto;
use digest::Digest;
use tari_common::DomainDigest;
use crate::{error::MerkleMountainRangeError, Hash};
const ALL_ONES: usize = std::usize::MAX;
#[derive(Copy, Clone)]
pub struct LeafIndex(pub usize);
/// Returns the MMR node index derived from the leaf index.
pub fn node_index(leaf_index: LeafIndex) -> usize {
if leaf_index.0 == 0 {
return 0;
}
2 * leaf_index.0 - leaf_index.0.count_ones() as usize
}
/// Returns the leaf index derived from the MMR node index.
pub fn leaf_index(node_index: u32) -> u32 {
let n = checked_n_leaves(node_index as usize)
.expect("checked_n_leaves can only overflow for `usize::MAX` and that is not possible");
// Conversion is safe because n < node_index
n.try_into().unwrap()
}
/// Is this position a leaf in the MMR?
/// We know the positions of all leaves based on the postorder height of an MMR of any size (somewhat
/// unintuitively, but this is how the PMMR is "append only").
pub fn is_leaf(pos: usize) -> bool {
bintree_height(pos) == 0
}
/// Gets the postorder traversal index of all peaks in a MMR given its size.
/// Starts with the top peak, which is always on the left side of the range, and navigates toward lower siblings
/// toward the right of the range.
pub fn find_peaks(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size != 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0 {
// This happens whenever the MMR is not valid, that is, when not all nodes
// are fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
Some(peaks)
}
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// Returns (peak_map, pos_height) of the given 0-based node pos prior to its addition.
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) |
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
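// Illustrative sketch (editor's addition, hypothetical helper): `find_peaks`
// doubles as a size-validity check, since it returns None exactly when the
// node count cannot be decomposed into perfect binary trees.
#[allow(dead_code)]
fn is_valid_mmr_size_sketch(size: usize) -> bool {
    find_peaks(size).is_some()
}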
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn peak_map_heights() {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 3 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 3 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 3 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root
assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
// For a tree with over a million nodes, find the "family path" back up the tree from the leaf node at 0.
// Note: the first two entries in the branch are consistent with a small 7 node tree.
// Note: each sibling is on the left branch; this is an example of the largest possible list of peaks
// before we start combining them into larger peaks.
assert_eq!(family_branch(0, 1_048_999), [
(2, 1),
(6, 5),
(14, 13),
(30, 29),
(62, 61),
(126, 125),
(254, 253),
(510, 509),
(1022, 1021),
(2046, 2045),
(4094, 4093),
(8190, 8189),
(16382, 16381),
(32766, 32765),
(65534, 65533),
(131_070, 131_069),
(262_142, 262_141),
(524_286, 524_285),
(1_048_574, 1_048_573),
]);
}
#[test]
fn find_peaks_when_num_left_gt_zero() {
assert!(find_peaks(0).unwrap().is_empty());
assert_eq!(find_peaks(1).unwrap(), vec![0]);
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3).unwrap(), vec![2]);
assert_eq!(find_peaks(usize::MAX).unwrap(), [18446744073709551614].to_vec());
assert_eq!(find_peaks(usize::MAX - 1), None);
}
}
| {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
} | identifier_body |
lib.rs | //! # python-config-rs
//!
//! Just like the `python3-config` script that's installed
//! with your Python distribution, `python-config-rs` helps you
//! find information about your Python distribution.
//!
//! ```no_run
//! use python_config::PythonConfig;
//!
//! let cfg = PythonConfig::new(); // Python 3
//!
//! // Print include directories
//! println!("Includes: {}", cfg.includes().unwrap());
//! // Print installation prefix
//! println!("Installation prefix: {}", cfg.prefix().unwrap());
//! ```
//!
//! `python-config` may be most useful in your `build.rs`
//! script, or in any application where you need to find
//!
//! - the location of Python libraries
//! - the include directory for Python headers
//! - any of the things available via `python-config`
//!
//! Essentially, this is a reimplementation of the
//! `python3-config` script with a Rust interface. We work
//! directly with your Python interpreter, just in case
//! a `python-config` script is not on your system.
//!
//! We provide a new binary, `python3-config`, in case (for whatever
//! reason) you'd like to use this version of `python3-config`
//! instead of the distribution's script. We have tests that
//! show our script takes the exact same inputs and returns
//! the exact same outputs. Note that the tests only work if
//! you have a Python 3 distribution that includes a
//! `python3-config` script.
//!
//! ## 3 > 2
//!
//! We make the choice for you: by default, we favor Python 3
//! over Python 2. If you need Python 2 support, use the more
//! explicit interface to create the corresponding `PythonConfig`
//! handle. Note that, while the Python 2 interface should work,
//! it's gone through significantly less testing.
//!
//! The `python3-config` binary in this crate is Python 3 only.
mod cmdr;
#[macro_use]
mod script;
use cmdr::SysCommand;
use semver;
use std::io;
use std::path::{self, PathBuf};
/// Selectable Python version
#[derive(PartialEq, Eq, Debug)]
pub enum Version {
/// Python 3
Three,
/// Python 2
Two,
}
/// Describes a few possible errors from the `PythonConfig` interface
#[derive(Debug)]
pub enum Error {
/// An I/O error occurred while interfacing the interpreter
IO(io::Error),
/// This function is for Python 3 only
///
/// This will be the return error for methods returning
/// a [`Py3Only<T>`](type.Py3Only.html) type.
Python3Only,
/// Other, one-off errors, with reasoning provided as a string
Other(&'static str),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err {
Error::IO(err) => err,
Error::Python3Only => io::Error::new(
io::ErrorKind::Other,
"this function is only available for Python 3",
),
Error::Other(why) => io::Error::new(io::ErrorKind::Other, why),
}
}
}
/// The result type denoting a return `T` or
/// an [`Error`](enum.Error.html).
pub type PyResult<T> = Result<T, Error>;
/// The result type denotes that this function
/// is only available when interfacing a Python 3
/// interpreter.
///
/// It's the same as the normal [`PyResult`](type.PyResult.html)
/// used throughout this module, but it's just a little
/// type hint.
pub type Py3Only<T> = Result<T, Error>;
#[inline]
fn other_err(what: &'static str) -> Error {
Error::Other(what)
}
/// Defines the script with a common prelude of imports
/// and helper functions. Returns a single string that
/// represents the script.
fn build_script(lines: &[&str]) -> String {
let mut script = String::new();
script.push_str("from __future__ import print_function\n");
script.push_str("import sysconfig\n");
script.push_str("pyver = sysconfig.get_config_var('VERSION')\n");
script.push_str("getvar = sysconfig.get_config_var\n");
script.push_str(&lines.join("\n"));
script
}
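// Illustrative sketch (editor's addition): for
// `build_script(&["print(getvar('prefix'))"])` the interpreter receives the
// prelude plus the payload:
//
//     from __future__ import print_function
//     import sysconfig
//     pyver = sysconfig.get_config_var('VERSION')
//     getvar = sysconfig.get_config_var
//     print(getvar('prefix'))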
/// Exposes Python configuration information
pub struct PythonConfig {
/// The commander that provides responses to our commands
cmdr: SysCommand,
/// The version of the Python interpreter we're using
ver: Version,
}
impl Default for PythonConfig {
fn default() -> PythonConfig {
PythonConfig::new()
}
}
impl PythonConfig {
/// Create a new `PythonConfig` that uses the system installed Python 3
/// interpreter to query configuration information.
pub fn new() -> Self {
PythonConfig::version(Version::Three)
}
/// Create a new `PythonConfig` that uses the system installed Python
/// of version `version`.
///
/// # Example
///
/// ```
/// use python_config::{PythonConfig, Version};
///
/// // Use the system-wide Python3 interpreter
/// let cfg = PythonConfig::version(Version::Three);
/// ```
pub fn version(version: Version) -> Self {
match version {
Version::Three => Self::with_commander(version, SysCommand::new("python3")),
Version::Two => Self::with_commander(version, SysCommand::new("python2")),
}
}
fn with_commander(ver: Version, cmdr: SysCommand) -> Self |
fn is_py3(&self) -> Result<(), Error> {
if self.ver != Version::Three {
Err(Error::Python3Only)
} else {
Ok(())
}
}
/// Create a `PythonConfig` that uses the interpreter at the path `interpreter`.
///
/// This fails if the path cannot be represented as a string, or if a query
/// for the Python version fails.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::interpreter("/usr/local/bin/python3");
/// assert!(cfg.is_ok());
/// ```
pub fn interpreter<P: AsRef<path::Path>>(interpreter: P) -> PyResult<Self> {
let cmdr = SysCommand::new(
interpreter
.as_ref()
.to_str()
.ok_or_else(|| other_err("unable to coerce interpreter path to string"))?,
);
// Assume Python 3 unless the semver tells us otherwise
let mut cfg = PythonConfig {
cmdr,
ver: Version::Three,
};
if cfg.semantic_version()?.major == 2 {
cfg.ver = Version::Two;
}
Ok(cfg)
}
/// Returns the Python version string
///
/// This is the raw return of `python --version`. Consider using
/// [`semantic_version`](struct.PythonConfig.html#method.semantic_version)
/// for something more useful.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like 'Python 3.7.4'
/// println!("{}", cfg.version_raw().unwrap());
/// ```
pub fn version_raw(&self) -> PyResult<String> {
self.cmdr.commands(&["--version"]).map_err(From::from)
}
/// Returns the Python version as a semver
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints semver "3.7.4"
/// println!("{}", cfg.semantic_version().unwrap());
/// ```
pub fn semantic_version(&self) -> PyResult<semver::Version> {
self.version_raw()
.and_then(|resp| {
let mut witer = resp.split_whitespace();
witer.next(); // 'Python'
let ver = witer.next().ok_or_else(|| {
other_err("expected --version to return a string resembling 'Python X.Y.Z'")
})?;
semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
})
.map_err(From::from)
}
fn script(&self, lines: &[&str]) -> PyResult<String> {
self.cmdr
.commands(&["-c", &build_script(lines)])
.map_err(From::from)
}
/// Returns the installation prefix of the Python interpreter as a string.
///
/// The prefix is dependent on the host operating system.
/// On macOS, depending on how Python is installed, it will return
/// a string resembling
/// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix().unwrap());
/// ```
pub fn prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('prefix'))"])
}
/// Like [`prefix`](#method.prefix), but returns
/// the installation prefix as a `PathBuf`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix_path().unwrap().display());
/// ```
pub fn prefix_path(&self) -> PyResult<PathBuf> {
self.prefix().map(PathBuf::from)
}
/// Returns the executable path prefix for the Python interpreter as a string
///
/// The path is dependent on the host OS and the installation path
/// of the Python interpreter. On macOS, the string may resemble something
/// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
pub fn exec_prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('exec_prefix'))"])
}
/// Like [`exec_prefix`](#method.exec_prefix), but
/// returns the executable prefix as a `PathBuf`.
pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
self.exec_prefix().map(PathBuf::from)
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. This is a space-delimited
/// string of paths prefixed with `-I`.
///
/// The single string may resemble something like the following
/// (on macOS)
///
/// ```text
/// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
/// ```
///
/// Note that the same path may appear more than once.
pub fn includes(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
"print(' '.join(flags))",
])
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. Unlike [`includes`](#method.includes),
/// this is simply a collection of paths. Note that the same
/// path may appear more than once.
pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
self.script(&[
"print(sysconfig.get_path('include'))",
"print(sysconfig.get_path('platinclude'))",
])
.map(|resp| resp.lines().map(PathBuf::from).collect())
}
/// All the flags useful for C compilation. This includes the include
/// paths (see [`includes`](#method.includes)) as well as other compiler
/// flags for this target. The return is a string with spaces separating
/// the flags.
pub fn cflags(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
linux_line!("flags.extend(getvar('BASECFLAGS').split())"),
linux_line!("flags.extend(getvar('CONFIGURE_CFLAGS').split())"),
macos_line!("flags.extend(getvar('CFLAGS').split())"),
"print(' '.join(flags))",
])
}
/// Returns linker flags required for linking this Python
/// distribution. All libraries / frameworks have the appropriate `-l`
/// or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn libs(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"print(' '.join(libs))",
])
}
/// Returns linker flags required for creating
/// a shared library for this Python distribution. All libraries / frameworks
/// have the appropriate `-L`, `-l`, or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -L/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7/lib/python3.7/config-3.7m-darwin -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn ldflags(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
linux_line!["libs.insert(0, '-L' + getvar('exec_prefix') + '/lib')"],
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"if not getvar('Py_ENABLED_SHARED'):",
tab!("libs.insert(0, '-L' + getvar('LIBPL'))"),
"if not getvar('PYTHONFRAMEWORK'):",
tab!("libs.extend(getvar('LINKFORSHARED').split())"),
"print(' '.join(libs))",
])
}
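// Illustrative sketch (editor's addition, hypothetical build-script use):
// `-L`/`-l` flags map onto cargo link directives; multi-token flags such as
// `-framework CoreFoundation` would need extra handling.
//
//     for flag in cfg.ldflags()?.split_whitespace() {
//         if let Some(dir) = flag.strip_prefix("-L") {
//             println!("cargo:rustc-link-search=native={}", dir);
//         } else if let Some(lib) = flag.strip_prefix("-l") {
//             println!("cargo:rustc-link-lib={}", lib);
//         }
//     }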
/// Returns a string that represents the file extension for this distribution's library
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
///
/// On macOS, the string may resemble something like `.cpython-37m-darwin.so`.
pub fn extension_suffix(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('EXT_SUFFIX'))"])?;
Ok(resp)
}
/// The ABI flags specified when building this Python distribution
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn abi_flags(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["import sys", "print(sys.abiflags)"])?;
Ok(resp)
}
/// The location of the distribution's actual `python3-config` script
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('LIBPL'))"])?;
Ok(resp)
}
/// Like [`config_dir`](#method.config_dir), but returns the path to
/// the distribution's `python-config` script as a `PathBuf`.
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir_path(&self) -> Py3Only<PathBuf> {
self.config_dir().map(PathBuf::from)
}
}
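// Illustrative sketch (editor's addition): typical use from a Cargo build
// script, assuming this crate is listed under [build-dependencies].
//
//     use python_config::PythonConfig;
//
//     fn main() {
//         let cfg = PythonConfig::new();
//         for inc in cfg.include_paths().unwrap() {
//             println!("cargo:include={}", inc.display());
//         }
//     }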
#[cfg(test)]
mod tests {
//! The tests only show that, under normal circumstances, there
//! are no errors returned from the public API.
use super::PythonConfig;
use std::path::PathBuf;
macro_rules! pycfgtest {
($ident:ident) => {
#[test]
fn $ident() {
assert!(PythonConfig::new().$ident().is_ok());
}
};
}
pycfgtest!(version_raw);
pycfgtest!(semantic_version);
pycfgtest!(prefix);
pycfgtest!(prefix_path);
pycfgtest!(exec_prefix);
pycfgtest!(exec_prefix_path);
pycfgtest!(includes);
pycfgtest!(include_paths);
pycfgtest!(cflags);
pycfgtest!(libs);
pycfgtest!(ldflags);
pycfgtest!(extension_suffix);
pycfgtest!(abi_flags);
pycfgtest!(config_dir);
pycfgtest!(config_dir_path);
// Shows that includes and include_paths return the same things
// just in different types.
#[test]
fn include_paths_same() {
let cfg = PythonConfig::new();
let include_str = cfg.includes().unwrap();
assert!(!include_str.is_empty());
let paths: Vec<PathBuf> = include_str
.split(" ")
.map(|include| {
// Drop the '-I' characters before each path
PathBuf::from(&include[2..])
})
.collect();
let actual = cfg.include_paths().unwrap();
assert_eq!(actual, paths);
}
}
| {
PythonConfig { cmdr, ver }
} | identifier_body |
lib.rs | //! # python-config-rs
//!
//! Just like the `python3-config` script that's installed
//! with your Python distribution, `python-config-rs` helps you
//! find information about your Python distribution.
//!
//! ```no_run
//! use python_config::PythonConfig;
//!
//! let cfg = PythonConfig::new(); // Python 3
//!
//! // Print include directories
//! println!("Includes: {}", cfg.includes().unwrap());
//! // Print installation prefix
//! println!("Installation prefix: {}", cfg.prefix().unwrap());
//! ```
//!
//! `python-config` may be most useful in your `build.rs`
//! script, or in any application where you need to find
//!
//! - the location of Python libraries
//! - the include directory for Python headers
//! - any of the things available via `python-config`
//!
//! Essentially, this is a reimplementation of the
//! `python3-config` script with a Rust interface. We work
//! directly with your Python interpreter, just in case
//! a `python-config` script is not on your system.
//!
//! We provide a new binary, `python3-config`, in case (for whatever
//! reason) you'd like to use this version of `python3-config`
//! instead of the distribution's script. We have tests that
//! show our script takes the exact same inputs and returns
//! the exact same outputs. Note that the tests only work if
//! you have a Python 3 distribution that includes a
//! `python3-config` script.
//!
//! ## 3 > 2
//!
//! We make the choice for you: by default, we favor Python 3
//! over Python 2. If you need Python 2 support, use the more
//! explicit interface to create the corresponding `PythonConfig`
//! handle. Note that, while the Python 2 interface should work,
//! it's gone through significantly less testing.
//!
//! The `python3-config` binary in this crate is Python 3 only.
mod cmdr;
#[macro_use]
mod script;
use cmdr::SysCommand;
use semver;
use std::io;
use std::path::{self, PathBuf};
/// Selectable Python version
#[derive(PartialEq, Eq, Debug)]
pub enum Version {
/// Python 3
Three,
/// Python 2
Two,
}
/// Describes a few possible errors from the `PythonConfig` interface
#[derive(Debug)]
pub enum Error {
/// An I/O error occurred while interfacing the interpreter
IO(io::Error),
/// This function is for Python 3 only
///
/// This will be the return error for methods returning
/// a [`Py3Only<T>`](type.Py3Only.html) type.
Python3Only,
/// Other, one-off errors, with reasoning provided as a string
Other(&'static str),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err {
Error::IO(err) => err,
Error::Python3Only => io::Error::new(
io::ErrorKind::Other,
"this function is only available for Python 3",
),
Error::Other(why) => io::Error::new(io::ErrorKind::Other, why),
}
}
}
/// The result type denoting a return `T` or
/// an [`Error`](enum.Error.html).
pub type PyResult<T> = Result<T, Error>;
/// The result type denotes that this function
/// is only available when interfacing a Python 3
/// interpreter.
///
/// It's the same as the normal [`PyResult`](type.PyResult.html)
/// used throughout this module, but it's just a little
/// type hint.
pub type Py3Only<T> = Result<T, Error>;
#[inline]
fn other_err(what: &'static str) -> Error {
Error::Other(what)
}
/// Defines the script with a common prelude of imports
/// and helper functions. Returns a single string that
/// represents the script.
fn build_script(lines: &[&str]) -> String {
let mut script = String::new();
script.push_str("from __future__ import print_function\n");
script.push_str("import sysconfig\n");
script.push_str("pyver = sysconfig.get_config_var('VERSION')\n");
script.push_str("getvar = sysconfig.get_config_var\n");
script.push_str(&lines.join("\n"));
script
}
/// Exposes Python configuration information
pub struct PythonConfig {
/// The commander that provides responses to our commands
cmdr: SysCommand,
/// The version of the Python interpreter we're using
ver: Version,
}
impl Default for PythonConfig {
fn default() -> PythonConfig {
PythonConfig::new()
}
}
impl PythonConfig {
/// Create a new `PythonConfig` that uses the system installed Python 3
/// interpreter to query configuration information.
pub fn new() -> Self {
PythonConfig::version(Version::Three)
}
/// Create a new `PythonConfig` that uses the system installed Python
/// of version `version`.
///
/// # Example
///
/// ```
/// use python_config::{PythonConfig, Version};
///
/// // Use the system-wide Python3 interpreter
/// let cfg = PythonConfig::version(Version::Three);
/// ```
pub fn version(version: Version) -> Self {
match version {
Version::Three => Self::with_commander(version, SysCommand::new("python3")),
Version::Two => Self::with_commander(version, SysCommand::new("python2")),
}
}
fn with_commander(ver: Version, cmdr: SysCommand) -> Self {
PythonConfig { cmdr, ver }
}
fn is_py3(&self) -> Result<(), Error> {
if self.ver != Version::Three {
Err(Error::Python3Only)
} else {
Ok(())
}
}
/// Create a `PythonConfig` that uses the interpreter at the path `interpreter`.
///
/// This fails if the path cannot be represented as a string, or if a query
/// for the Python version fails.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::interpreter("/usr/local/bin/python3");
/// assert!(cfg.is_ok());
/// ```
pub fn interpreter<P: AsRef<path::Path>>(interpreter: P) -> PyResult<Self> {
let cmdr = SysCommand::new(
interpreter
.as_ref()
.to_str()
.ok_or_else(|| other_err("unable to coerce interpreter path to string"))?,
);
// Assume Python 3 unless the semver tells us otherwise
let mut cfg = PythonConfig {
cmdr,
ver: Version::Three,
};
if cfg.semantic_version()?.major == 2 {
cfg.ver = Version::Two;
}
Ok(cfg)
}
/// Returns the Python version string
///
/// This is the raw return of `python --version`. Consider using
/// [`semantic_version`](struct.PythonConfig.html#method.semantic_version)
/// for something more useful.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like 'Python 3.7.4'
/// println!("{}", cfg.version_raw().unwrap());
/// ```
pub fn version_raw(&self) -> PyResult<String> {
self.cmdr.commands(&["--version"]).map_err(From::from)
}
/// Returns the Python version as a semver
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints semver "3.7.4"
/// println!("{}", cfg.semantic_version().unwrap());
/// ```
pub fn semantic_version(&self) -> PyResult<semver::Version> {
self.version_raw()
.and_then(|resp| {
let mut witer = resp.split_whitespace();
witer.next(); // 'Python'
let ver = witer.next().ok_or_else(|| {
other_err("expected --version to return a string resembling 'Python X.Y.Z'")
})?;
semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
})
.map_err(From::from)
}
fn script(&self, lines: &[&str]) -> PyResult<String> {
self.cmdr
.commands(&["-c", &build_script(lines)])
.map_err(From::from)
}
/// Returns the installation prefix of the Python interpreter as a string.
///
/// The prefix is dependent on the host operating system.
/// On macOS, depending on how Python is installed, it will return
/// a string resembling
/// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix().unwrap());
/// ```
pub fn prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('prefix'))"])
}
/// Like [`prefix`](#method.prefix), but returns
/// the installation prefix as a `PathBuf`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix_path().unwrap().display());
/// ```
pub fn prefix_path(&self) -> PyResult<PathBuf> {
self.prefix().map(PathBuf::from)
}
/// Returns the executable path prefix for the Python interpreter as a string
///
/// The path is dependent on the host OS and the installation path
/// of the Python interpreter. On macOS, the string may resemble something
/// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
pub fn exec_prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('exec_prefix'))"])
}
/// Like [`exec_prefix`](#method.exec_prefix), but
/// returns the executable prefix as a `PathBuf`.
pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
self.exec_prefix().map(PathBuf::from)
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. This is a space-delimited
/// string of paths prefixed with `-I`.
///
/// The single string may resemble something like the following
/// (on macOS)
///
/// ```text
/// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
/// ```
///
/// Note that the same path may appear more than once.
pub fn includes(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
"print(' '.join(flags))",
])
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. Unlike [`includes`](#method.includes),
/// this is simply a collection of paths. Note that the same
/// path may appear more than once.
pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
self.script(&[
"print(sysconfig.get_path('include'))",
"print(sysconfig.get_path('platinclude'))",
])
.map(|resp| resp.lines().map(PathBuf::from).collect())
}
/// All the flags useful for C compilation. This includes the include
/// paths (see [`includes`](#method.includes)) as well as other compiler
/// flags for this target. The return is a string with spaces separating
/// the flags.
pub fn cflags(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
linux_line!("flags.extend(getvar('BASECFLAGS').split())"),
linux_line!("flags.extend(getvar('CONFIGURE_CFLAGS').split())"),
macos_line!("flags.extend(getvar('CFLAGS').split())"),
"print(' '.join(flags))",
])
}
/// Returns linker flags required for linking this Python
/// distribution. All libraries / frameworks have the appropriate `-l`
/// or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn libs(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"print(' '.join(libs))",
])
}
/// Returns linker flags required for creating
/// a shared library for this Python distribution. All libraries / frameworks
/// have the appropriate `-L`, `-l`, or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -L/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7/lib/python3.7/config-3.7m-darwin -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn ldflags(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
linux_line!["libs.insert(0, '-L' + getvar('exec_prefix') + '/lib')"],
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"if not getvar('Py_ENABLED_SHARED'):",
tab!("libs.insert(0, '-L' + getvar('LIBPL'))"),
"if not getvar('PYTHONFRAMEWORK'):",
tab!("libs.extend(getvar('LINKFORSHARED').split())"),
"print(' '.join(libs))",
])
}
/// Returns a string that represents the file extension for this distribution's library
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
///
/// On macOS, the string may resemble something like `.cpython-37m-darwin.so`.
pub fn extension_suffix(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('EXT_SUFFIX'))"])?;
Ok(resp)
}
/// The ABI flags specified when building this Python distribution
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn abi_flags(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["import sys", "print(sys.abiflags)"])?;
Ok(resp)
}
/// The location of the distribution's actual `python3-config` script
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('LIBPL'))"])?;
Ok(resp)
}
/// Like [`config_dir`](#method.config_dir), but returns the path to
/// the distribution's `python-config` script as a `PathBuf`.
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir_path(&self) -> Py3Only<PathBuf> {
self.config_dir().map(PathBuf::from)
}
}
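// Illustrative sketch (editor's addition): `interpreter` plus
// `semantic_version` can gate Python-3-only queries at runtime.
//
//     let cfg = PythonConfig::interpreter("/usr/bin/python3")?;
//     if cfg.semantic_version()?.major == 3 {
//         println!("{}", cfg.extension_suffix()?);
//     }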
#[cfg(test)]
mod tests {
//! The tests only show that, under normal circumstances, there
//! are no errors returned from the public API.
use super::PythonConfig;
use std::path::PathBuf;
macro_rules! pycfgtest {
($ident:ident) => {
#[test]
fn $ident() {
assert!(PythonConfig::new().$ident().is_ok());
}
};
}
pycfgtest!(version_raw);
pycfgtest!(semantic_version);
pycfgtest!(prefix);
pycfgtest!(prefix_path);
pycfgtest!(exec_prefix);
pycfgtest!(exec_prefix_path);
pycfgtest!(includes);
pycfgtest!(include_paths);
pycfgtest!(cflags);
pycfgtest!(libs);
pycfgtest!(ldflags);
pycfgtest!(extension_suffix);
pycfgtest!(abi_flags);
pycfgtest!(config_dir);
pycfgtest!(config_dir_path);
// Shows that includes and include_paths return the same things
// just in different types.
#[test]
fn | () {
let cfg = PythonConfig::new();
let include_str = cfg.includes().unwrap();
assert!(!include_str.is_empty());
let paths: Vec<PathBuf> = include_str
.split(" ")
.map(|include| {
// Drop the '-I' characters before each path
PathBuf::from(&include[2..])
})
.collect();
let actual = cfg.include_paths().unwrap();
assert_eq!(actual, paths);
}
}
| include_paths_same | identifier_name |
lib.rs | //! # python-config-rs
//!
//! Just like the `python3-config` script that's installed
//! with your Python distribution, `python-config-rs` helps you
//! find information about your Python distribution.
//!
//! ```no_run
//! use python_config::PythonConfig;
//!
//! let cfg = PythonConfig::new(); // Python 3
//!
//! // Print include directories
//! println!("Includes: {}", cfg.includes().unwrap());
//! // Print installation prefix
//! println!("Installation prefix: {}", cfg.prefix().unwrap());
//! ```
//!
//! `python-config` may be most useful in your `build.rs`
//! script, or in any application where you need to find
//!
//! - the location of Python libraries
//! - the include directory for Python headers
//! - any of the things available via `python-config`
//!
//! Essentially, this is a reimplementation of the
//! `python3-config` script with a Rust interface. We work
//! directly with your Python interpreter, just in case
//! a `python-config` script is not on your system.
//!
//! We provide a new binary, `python3-config`, in case (for whatever
//! reason) you'd like to use this version of `python3-config`
//! instead of the distribution's script. We have tests that
//! show our script takes the exact same inputs and returns
//! the exact same outputs. Note that the tests only work if
//! you have a Python 3 distribution that includes a
//! `python3-config` script.
//!
//! ## 3 > 2
//!
//! We make the choice for you: by default, we favor Python 3
//! over Python 2. If you need Python 2 support, use the more
//! explicit interface to create the corresponding `PythonConfig`
//! handle. Note that, while the Python 2 interface should work,
//! it's gone through significantly less testing.
//!
//! The `python3-config` binary in this crate is Python 3 only.
mod cmdr;
#[macro_use]
mod script;
use cmdr::SysCommand;
use semver;
use std::io;
use std::path::{self, PathBuf};
/// Selectable Python version
#[derive(PartialEq, Eq, Debug)]
pub enum Version {
/// Python 3
Three,
/// Python 2
Two,
}
/// Describes a few possible errors from the `PythonConfig` interface
#[derive(Debug)]
pub enum Error {
/// An I/O error occurred while interfacing the interpreter
IO(io::Error),
/// This function is for Python 3 only
///
/// This will be the return error for methods returning
/// a [`Py3Only<T>`](type.Py3Only.html) type.
Python3Only,
/// Other, one-off errors, with reasoning provided as a string
Other(&'static str),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err {
Error::IO(err) => err,
Error::Python3Only => io::Error::new(
io::ErrorKind::Other,
"this function is only available for Python 3",
),
Error::Other(why) => io::Error::new(io::ErrorKind::Other, why),
}
}
}
/// The result type denoting a return `T` or | /// The result type denotes that this function
/// is only available when interfacing a Python 3
/// interpreter.
///
/// It's the same as the normal [`PyResult`](type.PyResult.html)
/// used throughout this module, but it's just a little
/// type hint.
pub type Py3Only<T> = Result<T, Error>;
#[inline]
fn other_err(what: &'static str) -> Error {
Error::Other(what)
}
/// Defines the script with a common prelude of imports
/// and helper functions. Returns a single string that
/// represents the script.
fn build_script(lines: &[&str]) -> String {
let mut script = String::new();
script.push_str("from __future__ import print_function\n");
script.push_str("import sysconfig\n");
script.push_str("pyver = sysconfig.get_config_var('VERSION')\n");
script.push_str("getvar = sysconfig.get_config_var\n");
script.push_str(&lines.join("\n"));
script
}
/// Exposes Python configuration information
pub struct PythonConfig {
/// The commander that provides responses to our commands
cmdr: SysCommand,
/// The version of the Python interpreter we're using
ver: Version,
}
impl Default for PythonConfig {
fn default() -> PythonConfig {
PythonConfig::new()
}
}
impl PythonConfig {
/// Create a new `PythonConfig` that uses the system installed Python 3
/// interpreter to query configuration information.
pub fn new() -> Self {
PythonConfig::version(Version::Three)
}
/// Create a new `PythonConfig` that uses the system installed Python
/// of version `version`.
///
/// # Example
///
/// ```
/// use python_config::{PythonConfig, Version};
///
/// // Use the system-wide Python3 interpreter
/// let cfg = PythonConfig::version(Version::Three);
/// ```
pub fn version(version: Version) -> Self {
match version {
Version::Three => Self::with_commander(version, SysCommand::new("python3")),
Version::Two => Self::with_commander(version, SysCommand::new("python2")),
}
}
fn with_commander(ver: Version, cmdr: SysCommand) -> Self {
PythonConfig { cmdr, ver }
}
fn is_py3(&self) -> Result<(), Error> {
if self.ver != Version::Three {
Err(Error::Python3Only)
} else {
Ok(())
}
}
/// Create a `PythonConfig` that uses the interpreter at the path `interpreter`.
///
/// This fails if the path cannot be represented as a string, or if a query
/// for the Python version fails.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::interpreter("/usr/local/bin/python3");
/// assert!(cfg.is_ok());
/// ```
pub fn interpreter<P: AsRef<path::Path>>(interpreter: P) -> PyResult<Self> {
let cmdr = SysCommand::new(
interpreter
.as_ref()
.to_str()
.ok_or_else(|| other_err("unable to coerce interpreter path to string"))?,
);
// Assume Python 3 unless the semver tells us otherwise
let mut cfg = PythonConfig {
cmdr,
ver: Version::Three,
};
if cfg.semantic_version()?.major == 2 {
cfg.ver = Version::Two;
}
Ok(cfg)
}
/// Returns the Python version string
///
/// This is the raw return of `python --version`. Consider using
/// [`semantic_version`](struct.PythonConfig.html#method.semantic_version)
/// for something more useful.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like 'Python 3.7.4'
/// println!("{}", cfg.version_raw().unwrap());
/// ```
pub fn version_raw(&self) -> PyResult<String> {
self.cmdr.commands(&["--version"]).map_err(From::from)
}
/// Returns the Python version as a semver
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints semver "3.7.4"
/// println!("{}", cfg.semantic_version().unwrap());
/// ```
pub fn semantic_version(&self) -> PyResult<semver::Version> {
self.version_raw()
.and_then(|resp| {
let mut witer = resp.split_whitespace();
witer.next(); // 'Python'
let ver = witer.next().ok_or_else(|| {
other_err("expected --version to return a string resembling 'Python X.Y.Z'")
})?;
semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
})
.map_err(From::from)
}
fn script(&self, lines: &[&str]) -> PyResult<String> {
self.cmdr
.commands(&["-c", &build_script(lines)])
.map_err(From::from)
}
/// Returns the installation prefix of the Python interpreter as a string.
///
/// The prefix is dependent on the host operating system.
/// On macOS, depending on how Python is installed, it will return
/// a string resembling
/// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix().unwrap());
/// ```
pub fn prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('prefix'))"])
}
/// Like [`prefix`](#method.prefix), but returns
/// the installation prefix as a `PathBuf`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix_path().unwrap().display());
/// ```
pub fn prefix_path(&self) -> PyResult<PathBuf> {
self.prefix().map(PathBuf::from)
}
/// Returns the executable path prefix for the Python interpreter as a string
///
/// The path is dependent on the host OS and the installation path
/// of the Python interpreter. On macOS, the string may resemble something
/// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
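///
/// # Example
///
/// A minimal usage sketch; the exact output depends on your Python
/// installation.
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.exec_prefix().unwrap());
/// ```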
pub fn exec_prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('exec_prefix'))"])
}
/// Like [`exec_prefix`](#method.exec_prefix), but
/// returns the executable prefix as a `PathBuf`.
pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
self.exec_prefix().map(PathBuf::from)
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. This is a space-delimited
/// string of paths prefixed with `-I`.
///
/// The single string may resemble something like the following
/// (on macOS)
///
/// ```text
/// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
/// ```
///
/// Note that the same path may appear more than once.
pub fn includes(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
"print(' '.join(flags))",
])
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. Unlike [`includes`](#method.includes),
/// this is simply a collection of paths. Note that the same
/// path may appear more than once.
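///
/// # Example
///
/// A minimal usage sketch that prints each include path on its own line.
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// for path in cfg.include_paths().unwrap() {
///     println!("{}", path.display());
/// }
/// ```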
pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
self.script(&[
"print(sysconfig.get_path('include'))",
"print(sysconfig.get_path('platinclude'))",
])
.map(|resp| resp.lines().map(PathBuf::from).collect())
}
/// All the flags useful for C compilation. This includes the include
/// paths (see [`includes`](#method.includes)) as well as other compiler
/// flags for this target. The return is a string with spaces separating
/// the flags.
pub fn cflags(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
linux_line!("flags.extend(getvar('BASECFLAGS').split())"),
linux_line!("flags.extend(getvar('CONFIGURE_CFLAGS').split())"),
macos_line!("flags.extend(getvar('CFLAGS').split())"),
"print(' '.join(flags))",
])
}
/// Returns linker flags required for linking this Python
/// distribution. All libraries / frameworks have the appropriate `-l`
/// or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -lpython3.7m -ldl -framework CoreFoundation
/// ```
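///
/// # Example
///
/// A minimal usage sketch; the exact flags depend on your platform and
/// Python installation.
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like '-lpython3.7m -ldl -framework CoreFoundation'
/// println!("{}", cfg.libs().unwrap());
/// ```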
pub fn libs(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"print(' '.join(libs))",
])
}
/// Returns linker flags required for creating
/// a shared library for this Python distribution. All libraries / frameworks
/// have the appropriate `-L`, `-l`, or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -L/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7/lib/python3.7/config-3.7m-darwin -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn ldflags(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
linux_line!["libs.insert(0, '-L' + getvar('exec_prefix') + '/lib')"],
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"if not getvar('Py_ENABLED_SHARED'):",
tab!("libs.insert(0, '-L' + getvar('LIBPL'))"),
"if not getvar('PYTHONFRAMEWORK'):",
tab!("libs.extend(getvar('LINKFORSHARED').split())"),
"print(' '.join(libs))",
])
}
/// Returns a string that represents the file extension for this distribution's library
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
///
/// On macOS, the string may resemble something like `.cpython-37m-darwin.so`.
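///
/// # Example
///
/// A minimal usage sketch; this errors with `Error::Python3Only` when the
/// handle wraps a Python 2 interpreter.
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like '.cpython-37m-darwin.so'
/// println!("{}", cfg.extension_suffix().unwrap());
/// ```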
pub fn extension_suffix(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('EXT_SUFFIX'))"])?;
Ok(resp)
}
/// The ABI flags specified when building this Python distribution
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn abi_flags(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["import sys", "print(sys.abiflags)"])?;
Ok(resp)
}
/// The location of the distribution's actual `python3-config` script
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('LIBPL'))"])?;
Ok(resp)
}
/// Like [`config_dir`](#method.config_dir), but returns the path to
/// the distribution's `python-config` script as a `PathBuf`.
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir_path(&self) -> Py3Only<PathBuf> {
self.config_dir().map(PathBuf::from)
}
}
#[cfg(test)]
mod tests {
//! The tests only show that, under normal circumstances, there
//! are no errors returned from the public API.
use super::PythonConfig;
use std::path::PathBuf;
macro_rules! pycfgtest {
($ident:ident) => {
#[test]
fn $ident() {
assert!(PythonConfig::new().$ident().is_ok());
}
};
}
pycfgtest!(version_raw);
pycfgtest!(semantic_version);
pycfgtest!(prefix);
pycfgtest!(prefix_path);
pycfgtest!(exec_prefix);
pycfgtest!(exec_prefix_path);
pycfgtest!(includes);
pycfgtest!(include_paths);
pycfgtest!(cflags);
pycfgtest!(libs);
pycfgtest!(ldflags);
pycfgtest!(extension_suffix);
pycfgtest!(abi_flags);
pycfgtest!(config_dir);
pycfgtest!(config_dir_path);
// Shows that includes and include_paths return the same things
// just in different types.
#[test]
fn include_paths_same() {
let cfg = PythonConfig::new();
let include_str = cfg.includes().unwrap();
assert!(!include_str.is_empty());
let paths: Vec<PathBuf> = include_str
.split(" ")
.map(|include| {
// Drop the '-I' characters before each path
PathBuf::from(&include[2..])
})
.collect();
let actual = cfg.include_paths().unwrap();
assert_eq!(actual, paths);
}
}
path.rs
//! This module contains code for abstracting object locations that work
//! across different backing implementations and platforms.
use itertools::Itertools;
use percent_encoding::{percent_decode_str, percent_encode, AsciiSet, CONTROLS};
use std::path::PathBuf;
/// Universal interface for handling paths and locations for objects and
/// directories in the object store.
///
/// It allows IOx to be completely decoupled from the underlying object store
/// implementations.
///
/// Deliberately does not implement `Display` or `ToString`! Use one of the
/// converters.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub struct ObjectStorePath {
parts: Vec<PathPart>,
}
impl ObjectStorePath {
/// For use when receiving a path from an object store API directly, not
/// when building a path. Assumes DELIMITER is the separator.
///
/// TODO: Improve performance by implementing a CoW-type model to delay
/// parsing until needed TODO: This should only be available to cloud
/// storage
pub fn from_cloud_unchecked(path: impl Into<String>) -> Self {
let path = path.into();
Self {
parts: path
.split_terminator(DELIMITER)
.map(|s| PathPart(s.to_string()))
.collect(),
}
}
/// For use when receiving a path from a filesystem directly, not
/// when building a path. Uses the standard library's path splitting
/// implementation to separate into parts.
pub fn from_path_buf_unchecked(path: impl Into<PathBuf>) -> Self {
let path = path.into();
Self {
parts: path
.iter()
.flat_map(|s| s.to_os_string().into_string().map(PathPart))
.collect(),
}
}
/// Add a part to the end of the path, encoding any restricted characters.
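///
/// For example (mirroring the `push_encodes` test below), pushing a part
/// that contains the delimiter stores it percent-encoded:
///
/// ```ignore
/// let mut location = ObjectStorePath::default();
/// location.push("foo/bar");
/// assert_eq!(CloudConverter::convert(&location), "foo%2Fbar");
/// ```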
pub fn push(&mut self, part: impl Into<String>) {
let part = part.into();
self.parts.push((&*part).into());
}
/// Add a `PathPart` to the end of the path. Infallible because the
/// `PathPart` should already have been checked for restricted
/// characters.
pub fn push_part(&mut self, part: &PathPart) {
self.parts.push(part.to_owned());
}
/// Add the parts of `ObjectStorePath` to the end of the path. Notably does
/// *not* behave as `PathBuf::push` does: no existing part of `self`
/// will be replaced as part of this call.
pub fn push_path(&mut self, path: &Self) {
self.parts.extend_from_slice(&path.parts);
}
/// Push a bunch of parts in one go.
pub fn push_all<'a>(&mut self, parts: impl AsRef<[&'a str]>) {
self.parts.extend(parts.as_ref().iter().map(|&v| v.into()));
}
/// Return the component parts of the path.
pub fn as_parts(&self) -> &[PathPart] {
self.parts.as_ref()
}
/// Pops a part from the path and returns it, or `None` if it's empty.
pub fn pop(&mut self) -> Option<&PathPart> {
unimplemented!()
}
/// Determines whether `prefix` is a prefix of `self`.
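///
/// A prefix may end partway through a part; this sketch mirrors the
/// `starts_with_parts` test below:
///
/// ```ignore
/// let mut haystack = ObjectStorePath::default();
/// haystack.push_all(&["foo/bar", "baz%2Ftest", "something"]);
///
/// let mut needle = ObjectStorePath::default();
/// needle.push("f");
/// assert!(haystack.starts_with(&needle));
/// ```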
pub fn starts_with(&self, prefix: &Self) -> bool {
let diff = itertools::diff_with(self.parts.iter(), prefix.parts.iter(), |a, b| a == b);
match diff {
None => true,
Some(itertools::Diff::Shorter(..)) => true,
Some(itertools::Diff::FirstMismatch(_, mut remaining_self, mut remaining_prefix)) => {
let first_prefix = remaining_prefix.next().expect("must be at least one value");
// there must not be any other remaining parts in the prefix
remaining_prefix.next().is_none()
// and the next item in self must start with the last item in the prefix
&& remaining_self
.next()
.expect("must be at least one value")
.0
.starts_with(&first_prefix.0)
}
_ => false,
}
}
/// Returns delimiter-separated parts contained in `self` after `prefix`.
pub fn parts_after_prefix(&self, _prefix: &Self) -> &[PathPart] {
unimplemented!()
}
}
// TODO: I made these structs rather than functions because I could see
// `convert` being part of a trait, possibly, but that seemed a bit overly
// complex for now.
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in cloud storage.
#[derive(Debug, Clone, Copy)]
pub struct CloudConverter {}
impl CloudConverter {
/// Creates a cloud storage location by joining this `ObjectStorePath`'s
/// parts with `DELIMITER`
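///
/// For example, a path with parts `test` and `` (an empty part) converts
/// to `"test/"`, as exercised by the tests below:
///
/// ```ignore
/// let mut prefix = ObjectStorePath::default();
/// prefix.push_all(&["test", ""]);
/// assert_eq!(CloudConverter::convert(&prefix), "test/");
/// ```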
pub fn convert(object_store_path: &ObjectStorePath) -> String {
object_store_path.parts.iter().map(|p| &p.0).join(DELIMITER)
}
}
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in filesystem storage.
#[derive(Debug, Clone, Copy)]
pub struct FileConverter {}
impl FileConverter {
/// Creates a filesystem `PathBuf` location by using the standard library's
/// `PathBuf` building implementation appropriate for the current
/// platform.
pub fn convert(object_store_path: &ObjectStorePath) -> PathBuf {
object_store_path.parts.iter().map(|p| &p.0).collect()
}
}
/// The delimiter to separate object namespaces, creating a directory structure.
pub const DELIMITER: &str = "/";
// percent_encode's API needs this as a byte
const DELIMITER_BYTE: u8 = DELIMITER.as_bytes()[0];
/// The PathPart type exists to validate the directory/file names that form part
/// of a path.
///
/// A PathPart instance is guaranteed to contain no `/` characters as it can
/// only be constructed by going through the `try_from` impl.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub struct PathPart(String);
/// Characters we want to encode.
const INVALID: &AsciiSet = &CONTROLS
// The delimiter we are reserving for internal hierarchy
.add(DELIMITER_BYTE)
// Characters AWS recommends avoiding for object keys
// https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
.add(b'\\')
.add(b'{')
// TODO: Non-printable ASCII characters (128–255 decimal characters)
.add(b'^')
.add(b'}')
.add(b'%')
.add(b'`')
.add(b']')
.add(b'"') // " <-- my editor is confused about double quotes within single quotes
.add(b'>')
.add(b'[')
.add(b'~')
.add(b'<')
.add(b'#')
.add(b'|')
// Characters Google Cloud Storage recommends avoiding for object names
// https://cloud.google.com/storage/docs/naming-objects
.add(b'\r')
.add(b'\n')
.add(b'*')
.add(b'?');
impl From<&str> for PathPart {
fn from(v: &str) -> Self {
match v {
// We don't want to encode `.` generally, but we do want to disallow parts of paths
// to be equal to `.` or `..` to prevent file system traversal shenanigans.
"." => Self(String::from("%2E")),
".." => Self(String::from("%2E%2E")),
other => Self(percent_encode(other.as_bytes(), INVALID).to_string()),
}
}
}
impl std::fmt::Display for PathPart {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
percent_decode_str(&self.0)
.decode_utf8()
.expect("Valid UTF-8 that came from String")
.fmt(f)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn path_part_delimiter_gets_encoded() {
let part: PathPart = "foo/bar".into();
assert_eq!(part, PathPart(String::from("foo%2Fbar")));
}
#[test]
fn path_part_gets_decoded_for_display() {
let part: PathPart = "foo/bar".into();
assert_eq!(part.to_string(), "foo/bar");
}
#[test]
fn path_part_given_already_encoded_string() {
let part: PathPart = "foo%2Fbar".into();
assert_eq!(part, PathPart(String::from("foo%252Fbar")));
assert_eq!(part.to_string(), "foo%2Fbar");
}
#[test]
fn path_part_cant_be_one_dot() {
let part: PathPart = ".".into();
assert_eq!(part, PathPart(String::from("%2E")));
assert_eq!(part.to_string(), ".");
}
#[test]
fn path_part_cant_be_two_dots() {
let part: PathPart = "..".into();
assert_eq!(part, PathPart(String::from("%2E%2E")));
assert_eq!(part.to_string(), "..");
}
// Invariants to maintain/document/test:
//
// - always ends in DELIMITER if it's a directory. If it's the end object, it
// should have some sort of file extension like .parquet, .json, or .segment
// - does not contain unencoded DELIMITER
// - for file paths: does not escape root dir
// - for object storage: looks like directories
// - Paths that come from object stores directly don't need to be
// parsed/validated
// - Within a process, the same backing store will always be used
//
#[test]
fn cloud_prefix_no_trailing_delimiter_or_filename() {
// Use case: a file named `test_file.json` exists in object storage and it
// should be returned for a search on prefix `test`, so the prefix path
// should not get a trailing delimiter automatically added
let mut prefix = ObjectStorePath::default();
prefix.push("test");
let converted = CloudConverter::convert(&prefix);
assert_eq!(converted, "test");
}
#[test]
fn cloud_prefix_with_trailing_delimiter() {
// Use case: files exist in object storage named `foo/bar.json` and
// `foo_test.json`. A search for the prefix `foo/` should return
// `foo/bar.json` but not `foo_test.json'.
let mut prefix = ObjectStorePath::default();
prefix.push_all(&["test", ""]);
let converted = CloudConverter::convert(&prefix);
assert_eq!(converted, "test/");
}
#[test]
fn push_encodes() {
let mut location = ObjectStorePath::default();
location.push("foo/bar");
location.push("baz%2Ftest");
let converted = CloudConverter::convert(&location);
assert_eq!(converted, "foo%2Fbar/baz%252Ftest");
}
#[test]
fn push_all_encodes() {
let mut location = ObjectStorePath::default();
location.push_all(&["foo/bar", "baz%2Ftest"]);
let converted = CloudConverter::convert(&location);
assert_eq!(converted, "foo%2Fbar/baz%252Ftest");
}
#[test]
fn starts_with_parts() {
let mut haystack = ObjectStorePath::default();
haystack.push_all(&["foo/bar", "baz%2Ftest", "something"]);
assert!(
haystack.starts_with(&haystack),
"{:?} should have started with {:?}",
haystack,
haystack
);
let mut needle = haystack.clone();
needle.push("longer now");
assert!(
!haystack.starts_with(&needle),
"{:?} shouldn't have started with {:?}",
haystack,
needle
);
let mut needle = ObjectStorePath::default();
needle.push("foo/bar");
assert!(
haystack.starts_with(&needle),
"{:?} should have started with {:?}",
haystack,
needle
);
needle.push("baz%2Ftest");
assert!(
haystack.starts_with(&needle),
"{:?} should have started with {:?}",
haystack,
needle
);
let mut needle = ObjectStorePath::default();
needle.push("f");
assert!(
haystack.starts_with(&needle),
"{:?} should have started with {:?}",
haystack,
needle
);
needle.push("oo/bar");
assert!(
!haystack.starts_with(&needle),
"{:?} shouldn't have started with {:?}",
haystack,
needle
);
let mut needle = ObjectStorePath::default();
needle.push_all(&["foo/bar", "baz"]);
assert!(
haystack.starts_with(&needle),
"{:?} should have started with {:?}",
haystack,
needle
);
}
}
mod.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The device layer.
pub(crate) mod arp;
pub(crate) mod ethernet;
pub(crate) mod ndp;
use std::fmt::{self, Debug, Display, Formatter};
use log::{debug, trace};
use net_types::ethernet::Mac;
use net_types::ip::{AddrSubnet, IpAddress, Ipv4Addr, Ipv6, Ipv6Addr};
use net_types::{LinkLocalAddr, MulticastAddr};
use packet::{BufferMut, Serializer};
use crate::data_structures::{IdMap, IdMapCollectionKey};
use crate::device::ethernet::{EthernetDeviceState, EthernetDeviceStateBuilder};
use crate::{BufferDispatcher, Context, EventDispatcher, StackState};
/// An ID identifying a device.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct DeviceId {
id: usize,
protocol: DeviceProtocol,
}
impl DeviceId {
/// Construct a new `DeviceId` for an Ethernet device.
pub(crate) fn new_ethernet(id: usize) -> DeviceId {
DeviceId { id, protocol: DeviceProtocol::Ethernet }
}
/// Get the protocol-specific ID for this `DeviceId`.
pub fn id(self) -> usize {
self.id
}
/// Get the protocol for this `DeviceId`.
pub fn protocol(self) -> DeviceProtocol {
self.protocol
}
}
impl Display for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.protocol, self.id)
}
}
impl Debug for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
Display::fmt(self, f)
}
}
impl IdMapCollectionKey for DeviceId {
const VARIANT_COUNT: usize = 1;
fn get_variant(&self) -> usize {
match self.protocol {
DeviceProtocol::Ethernet => 0,
}
}
fn get_id(&self) -> usize {
self.id as usize
}
}
/// Type of device protocol.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub enum DeviceProtocol {
Ethernet,
}
impl Display for DeviceProtocol {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(
f,
"{}",
match self {
DeviceProtocol::Ethernet => "Ethernet",
}
)
}
}
// TODO(joshlf): Does the IP layer ever need to distinguish between broadcast
// and multicast frames?
/// The type of address used as the source address in a device-layer frame:
/// unicast or broadcast.
///
/// `FrameDestination` is used to implement RFC 1122 section 3.2.2 and RFC 4443
/// section 2.4.e, which govern when to avoid sending an ICMP error message for
/// ICMP and ICMPv6 respectively.
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum FrameDestination {
/// A unicast address - one which is neither multicast nor broadcast.
Unicast,
/// A multicast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Multicast,
/// A broadcast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Broadcast,
}
impl FrameDestination {
/// Is this `FrameDestination::Multicast`?
pub(crate) fn is_multicast(self) -> bool {
self == FrameDestination::Multicast
}
/// Is this `FrameDestination::Broadcast`?
pub(crate) fn is_broadcast(self) -> bool {
self == FrameDestination::Broadcast
}
}
/// Builder for a [`DeviceLayerState`].
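///
/// A minimal construction sketch (`build` is crate-internal):
///
/// ```ignore
/// let mut builder = DeviceStateBuilder::default();
/// builder.set_default_ndp_configs(ndp::NdpConfigurations::default());
/// let device_layer_state = builder.build();
/// ```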
#[derive(Clone)]
pub struct DeviceStateBuilder {
/// Default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`].
default_ndp_configs: ndp::NdpConfigurations,
}
impl Default for DeviceStateBuilder {
fn default() -> Self {
Self { default_ndp_configs: ndp::NdpConfigurations::default() }
}
}
impl DeviceStateBuilder {
/// Set the default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`] for more details.
pub fn set_default_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.default_ndp_configs = v;
}
/// Build the [`DeviceLayerState`].
pub(crate) fn build(self) -> DeviceLayerState {
DeviceLayerState { ethernet: IdMap::new(), default_ndp_configs: self.default_ndp_configs }
}
}
/// The state associated with the device layer.
pub(crate) struct DeviceLayerState {
ethernet: IdMap<DeviceState<EthernetDeviceState>>,
default_ndp_configs: ndp::NdpConfigurations,
}
impl DeviceLayerState {
/// Add a new ethernet device to the device layer.
///
/// `add` adds a new `EthernetDeviceState` with the given MAC address and
/// MTU. The MTU will be taken as a limit on the size of Ethernet payloads -
/// the Ethernet header is not counted towards the MTU.
pub(crate) fn add_ethernet_device(&mut self, mac: Mac, mtu: u32) -> DeviceId {
let mut builder = EthernetDeviceStateBuilder::new(mac, mtu);
builder.set_ndp_configs(self.default_ndp_configs.clone());
let ethernet_state = DeviceState::new(builder.build());
let id = self.ethernet.push(ethernet_state);
debug!("adding Ethernet device with ID {} and MTU {}", id, mtu);
DeviceId::new_ethernet(id)
}
// TODO(rheacock, NET-2140): Add ability to remove inactive devices
}
/// Common state across devices.
#[derive(Default)]
pub(crate) struct CommonDeviceState {
/// Is the device initialized?
is_initialized: bool,
}
/// Device state.
///
/// `D` is the device-specific state.
pub(crate) struct DeviceState<D> {
/// Device-independent state.
common: CommonDeviceState,
/// Device-specific state.
device: D,
}
impl<D> DeviceState<D> {
/// Create a new `DeviceState` with a device-specific state `device`.
pub(crate) fn new(device: D) -> Self {
Self { common: CommonDeviceState::default(), device }
}
/// Get a reference to the common (device-independent) state.
pub(crate) fn common(&self) -> &CommonDeviceState {
&self.common
}
/// Get a mutable reference to the common (device-independent) state.
pub(crate) fn common_mut(&mut self) -> &mut CommonDeviceState {
&mut self.common
}
/// Get a reference to the inner (device-specific) state.
pub(crate) fn device(&self) -> &D {
&self.device
}
/// Get a mutable reference to the inner (device-specific) state.
pub(crate) fn device_mut(&mut self) -> &mut D {
&mut self.device
}
}
/// The identifier for timer events in the device layer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum DeviceLayerTimerId {
/// A timer event in the ARP layer with a protocol type of IPv4
ArpIpv4(arp::ArpTimerId<usize, Ipv4Addr>),
Ndp(ndp::NdpTimerId),
}
impl From<arp::ArpTimerId<usize, Ipv4Addr>> for DeviceLayerTimerId {
fn from(id: arp::ArpTimerId<usize, Ipv4Addr>) -> DeviceLayerTimerId {
DeviceLayerTimerId::ArpIpv4(id)
}
}
/// Handle a timer event firing in the device layer.
pub(crate) fn handle_timeout<D: EventDispatcher>(ctx: &mut Context<D>, id: DeviceLayerTimerId) {
match id {
DeviceLayerTimerId::ArpIpv4(inner_id) => arp::handle_timer(ctx, inner_id),
DeviceLayerTimerId::Ndp(inner_id) => ndp::handle_timeout(ctx, inner_id),
}
}
/// An event dispatcher for the device layer.
///
/// See the `EventDispatcher` trait in the crate root for more details.
pub trait DeviceLayerEventDispatcher<B: BufferMut> {
/// Send a frame to a device driver.
///
/// If there was an MTU error while attempting to serialize the frame, the
/// original serializer is returned in the `Err` variant. All other errors
/// (for example, errors in allocating a buffer) are silently ignored and
/// reported as success.
///
/// Note, until `device` has been initialized, the netstack promises to not
/// send any outbound traffic to it. See [`initialize_device`] for more
/// information.
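///
/// A minimal sketch of an implementing dispatcher that accepts and
/// discards every frame (the type name here is illustrative, not part of
/// this crate):
///
/// ```ignore
/// struct NullDispatcher;
///
/// impl<B: BufferMut> DeviceLayerEventDispatcher<B> for NullDispatcher {
///     fn send_frame<S: Serializer<Buffer = B>>(
///         &mut self,
///         _device: DeviceId,
///         _frame: S,
///     ) -> Result<(), S> {
///         // Pretend the frame was handed to a driver successfully.
///         Ok(())
///     }
/// }
/// ```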
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
device: DeviceId,
frame: S,
) -> Result<(), S>;
}
/// Is `device` initialized?
pub(crate) fn is_device_initialized<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> bool {
get_common_device_state(state, device).is_initialized
}
/// Initialize a device.
///
/// `initialize_device` will start soliciting IPv6 routers on the link if `device` is configured to
/// be a host.
///
/// `initialize_device` MUST be called after adding the device to the netstack. A device MUST NOT
/// be used until it has been initialized.
///
/// This initialization step is kept separate from the device creation/allocation step so that
/// implementations have a chance to do some work (such as updating implementation specific IDs or
/// state, configure the device or driver, etc.) before the device is actually initialized and used
/// by this netstack.
///
/// See [`StackState::add_ethernet_device`] for information about adding ethernet devices.
///
/// # Panics
///
/// Panics if `device` is already initialized.
pub fn initialize_device<D: EventDispatcher>(ctx: &mut Context<D>, device: DeviceId) {
let state = get_common_device_state_mut(ctx.state_mut(), device);
// `device` must not already be initialized.
assert!(!state.is_initialized);
state.is_initialized = true;
// Per RFC 4861 section 6.3.7, only a host sends router
// solicitation messages, so if this node is a router, do nothing.
if crate::ip::is_router::<_, Ipv6>(ctx) {
trace!("intialize_device: node is a router so not starting router solicitations");
return;
}
match device.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request into a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note, tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not be returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<AddrSubnet<A>> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ip_addr_subnet(ctx, device.id),
}
}
/// Get the IP address and subnet associated with this device, including a tentative
/// address.
pub fn get_ip_addr_subnet_with_tentative<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<Tentative<AddrSubnet<A>>> {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::get_ip_addr_subnet_with_tentative(ctx, device.id)
}
}
}
/// Set the IP address and subnet associated with this device.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn set_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
addr_sub: AddrSubnet<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("set_ip_addr_subnet: setting addr {:?} for device {:?}", addr_sub, device);
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::set_ip_addr_subnet(ctx, device.id, addr_sub),
}
}
/// Add `device` to a multicast group `multicast_addr`.
///
/// If `device` is already in the multicast group `multicast_addr`,
/// `join_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn join_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} joining multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::join_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Remove `device` from a multicast group `multicast_addr`.
///
/// If `device` is not in the multicast group `multicast_addr`,
/// `leave_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn leave_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} leaving multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::leave_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Is `device` part of the IP multicast group `multicast_addr`?
pub(crate) fn is_in_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) -> bool {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::is_in_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Get the MTU associated with this device.
pub(crate) fn get_mtu<D: EventDispatcher>(state: &StackState<D>, device: DeviceId) -> u32 {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_mtu(state, device.id),
}
}
/// Gets the IPv6 link-local address associated with this device.
// TODO(brunodalbo) when our device model allows for multiple IPs we can have
// a single function go get all the IP addresses associated with a device, which
// would be cleaner and remove the need for this function.
pub fn get_ipv6_link_local_addr<D: EventDispatcher>(
ctx: &Context<D>,
device: DeviceId,
) -> LinkLocalAddr<Ipv6Addr> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ipv6_link_local_addr(ctx, device.id),
}
}
/// Determine if an IP Address is considered tentative on a device.
///
/// Returns `true` if the address is tentative on a device; `false` otherwise.
/// Note, if the `addr` is not assigned to `device` but is considered tentative
/// on another device, `is_addr_tentative_on_device` will return `false`.
pub(crate) fn is_addr_tentative_on_device<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
addr: A,
device: DeviceId,
) -> bool {
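// Look up the device's (possibly tentative) address, if any. The result
// is `true` only when that address matches `addr` and has not yet passed
// DAD; a device with no assigned address yields `false`.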
get_ip_addr_subnet_with_tentative::<_, A>(ctx, device)
.map(|x| (x.inner().addr() == addr) && x.is_tentative())
.unwrap_or(false)
}
/// Get a reference to the common device state for a `device`.
fn get_common_device_state<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> &CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common(),
}
}
/// Get a mutable reference to the common device state for a `device`.
fn get_common_device_state_mut<D: EventDispatcher>(
state: &mut StackState<D>,
device: DeviceId,
) -> &mut CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get_mut(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common_mut(),
}
}
/// An address that may be "tentative" in that it has not yet passed
/// duplicate address detection (DAD).
///
/// A tentative address is one for which DAD is currently being performed.
/// An address is only considered assigned to an interface once DAD has
/// completed without detecting any duplicates. See [RFC 4862] for more details.
///
/// [RFC 4862]: https://tools.ietf.org/html/rfc4862
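///
/// A small usage sketch (methods are crate-internal; `addr` stands for any
/// copyable value, such as an IP address):
///
/// ```ignore
/// let tentative = Tentative::new_tentative(addr);
/// assert!(tentative.is_tentative());
/// assert_eq!(tentative.try_into_permanent(), None);
///
/// let mut assigned = Tentative::new_tentative(addr);
/// assigned.mark_permanent();
/// assert_eq!(assigned.try_into_permanent(), Some(addr));
/// ```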
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Tentative<T>(T, bool);
impl<T> Tentative<T> {
/// Create a new address that is marked as tentative.
pub(crate) fn new_tentative(t: T) -> Self {
Self(t, true)
}
/// Create a new address that is marked as permanent/assigned.
pub(crate) fn new_permanent(t: T) -> Self {
Self(t, false)
}
/// Returns whether the value is tentative.
pub(crate) fn is_tentative(&self) -> bool {
self.1
}
/// Gets the value that is stored inside.
pub(crate) fn into_inner(self) -> T {
self.0
}
/// Converts a `Tentative<T>` into an `Option<T>` in the way that
/// a tentative value corresponds to a `None`.
pub(crate) fn try_into_permanent(self) -> Option<T> {
if self.is_tentative() {
None
} else {
Some(self.into_inner())
}
}
/// Borrow the content which is stored inside.
pub(crate) fn inner(&self) -> &T {
&self.0
}
/// Similar to `Option::map`.
pub(crate) fn map<U, F>(self, f: F) -> Tentative<U>
where
F: FnOnce(T) -> U,
{
Tentative(f(self.0), self.1)
}
/// Make the tentative value permanent.
pub(crate) fn mark_permanent(&mut self) {
self.1 = false
}
}
mod.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The device layer.
pub(crate) mod arp;
pub(crate) mod ethernet;
pub(crate) mod ndp;
use std::fmt::{self, Debug, Display, Formatter};
use log::{debug, trace};
use net_types::ethernet::Mac;
use net_types::ip::{AddrSubnet, IpAddress, Ipv4Addr, Ipv6, Ipv6Addr};
use net_types::{LinkLocalAddr, MulticastAddr};
use packet::{BufferMut, Serializer};
use crate::data_structures::{IdMap, IdMapCollectionKey};
use crate::device::ethernet::{EthernetDeviceState, EthernetDeviceStateBuilder};
use crate::{BufferDispatcher, Context, EventDispatcher, StackState};
/// An ID identifying a device.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct DeviceId {
id: usize,
protocol: DeviceProtocol,
}
impl DeviceId {
/// Construct a new `DeviceId` for an Ethernet device.
pub(crate) fn new_ethernet(id: usize) -> DeviceId {
DeviceId { id, protocol: DeviceProtocol::Ethernet }
}
/// Get the protocol-specific ID for this `DeviceId`.
pub fn id(self) -> usize {
self.id
}
/// Get the protocol for this `DeviceId`.
pub fn protocol(self) -> DeviceProtocol {
self.protocol
}
}
impl Display for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.protocol, self.id)
}
}
impl Debug for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
Display::fmt(self, f)
}
}
impl IdMapCollectionKey for DeviceId {
const VARIANT_COUNT: usize = 1;
fn get_variant(&self) -> usize {
match self.protocol {
DeviceProtocol::Ethernet => 0,
}
}
fn get_id(&self) -> usize {
self.id as usize
}
}
/// Type of device protocol.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub enum DeviceProtocol {
Ethernet,
}
impl Display for DeviceProtocol {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(
f,
"{}",
match self {
DeviceProtocol::Ethernet => "Ethernet",
}
)
}
}
// TODO(joshlf): Does the IP layer ever need to distinguish between broadcast
// and multicast frames?
/// The type of address used as the source address in a device-layer frame:
/// unicast or broadcast.
///
/// `FrameDestination` is used to implement RFC 1122 section 3.2.2 and RFC 4443
/// section 2.4.e, which govern when to avoid sending an ICMP error message for
/// ICMP and ICMPv6 respectively.
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum FrameDestination {
/// A unicast address - one which is neither multicast nor broadcast.
Unicast,
/// A multicast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Multicast,
/// A broadcast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Broadcast,
}
impl FrameDestination {
/// Is this `FrameDestination::Multicast`?
pub(crate) fn is_multicast(self) -> bool {
self == FrameDestination::Multicast
}
/// Is this `FrameDestination::Broadcast`?
pub(crate) fn is_broadcast(self) -> bool {
self == FrameDestination::Broadcast
}
}
/// Builder for a [`DeviceLayerState`].
#[derive(Clone)]
pub struct DeviceStateBuilder {
/// Default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`].
default_ndp_configs: ndp::NdpConfigurations,
}
impl Default for DeviceStateBuilder {
fn default() -> Self {
Self { default_ndp_configs: ndp::NdpConfigurations::default() }
}
}
impl DeviceStateBuilder {
/// Set the default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`] for more details.
pub fn set_default_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.default_ndp_configs = v;
}
/// Build the [`DeviceLayerState`].
pub(crate) fn build(self) -> DeviceLayerState {
DeviceLayerState { ethernet: IdMap::new(), default_ndp_configs: self.default_ndp_configs }
}
}
/// The state associated with the device layer.
pub(crate) struct DeviceLayerState {
ethernet: IdMap<DeviceState<EthernetDeviceState>>,
default_ndp_configs: ndp::NdpConfigurations,
}
impl DeviceLayerState {
/// Add a new ethernet device to the device layer.
///
/// `add` adds a new `EthernetDeviceState` with the given MAC address and
/// MTU. The MTU will be taken as a limit on the size of Ethernet payloads -
/// the Ethernet header is not counted towards the MTU.
pub(crate) fn add_ethernet_device(&mut self, mac: Mac, mtu: u32) -> DeviceId {
let mut builder = EthernetDeviceStateBuilder::new(mac, mtu);
builder.set_ndp_configs(self.default_ndp_configs.clone());
let mut ethernet_state = DeviceState::new(builder.build());
let id = self.ethernet.push(ethernet_state);
debug!("adding Ethernet device with ID {} and MTU {}", id, mtu);
DeviceId::new_ethernet(id)
}
// TODO(rheacock, NET-2140): Add ability to remove inactive devices
}
/// Common state across devices.
#[derive(Default)]
pub(crate) struct CommonDeviceState {
/// Is the device initialized?
is_initialized: bool,
}
/// Device state.
///
/// `D` is the device-specific state.
pub(crate) struct DeviceState<D> {
/// Device-independant state.
common: CommonDeviceState,
/// Device-specific state.
device: D,
}
impl<D> DeviceState<D> {
/// Create a new `DeviceState` with a device-specific state `device`.
pub(crate) fn new(device: D) -> Self {
Self { common: CommonDeviceState::default(), device }
}
/// Get a reference to the common (device-independant) state.
pub(crate) fn common(&self) -> &CommonDeviceState {
&self.common
}
/// Get a mutable reference to the common (device-independant) state.
pub(crate) fn common_mut(&mut self) -> &mut CommonDeviceState {
&mut self.common
}
/// Get a reference to the inner (device-specific) state.
pub(crate) fn device(&self) -> &D {
&self.device
}
/// Get a mutable reference to the inner (device-specific) state.
pub(crate) fn device_mut(&mut self) -> &mut D {
&mut self.device
}
}
/// The identifier for timer events in the device layer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum DeviceLayerTimerId {
/// A timer event in the ARP layer with a protocol type of IPv4
ArpIpv4(arp::ArpTimerId<usize, Ipv4Addr>),
Ndp(ndp::NdpTimerId),
}
impl From<arp::ArpTimerId<usize, Ipv4Addr>> for DeviceLayerTimerId {
fn from(id: arp::ArpTimerId<usize, Ipv4Addr>) -> DeviceLayerTimerId {
DeviceLayerTimerId::ArpIpv4(id)
}
}
/// Handle a timer event firing in the device layer.
pub(crate) fn handle_timeout<D: EventDispatcher>(ctx: &mut Context<D>, id: DeviceLayerTimerId) {
match id {
DeviceLayerTimerId::ArpIpv4(inner_id) => arp::handle_timer(ctx, inner_id),
DeviceLayerTimerId::Ndp(inner_id) => ndp::handle_timeout(ctx, inner_id),
}
}
/// An event dispatcher for the device layer.
///
/// See the `EventDispatcher` trait in the crate root for more details.
pub trait DeviceLayerEventDispatcher<B: BufferMut> {
/// Send a frame to a device driver.
///
/// If there was an MTU error while attempting to serialize the frame, the
/// original serializer is returned in the `Err` variant. All other errors
/// (for example, errors in allocating a buffer) are silently ignored and
/// reported as success.
///
/// Note, until `device` has been initialized, the netstack promises to not
/// send any outbound traffic to it. See [`initialize_device`] for more
/// information.
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
device: DeviceId,
frame: S,
) -> Result<(), S>;
}
/// Is `device` initialized?
pub(crate) fn is_device_initialized<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> bool {
get_common_device_state(state, device).is_initialized
}
/// Initialize a device.
///
/// `initialize_device` will start soliciting IPv6 routers on the link if `device` is configured to
/// be a host.
///
/// `initialize_device` MUST be called after adding the device to the netstack. A device MUST NOT
/// be used until it has been initialized.
///
/// This initialize step is kept separated from the device creation/allocation step so that
/// implementations have a chance to do some work (such as updating implementation specific IDs or
/// state, configure the device or driver, etc.) before the device is actually initialized and used
/// by this netstack.
///
/// See [`StackState::add_ethernet_device`] for information about adding ethernet devices.
///
/// # Panics
///
/// Panics if `device` is already initialized.
pub fn initialize_device<D: EventDispatcher>(ctx: &mut Context<D>, device: DeviceId) {
let state = get_common_device_state_mut(ctx.state_mut(), device);
// `device` must not already be initialized.
assert!(!state.is_initialized);
state.is_initialized = true;
// Per RFC 4861 section 6.3.7, only a host sends router
// solicitation messages, so if this node is a router, do nothing.
if crate::ip::is_router::<_, Ipv6>(ctx) {
trace!("intialize_device: node is a router so not starting router solicitations");
return;
}
match device.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request in a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note, tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not be returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<AddrSubnet<A>> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ip_addr_subnet(ctx, device.id),
}
}
/// Get the IP address and subnet associated with this device, including a
/// tentative address.
pub fn get_ip_addr_subnet_with_tentative<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<Tentative<AddrSubnet<A>>> {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::get_ip_addr_subnet_with_tentative(ctx, device.id)
}
}
}
/// Set the IP address and subnet associated with this device.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn set_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
addr_sub: AddrSubnet<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("set_ip_addr_subnet: setting addr {:?} for device {:?}", addr_sub, device);
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::set_ip_addr_subnet(ctx, device.id, addr_sub),
}
}
/// Add `device` to a multicast group `multicast_addr`.
///
/// If `device` is already in the multicast group `multicast_addr`,
/// `join_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn join_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} joining multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::join_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Remove `device` from a multicast group `multicast_addr`.
///
/// If `device` is not in the multicast group `multicast_addr`,
/// `leave_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn leave_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} leaving multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::leave_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Is `device` part of the IP multicast group `multicast_addr`?
pub(crate) fn is_in_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) -> bool {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::is_in_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Get the MTU associated with this device.
pub(crate) fn get_mtu<D: EventDispatcher>(state: &StackState<D>, device: DeviceId) -> u32 {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_mtu(state, device.id),
}
}
/// Gets the IPv6 link-local address associated with this device.
// TODO(brunodalbo) when our device model allows for multiple IPs we can have
// a single function to get all the IP addresses associated with a device, which
// would be cleaner and remove the need for this function.
pub fn get_ipv6_link_local_addr<D: EventDispatcher>(
ctx: &Context<D>,
device: DeviceId,
) -> LinkLocalAddr<Ipv6Addr> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ipv6_link_local_addr(ctx, device.id),
}
}
/// Determine if an IP address is considered tentative on a device.
///
/// Returns `true` if the address is tentative on a device; `false` otherwise.
/// Note, if the `addr` is not assigned to `device` but is considered tentative
/// on another device, `is_addr_tentative_on_device` will return `false`.
pub(crate) fn is_addr_tentative_on_device<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
addr: A,
device: DeviceId,
) -> bool {
get_ip_addr_subnet_with_tentative::<_, A>(ctx, device)
.map(|x| (x.inner().addr() == addr) && x.is_tentative())
.unwrap_or(false)
}
/// Get a reference to the common device state for a `device`.
fn get_common_device_state<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> &CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common(),
}
}
/// Get a mutable reference to the common device state for a `device`.
fn get_common_device_state_mut<D: EventDispatcher>(
state: &mut StackState<D>,
device: DeviceId,
) -> &mut CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get_mut(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common_mut(),
}
}
/// An address that may be "tentative" in that it has not yet passed
/// duplicate address detection (DAD).
///
/// A tentative address is one for which DAD is currently being performed.
/// An address is only considered assigned to an interface once DAD has
/// completed without detecting any duplicates. See [RFC 4862] for more details.
///
/// [RFC 4862]: https://tools.ietf.org/html/rfc4862
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Tentative<T>(T, bool);
impl<T> Tentative<T> {
/// Create a new address that is marked as tentative.
pub(crate) fn new_tentative(t: T) -> Self {
Self(t, true)
}
/// Create a new address that is marked as permanent/assigned.
pub(crate) fn new_permanent(t: T) -> Self {
Self(t, false)
}
/// Returns whether the value is tentative.
pub(crate) fn is_tentative(&self) -> bool {
self.1
}
/// Gets the value that is stored inside.
pub(crate) fn into_inner(self) -> T {
self.0
}
/// Converts a `Tentative<T>` into an `Option<T>`, where a tentative value
/// corresponds to `None`.
pub(crate) fn try_into_permanent(self) -> Option<T> |
/// Borrow the content which is stored inside.
pub(crate) fn inner(&self) -> &T {
&self.0
}
/// Similar to `Option::map`.
pub(crate) fn map<U, F>(self, f: F) -> Tentative<U>
where
F: FnOnce(T) -> U,
{
Tentative(f(self.0), self.1)
}
/// Mark the tentative value as permanent.
pub(crate) fn mark_permanent(&mut self) {
self.1 = false
}
}
| {
if self.is_tentative() {
None
} else {
Some(self.into_inner())
}
} | identifier_body |
mod.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The device layer.
pub(crate) mod arp;
pub(crate) mod ethernet;
pub(crate) mod ndp;
use std::fmt::{self, Debug, Display, Formatter};
use log::{debug, trace};
use net_types::ethernet::Mac;
use net_types::ip::{AddrSubnet, IpAddress, Ipv4Addr, Ipv6, Ipv6Addr};
use net_types::{LinkLocalAddr, MulticastAddr};
use packet::{BufferMut, Serializer};
use crate::data_structures::{IdMap, IdMapCollectionKey};
use crate::device::ethernet::{EthernetDeviceState, EthernetDeviceStateBuilder};
use crate::{BufferDispatcher, Context, EventDispatcher, StackState};
/// An ID identifying a device.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct DeviceId {
id: usize,
protocol: DeviceProtocol,
}
impl DeviceId {
/// Construct a new `DeviceId` for an Ethernet device.
pub(crate) fn new_ethernet(id: usize) -> DeviceId {
DeviceId { id, protocol: DeviceProtocol::Ethernet }
}
/// Get the protocol-specific ID for this `DeviceId`.
pub fn id(self) -> usize {
self.id
}
/// Get the protocol for this `DeviceId`.
pub fn protocol(self) -> DeviceProtocol {
self.protocol
}
}
impl Display for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.protocol, self.id)
}
}
impl Debug for DeviceId {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
Display::fmt(self, f)
}
}
impl IdMapCollectionKey for DeviceId {
const VARIANT_COUNT: usize = 1;
fn get_variant(&self) -> usize {
match self.protocol {
DeviceProtocol::Ethernet => 0,
}
}
fn get_id(&self) -> usize {
self.id as usize
}
}
/// Type of device protocol.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub enum DeviceProtocol {
Ethernet,
}
impl Display for DeviceProtocol {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(
f,
"{}",
match self {
DeviceProtocol::Ethernet => "Ethernet",
}
)
}
}
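// Example (sketch): `DeviceId`'s `Display` output combines the protocol name
// with the protocol-specific index.
#[cfg(test)]
fn device_id_display_example() {
    assert_eq!(DeviceId::new_ethernet(3).to_string(), "Ethernet:3");
}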
// TODO(joshlf): Does the IP layer ever need to distinguish between broadcast
// and multicast frames?
/// The type of address used as the destination address in a device-layer
/// frame: unicast, multicast, or broadcast.
///
/// `FrameDestination` is used to implement RFC 1122 section 3.2.2 and RFC 4443
/// section 2.4.e, which govern when to avoid sending an ICMP error message for
/// ICMP and ICMPv6 respectively.
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum FrameDestination {
/// A unicast address - one which is neither multicast nor broadcast.
Unicast,
/// A multicast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Multicast,
/// A broadcast address; if the addressing scheme supports overlap between
/// multicast and broadcast, then broadcast addresses should use the
/// `Broadcast` variant.
Broadcast,
}
impl FrameDestination {
/// Is this `FrameDestination::Multicast`?
pub(crate) fn is_multicast(self) -> bool {
self == FrameDestination::Multicast
}
/// Is this `FrameDestination::Broadcast`?
pub(crate) fn is_broadcast(self) -> bool {
self == FrameDestination::Broadcast
}
}
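// Example (sketch): how a caller might consult `FrameDestination` when
// deciding whether an ICMP error may be generated, per the RFCs cited above.
// `may_send_icmp_error` is a hypothetical helper, not part of this module.
#[cfg(test)]
fn may_send_icmp_error(dst: FrameDestination) -> bool {
    // Error generation is suppressed for frames that were not unicast to us.
    !dst.is_multicast() && !dst.is_broadcast()
}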
/// Builder for a [`DeviceLayerState`].
#[derive(Clone)]
pub struct DeviceStateBuilder {
/// Default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`].
default_ndp_configs: ndp::NdpConfigurations,
}
impl Default for DeviceStateBuilder {
fn default() -> Self {
Self { default_ndp_configs: ndp::NdpConfigurations::default() }
}
}
impl DeviceStateBuilder {
/// Set the default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`] for more details.
pub fn set_default_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.default_ndp_configs = v;
}
/// Build the [`DeviceLayerState`].
pub(crate) fn build(self) -> DeviceLayerState {
DeviceLayerState { ethernet: IdMap::new(), default_ndp_configs: self.default_ndp_configs }
}
}
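// Example (sketch): building the device layer state with explicitly chosen
// NDP defaults; here we simply start from the library defaults.
#[cfg(test)]
fn device_layer_state_example() -> DeviceLayerState {
    let mut builder = DeviceStateBuilder::default();
    builder.set_default_ndp_configs(ndp::NdpConfigurations::default());
    builder.build()
}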
/// The state associated with the device layer.
pub(crate) struct DeviceLayerState {
ethernet: IdMap<DeviceState<EthernetDeviceState>>,
default_ndp_configs: ndp::NdpConfigurations,
}
impl DeviceLayerState {
/// Add a new ethernet device to the device layer.
///
/// `add_ethernet_device` adds a new `EthernetDeviceState` with the given MAC address and
/// MTU. The MTU will be taken as a limit on the size of Ethernet payloads;
/// the Ethernet header is not counted towards the MTU.
pub(crate) fn add_ethernet_device(&mut self, mac: Mac, mtu: u32) -> DeviceId {
let mut builder = EthernetDeviceStateBuilder::new(mac, mtu);
builder.set_ndp_configs(self.default_ndp_configs.clone());
let mut ethernet_state = DeviceState::new(builder.build());
let id = self.ethernet.push(ethernet_state);
debug!("adding Ethernet device with ID {} and MTU {}", id, mtu);
DeviceId::new_ethernet(id)
}
// TODO(rheacock, NET-2140): Add ability to remove inactive devices
}
/// Common state across devices.
#[derive(Default)]
pub(crate) struct CommonDeviceState {
/// Is the device initialized?
is_initialized: bool,
}
/// Device state.
///
/// `D` is the device-specific state.
pub(crate) struct DeviceState<D> {
/// Device-independent state.
common: CommonDeviceState,
/// Device-specific state.
device: D,
}
impl<D> DeviceState<D> {
/// Create a new `DeviceState` with a device-specific state `device`.
pub(crate) fn new(device: D) -> Self {
Self { common: CommonDeviceState::default(), device }
}
/// Get a reference to the common (device-independent) state.
pub(crate) fn common(&self) -> &CommonDeviceState {
&self.common
}
/// Get a mutable reference to the common (device-independent) state.
pub(crate) fn common_mut(&mut self) -> &mut CommonDeviceState {
&mut self.common
}
/// Get a reference to the inner (device-specific) state.
pub(crate) fn device(&self) -> &D {
&self.device
}
/// Get a mutable reference to the inner (device-specific) state.
pub(crate) fn device_mut(&mut self) -> &mut D {
&mut self.device
}
}
/// The identifier for timer events in the device layer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum DeviceLayerTimerId {
/// A timer event in the ARP layer with a protocol type of IPv4.
ArpIpv4(arp::ArpTimerId<usize, Ipv4Addr>),
/// A timer event in the NDP layer.
Ndp(ndp::NdpTimerId),
}
impl From<arp::ArpTimerId<usize, Ipv4Addr>> for DeviceLayerTimerId {
fn from(id: arp::ArpTimerId<usize, Ipv4Addr>) -> DeviceLayerTimerId {
DeviceLayerTimerId::ArpIpv4(id)
}
}
/// Handle a timer event firing in the device layer.
pub(crate) fn handle_timeout<D: EventDispatcher>(ctx: &mut Context<D>, id: DeviceLayerTimerId) {
match id {
DeviceLayerTimerId::ArpIpv4(inner_id) => arp::handle_timer(ctx, inner_id),
DeviceLayerTimerId::Ndp(inner_id) => ndp::handle_timeout(ctx, inner_id),
}
}
/// An event dispatcher for the device layer.
///
/// See the `EventDispatcher` trait in the crate root for more details.
pub trait DeviceLayerEventDispatcher<B: BufferMut> {
/// Send a frame to a device driver.
///
/// If there was an MTU error while attempting to serialize the frame, the
/// original serializer is returned in the `Err` variant. All other errors
/// (for example, errors in allocating a buffer) are silently ignored and
/// reported as success.
///
/// Note, until `device` has been initialized, the netstack promises to not
/// send any outbound traffic to it. See [`initialize_device`] for more
/// information.
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
device: DeviceId,
frame: S,
) -> Result<(), S>;
}
/// Is `device` initialized?
pub(crate) fn is_device_initialized<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> bool {
get_common_device_state(state, device).is_initialized
}
/// Initialize a device.
///
/// `initialize_device` will start soliciting IPv6 routers on the link if `device` is configured to
/// be a host.
///
/// `initialize_device` MUST be called after adding the device to the netstack. A device MUST NOT
/// be used until it has been initialized.
///
/// This initialization step is kept separate from the device creation/allocation step so that
/// implementations have a chance to do some work (such as updating implementation-specific IDs or
/// state, configuring the device or driver, etc.) before the device is actually initialized and used
/// by this netstack.
///
/// See [`StackState::add_ethernet_device`] for information about adding ethernet devices.
///
/// # Panics
///
/// Panics if `device` is already initialized.
pub fn initialize_device<D: EventDispatcher>(ctx: &mut Context<D>, device: DeviceId) {
let state = get_common_device_state_mut(ctx.state_mut(), device);
// `device` must not already be initialized.
assert!(!state.is_initialized);
state.is_initialized = true;
// Per RFC 4861 section 6.3.7, only a host sends router
// solicitation messages, so if this node is a router, do nothing.
if crate::ip::is_router::<_, Ipv6>(ctx) {
trace!("intialize_device: node is a router so not starting router solicitations");
return;
}
match device.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
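// Example (sketch): the required add-then-initialize sequence. `ctx` is the
// caller's `Context<D>`; the MAC and MTU values are illustrative.
//
//     let id = ctx.state_mut().add_ethernet_device(mac, 1500);
//     initialize_device(&mut ctx, id);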
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request in a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note, tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not be returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<AddrSubnet<A>> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ip_addr_subnet(ctx, device.id),
}
}
/// Get the IP address and subnet associated with this device, including a
/// tentative address.
pub fn | <D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<Tentative<AddrSubnet<A>>> {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::get_ip_addr_subnet_with_tentative(ctx, device.id)
}
}
}
/// Set the IP address and subnet associated with this device.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn set_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
addr_sub: AddrSubnet<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("set_ip_addr_subnet: setting addr {:?} for device {:?}", addr_sub, device);
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::set_ip_addr_subnet(ctx, device.id, addr_sub),
}
}
/// Add `device` to a multicast group `multicast_addr`.
///
/// If `device` is already in the multicast group `multicast_addr`,
/// `join_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn join_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} joining multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::join_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Remove `device` from a multicast group `multicast_addr`.
///
/// If `device` is not in the multicast group `multicast_addr`,
/// `leave_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn leave_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} leaving multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::leave_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Is `device` part of the IP multicast group `multicast_addr`?
pub(crate) fn is_in_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) -> bool {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::is_in_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Get the MTU associated with this device.
pub(crate) fn get_mtu<D: EventDispatcher>(state: &StackState<D>, device: DeviceId) -> u32 {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_mtu(state, device.id),
}
}
/// Gets the IPv6 link-local address associated with this device.
// TODO(brunodalbo) when our device model allows for multiple IPs we can have
// a single function to get all the IP addresses associated with a device, which
// would be cleaner and remove the need for this function.
pub fn get_ipv6_link_local_addr<D: EventDispatcher>(
ctx: &Context<D>,
device: DeviceId,
) -> LinkLocalAddr<Ipv6Addr> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ipv6_link_local_addr(ctx, device.id),
}
}
/// Determine if an IP address is considered tentative on a device.
///
/// Returns `true` if the address is tentative on a device; `false` otherwise.
/// Note, if the `addr` is not assigned to `device` but is considered tentative
/// on another device, `is_addr_tentative_on_device` will return `false`.
pub(crate) fn is_addr_tentative_on_device<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
addr: A,
device: DeviceId,
) -> bool {
get_ip_addr_subnet_with_tentative::<_, A>(ctx, device)
.map(|x| (x.inner().addr() == addr) && x.is_tentative())
.unwrap_or(false)
}
/// Get a reference to the common device state for a `device`.
fn get_common_device_state<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> &CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common(),
}
}
/// Get a mutable reference to the common device state for a `device`.
fn get_common_device_state_mut<D: EventDispatcher>(
state: &mut StackState<D>,
device: DeviceId,
) -> &mut CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get_mut(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common_mut(),
}
}
/// An address that may be "tentative" in that it has not yet passed
/// duplicate address detection (DAD).
///
/// A tentative address is one for which DAD is currently being performed.
/// An address is only considered assigned to an interface once DAD has
/// completed without detecting any duplicates. See [RFC 4862] for more details.
///
/// [RFC 4862]: https://tools.ietf.org/html/rfc4862
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Tentative<T>(T, bool);
impl<T> Tentative<T> {
/// Create a new address that is marked as tentative.
pub(crate) fn new_tentative(t: T) -> Self {
Self(t, true)
}
/// Create a new address that is marked as permanent/assigned.
pub(crate) fn new_permanent(t: T) -> Self {
Self(t, false)
}
/// Returns whether the value is tentative.
pub(crate) fn is_tentative(&self) -> bool {
self.1
}
/// Gets the value that is stored inside.
pub(crate) fn into_inner(self) -> T {
self.0
}
/// Converts a `Tentative<T>` into an `Option<T>`, where a tentative value
/// corresponds to `None`.
pub(crate) fn try_into_permanent(self) -> Option<T> {
if self.is_tentative() {
None
} else {
Some(self.into_inner())
}
}
/// Borrow the content which is stored inside.
pub(crate) fn inner(&self) -> &T {
&self.0
}
/// Similar to `Option::map`.
pub(crate) fn map<U, F>(self, f: F) -> Tentative<U>
where
F: FnOnce(T) -> U,
{
Tentative(f(self.0), self.1)
}
/// Mark the tentative value as permanent.
pub(crate) fn mark_permanent(&mut self) {
self.1 = false
}
}
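// Example (sketch): the lifecycle of a `Tentative` value, using a plain
// integer in place of a real address.
#[cfg(test)]
fn tentative_lifecycle_example() {
    let mut v = Tentative::new_tentative(42u32);
    assert!(v.is_tentative());
    assert_eq!(v.try_into_permanent(), None); // `Copy`, so `v` remains usable
    v.mark_permanent(); // e.g. DAD completed without finding a duplicate
    assert_eq!(v.try_into_permanent(), Some(42));
}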
| get_ip_addr_subnet_with_tentative | identifier_name |
old_main.rs |
let mut app = App::new(opengl, window);
while let Some(e) = events.next(&mut app.window) {
e.render(|args| {
app.render(args);
});
e.update(|args| {
app.update(args.dt);
});
// handle keyboard/button presses
e.press(|button| {
if let Button::Keyboard(key) = button {
if key == Key::Space {
app.generate_requested = true;
}
println!("Typed key: {:?}", key);
}
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = true;
app.nav_requested = true;
}
});
e.release(|button| {
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = false;
}
});
e.mouse_cursor(|pos| {
if app.set_cursor(pos) {
if app.mouse_pressed {
app.nav_requested = true;
}
}
});
e.mouse_relative(|change| {
// TODO: only do this if the cursor is "captured"
// if app.update_pointer(None, Some(&change)) { app.route_requested = true; }
});
}
}
struct NavController {
current: Option<Nav>,
last_goal: Option<Point>
}
impl NavController {
fn new() -> Self {
NavController {
current: None,
last_goal: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_goal = None;
}
fn update_nav(&mut self, goal: Point, player_pos: &Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_goal {
Some(g) => !point_eq(&goal, &g),
None => true,
};
if should_update {
self.current = graph.find_route(player_pos, &goal).map(|route| Nav::new(route));
self.last_goal = Some(goal);
}
}
}
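// Example (sketch): per-frame use while the mouse button is held, mirroring
// `App::update` below. `graph` is the current `DungeonFloorGraph`; `cursor`
// and `player` are world-space points maintained by the caller.
//
//     nav_ctl.update_nav(cursor, &player, &graph);
//     if let Some(nav) = &mut nav_ctl.current {
//         nav.advance_by(speed * dt, &mut player);
//     }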
struct Nav {
waypoints: Vec<Point>,
progress: usize,
}
impl Nav {
fn new(waypoints: Vec<Point>) -> Self {
Nav { waypoints, progress: 0, }
}
fn waypoints(&self) -> &Vec<Point> {
&self.waypoints
}
fn progress(&self) -> usize {
self.progress
}
fn current_target(&self) -> Option<&Point> {
self.waypoints.get(self.progress)
}
fn is_complete(&self) -> bool {
self.progress >= self.waypoints.len()
}
/// Modify `pos` by moving it `step` units towards the next waypoint, or no-op if navigation is complete.
/// Returns `true` to indicate navigation is complete, or `false` to indicate there is further movement to do.
fn advance_by(&mut self, step: f64, pos: &mut Point) -> bool {
if let Some(&target) = self.current_target() {
let to_target = vec2_sub(target, *pos);
let dist = vec2_len(to_target);
if dist < step {
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this ends the recursion
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
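// Example (sketch): driving `Nav` to completion with fixed-size steps. The
// waypoints and speed are illustrative; `Point` is the `[f64; 2]` alias used
// throughout this file.
#[cfg(test)]
fn nav_advance_example() {
    let mut pos: Point = [0.0, 0.0];
    let mut nav = Nav::new(vec![[10.0, 0.0], [10.0, 10.0]]);
    // `advance_by` returns `true` once the final waypoint is reached.
    while !nav.advance_by(200.0 * (1.0 / 60.0), &mut pos) {}
    assert!(nav.is_complete());
    assert_eq!(pos, [10.0, 10.0]); // `pos` snaps exactly onto each waypoint
}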
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
let screen_size = window.size().into();
App {
gl: GlGraphics::new(opengl),
window,
world: MyGameWorld::new(),
pcc: PlayerCameraCursor::new(screen_size),
mouse_pressed: false,
generate_requested: true,
pointed_room: PointedRoom::new(),
nav_requested: false,
nav: NavController::new(),
}
}
fn update(&mut self, dt: f64) {
// if the world needs to regenerate, do it now
if self.generate_requested {
let Size { width, height } = self.window.size();
self.regenerate(width as i32, height as i32);
}
// update the navigation target as long as the mouse is down
if self.mouse_pressed {
if let Some(graph) = &self.world.floor_graph {
self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
}
}
// move the player along the current navigation path
if let Some(nav) = &mut self.nav.current {
self.pcc.modify(|PccState { player_pos, .. }| {
nav.advance_by(200.0 * dt, player_pos);
});
}
// update the player camera/cursor if it was modified since the last update
self.pcc.update();
// re-check the 'pointed room' if the mouse cursor's world position has changed
if let Some(graph) = &self.world.floor_graph {
self.pointed_room.update(self.pcc.cursor_pos, graph);
}
}
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let world = &self.world;
let pcc = &self.pcc;
let player_pos = &pcc.player_pos;
let cursor = pcc.cursor_pos;
let pointed_room = &self.pointed_room;
let nav_opt = &self.nav.current;
self.gl.draw(args.viewport(), |_c, gl| {
let c = _c.append_transform(pcc.camera);
clear(BACKGROUND_COLOR, gl);
// PRETTY room tiles + walls + doors
if let Some(dungeon) = world.dungeon() {
let tiles = dungeon.tiles();
let tile_size = world.tile_pixel_size() as f64;
// fill in a square for each room tile in the grid
for addr in tiles.tile_addresses() {
if let Some((_room_id, room_weight)) = tiles[addr] {
let color = {
if room_weight >= 1.0 && room_weight <= 2.0 {
lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
} else if room_weight >= 1.0 {
WHITE
} else {
lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
}
};
let x = addr.x as f64 * tile_size;
let y = addr.y as f64 * tile_size;
let rect = [x, y, tile_size, tile_size];
rectangle(color, rect, c.transform, gl);
}
}
// draw an appropriate line(s) for each wall in the dungeon
for (wall_addr, wall_type) in dungeon.walls().iter() {
match *wall_type {
WallType::Clear => (),
WallType::Wall => {
let TileAddress { x, y } = wall_addr.tile();
let (base_to, base_from) = match wall_addr.direction() {
CompassDirection::North => ((0, 1), (1, 1)),
CompassDirection::East => ((1, 1), (1, 0)),
CompassDirection::South => ((0, 0), (1, 0)),
CompassDirection::West => ((0, 0), (0, 1)),
};
let to_px = |(dx, dy)| {
[(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
};
line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
}
WallType::Door => {
let TileAddress { x, y } = wall_addr.tile();
match wall_addr.direction() {
CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
}
}
}
}
}
// NAVIGATION-related debug
if let Some(floor_graph) = &world.floor_graph {
// DEBUG: walkable areas
for node in floor_graph.nodes().iter() {
let bounds = &floor_graph.get_bounds(*node.id());
let color = match node {
FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
};
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(color, rect, c.transform, gl);
}
// DEBUG: cursor target walkable area
if let Some(pointed_room) = pointed_room.current {
let bounds = floor_graph.get_bounds(pointed_room);
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(POINTED_ROOM_COLOR, rect, c.transform, gl);
}
}
if let Some(nav) = nav_opt {
let start = Some(player_pos.clone());
let lines = start.iter().chain(nav.waypoints().iter().skip(nav.progress)).sliding();
for (from, to) in lines {
line_from_to(PATH_COLOR, 1.0, *from, *to, c.transform, gl);
}
}
// DEBUG: cursor
{
let [cx, cy] = cursor;
let vertical = rectangle::centered([cx, cy, 1.0, 4.0]);
let horizontal = rectangle::centered([cx, cy, 4.0, 1.0]);
rectangle(CURSOR_COLOR, vertical, c.transform, gl);
rectangle(CURSOR_COLOR, horizontal, c.transform, gl);
}
{
let [x, y] = player_pos;
let player = circle(*x, *y, 3.0);
ellipse(CURSOR_COLOR, player, c.transform, gl);
}
});
}
// updates the app's knowledge of the mouse cursor, returning `true` if the cursor position has changed since last time
fn set_cursor(&mut self, cursor_screen: [f64; 2]) -> bool {
self.pcc.modify(|PccState { cursor_px, .. }| {
*cursor_px = cursor_screen;
});
self.pcc.dirty
}
fn regenerate(&mut self, width: i32, height: i32) {
// regenerate the "world"
self.world.regenerate(Rect::from_xywh(0, 0, width, height));
// reset any app state that depends on the previous "world"
self.nav.forget();
self.pointed_room.forget();
self.generate_requested = false;
// pick a random position for the player
let new_player_pos = self.world.floor_graph.as_ref().and_then(|graph| {
let mut rng = thread_rng();
graph.nodes().choose(&mut rng).map(|n| {
let point = graph.get_bounds(*n.id()).center();
[point.x, point.y]
}).clone()
});
if let Some(pos) = new_player_pos {
self.pcc.modify(|PccState { player_pos, .. }| {
*player_pos = pos;
});
}
}
}
struct | {
current: Option<FloorNodeId>,
last_pointer: Option<Point>
}
impl PointedRoom {
fn new() -> Self {
PointedRoom {
current: None,
last_pointer: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_pointer = None;
}
fn update(&mut self, pointer: Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_pointer {
Some(last) => last[0] != pointer[0] || last[1] != pointer[1],
None => true,
};
if should_update {
self.current = graph.node_at_point(&pointer).map(|n| n.id()).cloned();
self.last_pointer = Some(pointer);
}
}
}
struct PccState<'a> {
pub cursor_px: &'a mut [f64; 2],
pub screen_px: &'a mut [f64; 2],
pub player_pos: &'a mut [f64; 2],
}
struct PlayerCameraCursor {
cursor_px: [f64; 2],
cursor_pos: [f64; 2],
screen_px: [f64; 2],
player_pos: [f64; 2],
camera: Matrix2d,
camera_inv: Matrix2d,
dirty: bool,
}
impl PlayerCameraCursor {
fn new(screen_size: [u32; 2]) -> Self {
PlayerCameraCursor {
cursor_px: [0.0, 0.0],
cursor_pos: [0.0, 0.0],
screen_px: [screen_size[0] as f64, screen_size[1] as f64],
player_pos: [screen_size[0] as f64 / 2.0, screen_size[1] as f64 / 2.0],
camera: identity(),
camera_inv: identity(),
dirty: true,
}
}
fn update(&mut self) {
if self.dirty {
let zoom_factor = 4.0;
// this is some kind of voodoo...
// for one, the order of operations seems wrong to me
// for two, after translating by `-player_pos` without a scale factor,
// you have to apply the scale factor to the half_screen translation??
self.camera = identity()
.zoom(zoom_factor)
.trans_pos(vec2_neg(self.player_pos))
.trans_pos(vec2_scale(self.screen_px, 0.5 / zoom_factor));
self.camera_inv = mat2x3_inv(self.camera);
self.cursor_pos = row_mat2x3_transform_pos2(self.camera_inv, self.cursor_px);
self.dirty = false;
}
}
fn modify<F>(&mut self, f: F)
where F: FnOnce(PccState) -> ()
{
let [cx1, cy1] = self.cursor_px;
let [sx1, sy1] = self.screen_px;
let [px1, py1] = self.player_pos;
f(PccState {
cursor_px: &mut self.cursor_px,
screen_px: &mut self.screen_px,
player_pos: &mut self.player_pos,
});
let [cx2, cy2] = self.cursor_px;
let [sx2, sy2] = self.screen_px;
let [px2, py2] = self.player_pos;
if (cx1 != cx2) || (cy1 != cy2) || (sx1 != sx2) || (sy1 != sy2) || (px1 != px2) || (py1 != py2) {
self.dirty = true;
}
}
}
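// Example (sketch): `modify` records whether any tracked value changed, and
// `update` rebuilds the camera matrices only while the dirty flag is set.
#[cfg(test)]
fn pcc_dirty_example() {
    let mut pcc = PlayerCameraCursor::new([800, 600]);
    pcc.update(); // clears the initial dirty flag
    pcc.modify(|PccState { player_pos, .. }| player_pos[0] += 5.0);
    assert!(pcc.dirty);
    pcc.update();
    assert!(!pcc.dirty);
}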
fn draw_horizontal_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel_pos = |xt: f64, yt: f64| { [xt * tile_size, yt * tile_size] };
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y), pixel_pos(x + 0.25, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y), pixel_pos(x + 1.0, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.25, y + 0.1), pixel_pos(x + 0.25, y - 0.1), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y + 0.1), pixel_pos(x + 0.75, y - 0.1), ctx.transform, gl);
}
fn draw_vertical_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel | PointedRoom | identifier_name |
old_main.rs |
let mut app = App::new(opengl, window);
while let Some(e) = events.next(&mut app.window) {
e.render(|args| {
app.render(args);
});
e.update(|args| {
app.update(args.dt);
});
// handle keyboard/button presses
e.press(|button| {
if let Button::Keyboard(key) = button {
if key == Key::Space {
app.generate_requested = true;
}
println!("Typed key: {:?}", key);
}
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = true;
app.nav_requested = true;
}
});
e.release(|button| {
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = false;
}
});
e.mouse_cursor(|pos| {
if app.set_cursor(pos) {
if app.mouse_pressed {
app.nav_requested = true;
}
}
});
e.mouse_relative(|change| {
// TODO: only do this if the cursor is "captured"
// if app.update_pointer(None, Some(&change)) { app.route_requested = true; }
});
}
}
struct NavController {
current: Option<Nav>,
last_goal: Option<Point>
}
impl NavController {
fn new() -> Self {
NavController {
current: None,
last_goal: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_goal = None;
}
fn update_nav(&mut self, goal: Point, player_pos: &Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_goal {
Some(g) => !point_eq(&goal, &g),
None => true,
};
if should_update {
self.current = graph.find_route(player_pos, &goal).map(|route| Nav::new(route));
self.last_goal = Some(goal);
}
}
}
struct Nav {
waypoints: Vec<Point>,
progress: usize,
}
impl Nav {
fn new(waypoints: Vec<Point>) -> Self {
Nav { waypoints, progress: 0, }
}
fn waypoints(&self) -> &Vec<Point> {
&self.waypoints
}
fn progress(&self) -> usize {
self.progress
}
fn current_target(&self) -> Option<&Point> {
self.waypoints.get(self.progress)
}
fn is_complete(&self) -> bool {
self.progress >= self.waypoints.len()
}
/// Modify `pos` by moving it `step` units towards the next waypoint, or no-op if navigation is complete.
/// Returns `true` to indicate navigation is complete, or `false` to indicate there is further movement to do.
fn advance_by(&mut self, step: f64, pos: &mut Point) -> bool {
if let Some(&target) = self.current_target() {
let to_target = vec2_sub(target, *pos);
let dist = vec2_len(to_target);
if dist < step {
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this ends the recursion
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
let screen_size = window.size().into();
App {
gl: GlGraphics::new(opengl),
window,
world: MyGameWorld::new(),
pcc: PlayerCameraCursor::new(screen_size),
mouse_pressed: false,
generate_requested: true,
pointed_room: PointedRoom::new(),
nav_requested: false,
nav: NavController::new(),
}
}
fn update(&mut self, dt: f64) {
// if the world needs to regenerate, do it now
if self.generate_requested {
let Size { width, height } = self.window.size();
self.regenerate(width as i32, height as i32);
}
// update the navigation target as long as the mouse is down
if self.mouse_pressed {
if let Some(graph) = &self.world.floor_graph {
self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
}
}
// move the player along the current navigation path
if let Some(nav) = &mut self.nav.current {
self.pcc.modify(|PccState { player_pos, .. }| {
nav.advance_by(200.0 * dt, player_pos);
});
}
// update the player camera/cursor if it was modified since the last update
self.pcc.update();
// re-check the 'pointed room' if the mouse cursor's world position has changed
if let Some(graph) = &self.world.floor_graph {
self.pointed_room.update(self.pcc.cursor_pos, graph);
}
}
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let world = &self.world;
let pcc = &self.pcc;
let player_pos = &pcc.player_pos;
let cursor = pcc.cursor_pos;
let pointed_room = &self.pointed_room;
let nav_opt = &self.nav.current;
self.gl.draw(args.viewport(), |_c, gl| {
let c = _c.append_transform(pcc.camera);
clear(BACKGROUND_COLOR, gl);
// PRETTY room tiles + walls + doors
if let Some(dungeon) = world.dungeon() {
let tiles = dungeon.tiles();
let tile_size = world.tile_pixel_size() as f64;
// fill in a square for each room tile in the grid
for addr in tiles.tile_addresses() {
if let Some((_room_id, room_weight)) = tiles[addr] {
let color = {
if room_weight >= 1.0 && room_weight <= 2.0 {
lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
} else if room_weight >= 1.0 {
WHITE
} else {
lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
}
};
let x = addr.x as f64 * tile_size;
let y = addr.y as f64 * tile_size;
let rect = [x, y, tile_size, tile_size];
rectangle(color, rect, c.transform, gl);
}
}
// draw an appropriate line(s) for each wall in the dungeon
for (wall_addr, wall_type) in dungeon.walls().iter() {
match *wall_type {
WallType::Clear => (),
WallType::Wall => {
let TileAddress { x, y } = wall_addr.tile();
let (base_to, base_from) = match wall_addr.direction() {
CompassDirection::North => ((0, 1), (1, 1)),
CompassDirection::East => ((1, 1), (1, 0)),
CompassDirection::South => ((0, 0), (1, 0)),
CompassDirection::West => ((0, 0), (0, 1)),
};
let to_px = |(dx, dy)| {
[(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
};
line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
}
WallType::Door => {
let TileAddress { x, y } = wall_addr.tile();
match wall_addr.direction() {
CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
}
}
}
}
}
// NAVIGATION-related debug
if let Some(floor_graph) = &world.floor_graph {
// DEBUG: walkable areas
for node in floor_graph.nodes().iter() {
let bounds = &floor_graph.get_bounds(*node.id());
let color = match node {
FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
};
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(color, rect, c.transform, gl);
}
// DEBUG: cursor target walkable area
if let Some(pointed_room) = pointed_room.current {
let bounds = floor_graph.get_bounds(pointed_room);
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(POINTED_ROOM_COLOR, rect, c.transform, gl);
}
}
if let Some(nav) = nav_opt |
// DEBUG: cursor
{
let [cx, cy] = cursor;
let vertical = rectangle::centered([cx, cy, 1.0, 4.0]);
let horizontal = rectangle::centered([cx, cy, 4.0, 1.0]);
rectangle(CURSOR_COLOR, vertical, c.transform, gl);
rectangle(CURSOR_COLOR, horizontal, c.transform, gl);
}
{
let [x, y] = player_pos;
let player = circle(*x, *y, 3.0);
ellipse(CURSOR_COLOR, player, c.transform, gl);
}
});
}
// updates the app's knowledge of the mouse cursor, returning `true` if the cursor position has changed since last time
fn set_cursor(&mut self, cursor_screen: [f64; 2]) -> bool {
self.pcc.modify(|PccState { cursor_px, .. }| {
*cursor_px = cursor_screen;
});
self.pcc.dirty
}
fn regenerate(&mut self, width: i32, height: i32) {
// regenerate the "world"
self.world.regenerate(Rect::from_xywh(0, 0, width, height));
// reset any app state that depends on the previous "world"
self.nav.forget();
self.pointed_room.forget();
self.generate_requested = false;
// pick a random position for the player
let new_player_pos = self.world.floor_graph.as_ref().and_then(|graph| {
let mut rng = thread_rng();
graph.nodes().choose(&mut rng).map(|n| {
let point = graph.get_bounds(*n.id()).center();
[point.x, point.y]
}).clone()
});
if let Some(pos) = new_player_pos {
self.pcc.modify(|PccState { player_pos, .. }| {
*player_pos = pos;
});
}
}
}
struct PointedRoom {
current: Option<FloorNodeId>,
last_pointer: Option<Point>
}
impl PointedRoom {
fn new() -> Self {
PointedRoom {
current: None,
last_pointer: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_pointer = None;
}
fn update(&mut self, pointer: Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_pointer {
Some(last) => last[0] != pointer[0] || last[1] != pointer[1],
None => true,
};
if should_update {
self.current = graph.node_at_point(&pointer).map(|n| n.id()).cloned();
self.last_pointer = Some(pointer);
}
}
}
struct PccState<'a> {
pub cursor_px: &'a mut [f64; 2],
pub screen_px: &'a mut [f64; 2],
pub player_pos: &'a mut [f64; 2],
}
struct PlayerCameraCursor {
cursor_px: [f64; 2],
cursor_pos: [f64; 2],
screen_px: [f64; 2],
player_pos: [f64; 2],
camera: Matrix2d,
camera_inv: Matrix2d,
dirty: bool,
}
impl PlayerCameraCursor {
fn new(screen_size: [u32; 2]) -> Self {
PlayerCameraCursor {
cursor_px: [0.0, 0.0],
cursor_pos: [0.0, 0.0],
screen_px: [screen_size[0] as f64, screen_size[1] as f64],
player_pos: [screen_size[0] as f64 / 2.0, screen_size[1] as f64 / 2.0],
camera: identity(),
camera_inv: identity(),
dirty: true,
}
}
fn update(&mut self) {
if self.dirty {
let zoom_factor = 4.0;
// this is some kind of voodoo...
// for one, the order of operations seems wrong to me
// for two, after translating by `-player_pos` without a scale factor,
// you have to apply the scale factor to the half_screen translation??
self.camera = identity()
.zoom(zoom_factor)
.trans_pos(vec2_neg(self.player_pos))
.trans_pos(vec2_scale(self.screen_px, 0.5 / zoom_factor));
self.camera_inv = mat2x3_inv(self.camera);
self.cursor_pos = row_mat2x3_transform_pos2(self.camera_inv, self.cursor_px);
self.dirty = false;
}
}
fn modify<F>(&mut self, f: F)
where F: FnOnce(PccState) -> ()
{
let [cx1, cy1] = self.cursor_px;
let [sx1, sy1] = self.screen_px;
let [px1, py1] = self.player_pos;
f(PccState {
cursor_px: &mut self.cursor_px,
screen_px: &mut self.screen_px,
player_pos: &mut self.player_pos,
});
let [cx2, cy2] = self.cursor_px;
let [sx2, sy2] = self.screen_px;
let [px2, py2] = self.player_pos;
if (cx1 != cx2) || (cy1 != cy2) || (sx1 != sx2) || (sy1 != sy2) || (px1 != px2) || (py1 != py2) {
self.dirty = true;
}
}
}
fn draw_horizontal_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel_pos = |xt: f64, yt: f64| { [xt * tile_size, yt * tile_size] };
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y), pixel_pos(x + 0.25, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y), pixel_pos(x + 1.0, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.25, y + 0.1), pixel_pos(x + 0.25, y - 0.1), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y + 0.1), pixel_pos(x + 0.75, y - 0.1), ctx.transform, gl);
}
fn draw_vertical_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel | {
let start = Some(player_pos.clone());
let lines = start.iter().chain(nav.waypoints().iter().skip(nav.progress)).sliding();
for (from, to) in lines {
line_from_to(PATH_COLOR, 1.0, *from, *to, c.transform, gl);
}
} | conditional_block |
old_main.rs | ());
let mut app = App::new(opengl, window);
while let Some(e) = events.next(&mut app.window) {
e.render(|args| { | e.update(|args| {
app.update(args.dt);
});
// handle keyboard/button presses
e.press(|button| {
if let Button::Keyboard(key) = button {
if key == Key::Space {
app.generate_requested = true;
}
println!("Typed key: {:?}", key);
}
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = true;
app.nav_requested = true;
}
});
e.release(|button| {
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = false;
}
});
e.mouse_cursor(|pos| {
if app.set_cursor(pos) {
if app.mouse_pressed {
app.nav_requested = true;
}
}
});
e.mouse_relative(|change| {
// TODO: only do this if the cursor is "captured"
// if app.update_pointer(None, Some(&change)) { app.route_requested = true; }
});
}
}
struct NavController {
current: Option<Nav>,
last_goal: Option<Point>
}
impl NavController {
fn new() -> Self {
NavController {
current: None,
last_goal: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_goal = None;
}
fn update_nav(&mut self, goal: Point, player_pos: &Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_goal {
Some(g) => !point_eq(&goal, &g),
None => true,
};
if should_update {
self.current = graph.find_route(player_pos, &goal).map(|route| Nav::new(route));
self.last_goal = Some(goal);
}
}
}
struct Nav {
waypoints: Vec<Point>,
progress: usize,
}
impl Nav {
fn new(waypoints: Vec<Point>) -> Self {
Nav { waypoints, progress: 0, }
}
fn waypoints(&self) -> &Vec<Point> {
&self.waypoints
}
fn progress(&self) -> usize {
self.progress
}
fn current_target(&self) -> Option<&Point> {
self.waypoints.get(self.progress)
}
fn is_complete(&self) -> bool {
self.progress >= self.waypoints.len()
}
/// Modify `pos` by moving it `step` units towards the next waypoint, or no-op if navigation is complete.
/// Returns `true` to indicate navigation is complete, or `false` to indicate there is further movement to do.
fn advance_by(&mut self, step: f64, pos: &mut Point) -> bool {
if let Some(&target) = self.current_target() {
let to_target = vec2_sub(target, *pos);
let dist = vec2_len(to_target);
if dist < step {
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this ends the recursion
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
let screen_size = window.size().into();
App {
gl: GlGraphics::new(opengl),
window,
world: MyGameWorld::new(),
pcc: PlayerCameraCursor::new(screen_size),
mouse_pressed: false,
generate_requested: true,
pointed_room: PointedRoom::new(),
nav_requested: false,
nav: NavController::new(),
}
}
fn update(&mut self, dt: f64) {
// if the world needs to regenerate, do it now
if self.generate_requested {
let Size { width, height } = self.window.size();
self.regenerate(width as i32, height as i32);
}
// update the navigation target as long as the mouse is down
if self.mouse_pressed {
if let Some(graph) = &self.world.floor_graph {
self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
}
}
// move the player along the current navigation path
if let Some(nav) = &mut self.nav.current {
self.pcc.modify(|PccState { player_pos, .. }| {
nav.advance_by(200.0 * dt, player_pos);
});
}
// update the player camera/cursor if it was modified since the last update
self.pcc.update();
// re-check the 'pointed room' if the mouse cursor's world position has changed
if let Some(graph) = &self.world.floor_graph {
self.pointed_room.update(self.pcc.cursor_pos, graph);
}
}
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let world = &self.world;
let pcc = &self.pcc;
let player_pos = &pcc.player_pos;
let cursor = pcc.cursor_pos;
let pointed_room = &self.pointed_room;
let nav_opt = &self.nav.current;
self.gl.draw(args.viewport(), |_c, gl| {
let c = _c.append_transform(pcc.camera);
clear(BACKGROUND_COLOR, gl);
// PRETTY room tiles + walls + doors
if let Some(dungeon) = world.dungeon() {
let tiles = dungeon.tiles();
let tile_size = world.tile_pixel_size() as f64;
// fill in a square for each room tile in the grid
for addr in tiles.tile_addresses() {
if let Some((_room_id, room_weight)) = tiles[addr] {
let color = {
if room_weight >= 1.0 && room_weight <= 2.0 {
lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
} else if room_weight >= 1.0 {
WHITE
} else {
lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
}
};
let x = addr.x as f64 * tile_size;
let y = addr.y as f64 * tile_size;
let rect = [x, y, tile_size, tile_size];
rectangle(color, rect, c.transform, gl);
}
}
// draw an appropriate line(s) for each wall in the dungeon
for (wall_addr, wall_type) in dungeon.walls().iter() {
match *wall_type {
WallType::Clear => (),
WallType::Wall => {
let TileAddress { x, y } = wall_addr.tile();
let (base_to, base_from) = match wall_addr.direction() {
CompassDirection::North => ((0, 1), (1, 1)),
CompassDirection::East => ((1, 1), (1, 0)),
CompassDirection::South => ((0, 0), (1, 0)),
CompassDirection::West => ((0, 0), (0, 1)),
};
let to_px = |(dx, dy)| {
[(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
};
line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
}
WallType::Door => {
let TileAddress { x, y } = wall_addr.tile();
match wall_addr.direction() {
CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
}
}
}
}
}
// NAVIGATION-related debug
if let Some(floor_graph) = &world.floor_graph {
// DEBUG: walkable areas
for node in floor_graph.nodes().iter() {
let bounds = &floor_graph.get_bounds(*node.id());
let color = match node {
FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
};
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(color, rect, c.transform, gl);
}
// DEBUG: cursor target walkable area
if let Some(pointed_room) = pointed_room.current {
let bounds = floor_graph.get_bounds(pointed_room);
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(POINTED_ROOM_COLOR, rect, c.transform, gl);
}
}
if let Some(nav) = nav_opt {
let start = Some(player_pos.clone());
let lines = start.iter().chain(nav.waypoints().iter().skip(nav.progress)).sliding();
for (from, to) in lines {
line_from_to(PATH_COLOR, 1.0, *from, *to, c.transform, gl);
}
}
// DEBUG: cursor
{
let [cx, cy] = cursor;
let vertical = rectangle::centered([cx, cy, 1.0, 4.0]);
let horizontal = rectangle::centered([cx, cy, 4.0, 1.0]);
rectangle(CURSOR_COLOR, vertical, c.transform, gl);
rectangle(CURSOR_COLOR, horizontal, c.transform, gl);
}
{
let [x, y] = player_pos;
let player = circle(*x, *y, 3.0);
ellipse(CURSOR_COLOR, player, c.transform, gl);
}
});
}
// updates the app's knowledge of the mouse cursor, returning `true` if the cursor position has changed since last time
fn set_cursor(&mut self, cursor_screen: [f64; 2]) -> bool {
self.pcc.modify(|PccState { cursor_px, .. }| {
*cursor_px = cursor_screen;
});
self.pcc.dirty
}
fn regenerate(&mut self, width: i32, height: i32) {
// regenerate the "world"
self.world.regenerate(Rect::from_xywh(0, 0, width, height));
// reset any app state that depends on the previous "world"
self.nav.forget();
self.pointed_room.forget();
self.generate_requested = false;
// pick a random position for the player
let new_player_pos = self.world.floor_graph.as_ref().and_then(|graph| {
let mut rng = thread_rng();
graph.nodes().choose(&mut rng).map(|n| {
let point = graph.get_bounds(*n.id()).center();
[point.x, point.y]
}).clone()
});
if let Some(pos) = new_player_pos {
self.pcc.modify(|PccState { player_pos, .. }| {
*player_pos = pos;
});
}
}
}
struct PointedRoom {
current: Option<FloorNodeId>,
last_pointer: Option<Point>
}
impl PointedRoom {
fn new() -> Self {
PointedRoom {
current: None,
last_pointer: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_pointer = None;
}
fn update(&mut self, pointer: Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_pointer {
Some(last) => last[0] != pointer[0] || last[1] != pointer[1],
None => true,
};
if should_update {
self.current = graph.node_at_point(&pointer).map(|n| n.id()).cloned();
self.last_pointer = Some(pointer);
}
}
}
struct PccState<'a> {
pub cursor_px: &'a mut [f64; 2],
pub screen_px: &'a mut [f64; 2],
pub player_pos: &'a mut [f64; 2],
}
struct PlayerCameraCursor {
cursor_px: [f64; 2],
cursor_pos: [f64; 2],
screen_px: [f64; 2],
player_pos: [f64; 2],
camera: Matrix2d,
camera_inv: Matrix2d,
dirty: bool,
}
impl PlayerCameraCursor {
fn new(screen_size: [u32; 2]) -> Self {
PlayerCameraCursor {
cursor_px: [0.0, 0.0],
cursor_pos: [0.0, 0.0],
screen_px: [screen_size[0] as f64, screen_size[1] as f64],
player_pos: [screen_size[0] as f64 / 2.0, screen_size[1] as f64 / 2.0],
camera: identity(),
camera_inv: identity(),
dirty: true,
}
}
fn update(&mut self) {
if self.dirty {
let zoom_factor = 4.0;
// this is some kind of voodoo...
// for one, the order of operations seems wrong to me
// for two, after translating by `-player_pos` without a scale factor,
// you have to apply the scale factor to the half_screen translation??
self.camera = identity()
.zoom(zoom_factor)
.trans_pos(vec2_neg(self.player_pos))
.trans_pos(vec2_scale(self.screen_px, 0.5 / zoom_factor));
self.camera_inv = mat2x3_inv(self.camera);
self.cursor_pos = row_mat2x3_transform_pos2(self.camera_inv, self.cursor_px);
self.dirty = false;
}
}
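// Unpacking the voodoo above (observation; values illustrative): because the
// two `trans_pos` calls come after `zoom`, their offsets are given in world
// units but their on-screen effect is multiplied by the zoom factor, so the
// composed map is camera(x) = zoom_factor * (x - player_pos) + screen_px / 2.
// That is why the half-screen offset must be pre-divided by zoom_factor.
// Example: zoom 4, screen [800, 600], player [100, 100] gives
// camera([100, 100]) = [400, 300], i.e. the player lands at screen center.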
fn modify<F>(&mut self, f: F)
where F: FnOnce(PccState) -> ()
{
let [cx1, cy1] = self.cursor_px;
let [sx1, sy1] = self.screen_px;
let [px1, py1] = self.player_pos;
f(PccState {
cursor_px: &mut self.cursor_px,
screen_px: &mut self.screen_px,
player_pos: &mut self.player_pos,
});
let [cx2, cy2] = self.cursor_px;
let [sx2, sy2] = self.screen_px;
let [px2, py2] = self.player_pos;
if (cx1 != cx2) || (cy1 != cy2) || (sx1 != sx2) || (sy1 != sy2) || (px1 != px2) || (py1 != py2) {
self.dirty = true;
}
}
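// Note on the snapshot comparison in `modify` (observation, not a behavior
// change): since `[f64; 2]` implements PartialEq, the six scalar checks are
// equivalent to comparing the before/after arrays directly, e.g.
// `([cx1, cy1], [sx1, sy1], [px1, py1]) != (self.cursor_px, self.screen_px, self.player_pos)`.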
}
fn draw_horizontal_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel_pos = |xt: f64, yt: f64| { [xt * tile_size, yt * tile_size] };
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x, y), pixel_pos(x + 0.25, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y), pixel_pos(x + 1.0, y), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.25, y + 0.1), pixel_pos(x + 0.25, y - 0.1), ctx.transform, gl);
graphics::line_from_to(DEBUG_WALL_COLOR, 0.5, pixel_pos(x + 0.75, y + 0.1), pixel_pos(x + 0.75, y - 0.1), ctx.transform, gl);
}
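// Door-drawing geometry (descriptive note): the two long segments cover the
// outer quarters of the tile edge, [0, 0.25] and [0.75, 1.0] in tile units,
// leaving the middle half open as the doorway; the two short perpendicular
// ticks at 0.25 and 0.75 mark the door jambs.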
fn draw_vertical_door(ctx: &Context, gl: &mut GlGraphics, tile_size: f64, x: f64, y: f64) {
let pixel_pos | app.render(args);
});
| random_line_split |
aarch64.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::{Display, Formatter};
use std::result;
use arch::aarch64::regs::Aarch64Register;
use kvm_ioctls::*;
use logger::{error, IncMetric, METRICS};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vm_memory::{Address, GuestAddress, GuestMemoryMmap};
use crate::vstate::vcpu::VcpuEmulation;
use crate::vstate::vm::Vm;
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug)]
pub enum Error {
/// Error configuring the general purpose aarch64 registers.
ConfigureRegisters(arch::aarch64::regs::Error),
/// Cannot open the kvm related file descriptor.
CreateFd(kvm_ioctls::Error),
/// Error getting the Vcpu preferred target on Arm.
GetPreferredTarget(kvm_ioctls::Error),
/// Error doing Vcpu Init on Arm.
Init(kvm_ioctls::Error),
/// Failed to set value for some arm specific register.
RestoreState(arch::aarch64::regs::Error),
/// Failed to fetch value for some arm specific register.
SaveState(arch::aarch64::regs::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
use self::Error::*;
match self {
ConfigureRegisters(err) => {
write!(
f,
"Error configuring the general purpose registers: {}",
err
)
}
CreateFd(err) => write!(f, "Error in opening the VCPU file descriptor: {}", err),
GetPreferredTarget(err) => {
write!(f, "Error retrieving the vcpu preferred target: {}", err)
}
Init(err) => write!(f, "Error initializing the vcpu: {}", err),
RestoreState(err) => write!(f, "Failed to restore the state of the vcpu: {}", err),
SaveState(err) => write!(f, "Failed to save the state of the vcpu: {}", err),
}
}
}
type Result<T> = result::Result<T, Error>;
/// A wrapper around creating and using a kvm aarch64 vcpu.
pub struct KvmVcpu {
pub index: u8,
pub fd: VcpuFd,
pub mmio_bus: Option<devices::Bus>,
mpidr: u64,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn new(index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError> {
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
}
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
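// Illustrative example of the feature flags above: for a non-boot vcpu
// (index > 0), features[0] ends up carrying both bits,
// (1 << KVM_ARM_VCPU_PSCI_0_2) | (1 << KVM_ARM_VCPU_POWER_OFF), on top of
// whatever get_preferred_target() filled in.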
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will give this away for saving and restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
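// Minimal usage sketch (assumes an already-initialized `vcpu: KvmVcpu`; not
// part of the original module):
//
//     let snapshot: VcpuState = vcpu.save_state()?;
//     // ... pause, serialize, or migrate the microVM ...
//     vcpu.restore_state(&snapshot)?;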
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (vm, vcpu, _) = setup_vcpu(0x10000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = vcpu.init(vm.fd());
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error retrieving the vcpu preferred target: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_vcpu_save_restore_state() {
let (mut vm, _vm_mem) = setup_vm(0x1000);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vm.setup_irqchip(1).unwrap();
// Calling KVM_GET_REGLIST before KVM_VCPU_INIT will result in error.
let res = vcpu.save_state();
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to save the state of the vcpu: Failed to get X0 register: Exec format error \
(os error 8)"
.to_string()
);
// Try to restore the register using a faulty state.
let faulty_vcpu_state = VcpuState {
regs: vec![Aarch64Register { id: 0, value: 0 }],
..Default::default()
};
let res = vcpu.restore_state(&faulty_vcpu_state);
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(), | (os error 8)"
.to_string()
);
init_vcpu(&vcpu.fd, vm.fd());
let state = vcpu.save_state().expect("Cannot save state of vcpu");
assert!(!state.regs.is_empty());
vcpu.restore_state(&state)
.expect("Cannot restore state of vcpu");
let value = vcpu
.fd
.get_one_reg(0x6030_0000_0010_003E)
.expect("Cannot get sp core register");
assert!(state.regs.contains(&Aarch64Register {
id: 0x6030_0000_0010_003E,
value
}));
}
#[test]
fn test_setup_non_boot_vcpu() {
let (vm, _) = setup_vm(0x1000);
let vcpu1 = KvmVcpu::new(0, &vm).unwrap();
assert!(vcpu1.init(vm.fd()).is_ok());
let vcpu2 = KvmVcpu::new(1, &vm).unwrap();
assert!(vcpu2.init(vm.fd()).is_ok());
}
} | "Failed to restore the state of the vcpu: Failed to set register: Exec format error \ | random_line_split |
aarch64.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::{Display, Formatter};
use std::result;
use arch::aarch64::regs::Aarch64Register;
use kvm_ioctls::*;
use logger::{error, IncMetric, METRICS};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vm_memory::{Address, GuestAddress, GuestMemoryMmap};
use crate::vstate::vcpu::VcpuEmulation;
use crate::vstate::vm::Vm;
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug)]
pub enum Error {
/// Error configuring the general purpose aarch64 registers.
ConfigureRegisters(arch::aarch64::regs::Error),
/// Cannot open the kvm related file descriptor.
CreateFd(kvm_ioctls::Error),
/// Error getting the Vcpu preferred target on Arm.
GetPreferredTarget(kvm_ioctls::Error),
/// Error doing Vcpu Init on Arm.
Init(kvm_ioctls::Error),
/// Failed to set value for some arm specific register.
RestoreState(arch::aarch64::regs::Error),
/// Failed to fetch value for some arm specific register.
SaveState(arch::aarch64::regs::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
use self::Error::*;
match self {
ConfigureRegisters(err) => {
write!(
f,
"Error configuring the general purpose registers: {}",
err
)
}
CreateFd(err) => write!(f, "Error in opening the VCPU file descriptor: {}", err),
GetPreferredTarget(err) => {
write!(f, "Error retrieving the vcpu preferred target: {}", err)
}
Init(err) => write!(f, "Error initializing the vcpu: {}", err),
RestoreState(err) => write!(f, "Failed to restore the state of the vcpu: {}", err),
SaveState(err) => write!(f, "Failed to save the state of the vcpu: {}", err),
}
}
}
type Result<T> = result::Result<T, Error>;
/// A wrapper around creating and using a kvm aarch64 vcpu.
pub struct KvmVcpu {
pub index: u8,
pub fd: VcpuFd,
pub mmio_bus: Option<devices::Bus>,
mpidr: u64,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn | (index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError> {
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
}
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will give this away for saving and restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (vm, vcpu, _) = setup_vcpu(0x10000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = vcpu.init(vm.fd());
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error retrieving the vcpu preferred target: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_vcpu_save_restore_state() {
let (mut vm, _vm_mem) = setup_vm(0x1000);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vm.setup_irqchip(1).unwrap();
// Calling KVM_GET_REGLIST before KVM_VCPU_INIT will result in error.
let res = vcpu.save_state();
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to save the state of the vcpu: Failed to get X0 register: Exec format error \
(os error 8)"
.to_string()
);
// Try to restore the register using a faulty state.
let faulty_vcpu_state = VcpuState {
regs: vec![Aarch64Register { id: 0, value: 0 }],
..Default::default()
};
let res = vcpu.restore_state(&faulty_vcpu_state);
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to restore the state of the vcpu: Failed to set register: Exec format error \
(os error 8)"
.to_string()
);
init_vcpu(&vcpu.fd, vm.fd());
let state = vcpu.save_state().expect("Cannot save state of vcpu");
assert!(!state.regs.is_empty());
vcpu.restore_state(&state)
.expect("Cannot restore state of vcpu");
let value = vcpu
.fd
.get_one_reg(0x6030_0000_0010_003E)
.expect("Cannot get sp core register");
assert!(state.regs.contains(&Aarch64Register {
id: 0x6030_0000_0010_003E,
value
}));
}
#[test]
fn test_setup_non_boot_vcpu() {
let (vm, _) = setup_vm(0x1000);
let vcpu1 = KvmVcpu::new(0, &vm).unwrap();
assert!(vcpu1.init(vm.fd()).is_ok());
let vcpu2 = KvmVcpu::new(1, &vm).unwrap();
assert!(vcpu2.init(vm.fd()).is_ok());
}
}
| new | identifier_name |
aarch64.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::{Display, Formatter};
use std::result;
use arch::aarch64::regs::Aarch64Register;
use kvm_ioctls::*;
use logger::{error, IncMetric, METRICS};
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vm_memory::{Address, GuestAddress, GuestMemoryMmap};
use crate::vstate::vcpu::VcpuEmulation;
use crate::vstate::vm::Vm;
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug)]
pub enum Error {
/// Error configuring the general purpose aarch64 registers.
ConfigureRegisters(arch::aarch64::regs::Error),
/// Cannot open the kvm related file descriptor.
CreateFd(kvm_ioctls::Error),
/// Error getting the Vcpu preferred target on Arm.
GetPreferredTarget(kvm_ioctls::Error),
/// Error doing Vcpu Init on Arm.
Init(kvm_ioctls::Error),
/// Failed to set value for some arm specific register.
RestoreState(arch::aarch64::regs::Error),
/// Failed to fetch value for some arm specific register.
SaveState(arch::aarch64::regs::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
use self::Error::*;
match self {
ConfigureRegisters(err) => {
write!(
f,
"Error configuring the general purpose registers: {}",
err
)
}
CreateFd(err) => write!(f, "Error in opening the VCPU file descriptor: {}", err),
GetPreferredTarget(err) => {
write!(f, "Error retrieving the vcpu preferred target: {}", err)
}
Init(err) => write!(f, "Error initializing the vcpu: {}", err),
RestoreState(err) => write!(f, "Failed to restore the state of the vcpu: {}", err),
SaveState(err) => write!(f, "Failed to save the state of the vcpu: {}", err),
}
}
}
type Result<T> = result::Result<T, Error>;
/// A wrapper around creating and using a kvm aarch64 vcpu.
pub struct KvmVcpu {
pub index: u8,
pub fd: VcpuFd,
pub mmio_bus: Option<devices::Bus>,
mpidr: u64,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn new(index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError> |
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will give this away for saving and restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (vm, vcpu, _) = setup_vcpu(0x10000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = vcpu.init(vm.fd());
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error retrieving the vcpu preferred target: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_vcpu_save_restore_state() {
let (mut vm, _vm_mem) = setup_vm(0x1000);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vm.setup_irqchip(1).unwrap();
// Calling KVM_GET_REGLIST before KVM_VCPU_INIT will result in error.
let res = vcpu.save_state();
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to save the state of the vcpu: Failed to get X0 register: Exec format error \
(os error 8)"
.to_string()
);
// Try to restore the register using a faulty state.
let faulty_vcpu_state = VcpuState {
regs: vec![Aarch64Register { id: 0, value: 0 }],
..Default::default()
};
let res = vcpu.restore_state(&faulty_vcpu_state);
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to restore the state of the vcpu: Failed to set register: Exec format error \
(os error 8)"
.to_string()
);
init_vcpu(&vcpu.fd, vm.fd());
let state = vcpu.save_state().expect("Cannot save state of vcpu");
assert!(!state.regs.is_empty());
vcpu.restore_state(&state)
.expect("Cannot restore state of vcpu");
let value = vcpu
.fd
.get_one_reg(0x6030_0000_0010_003E)
.expect("Cannot get sp core register");
assert!(state.regs.contains(&Aarch64Register {
id: 0x6030_0000_0010_003E,
value
}));
}
#[test]
fn test_setup_non_boot_vcpu() {
let (vm, _) = setup_vm(0x1000);
let vcpu1 = KvmVcpu::new(0, &vm).unwrap();
assert!(vcpu1.init(vm.fd()).is_ok());
let vcpu2 = KvmVcpu::new(1, &vm).unwrap();
assert!(vcpu2.init(vm.fd()).is_ok());
}
}
| {
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
} | identifier_body |
ic7406.rs | // Copyright (c) 2021 Thomas J. Otterson
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
// Note that the imports for std::rc::Rc, std::cell::RefCell, and
// crate::components::pin::Pin are only necessary because of the demo non-macro constructor
// function.
/// Pin assignment constants for the Ic7406 struct.
pub mod constants {
/// The pin assignment for the input of inverter 1.
pub const A1: usize = 1;
/// The pin assignment for the input of inverter 2.
pub const A2: usize = 3;
/// The pin assignment for the input of inverter 3.
pub const A3: usize = 5;
/// The pin assignment for the input of inverter 4.
pub const A4: usize = 9;
/// The pin assignment for the input of inverter 5.
pub const A5: usize = 11;
/// The pin assignment for the input of inverter 6.
pub const A6: usize = 13;
/// The pin assignment for the output of inverter 1.
pub const Y1: usize = 2;
/// The pin assignment for the output of inverter 2.
pub const Y2: usize = 4;
/// The pin assignment for the output of inverter 3.
pub const Y3: usize = 6;
/// The pin assignment for the output of inverter 4.
pub const Y4: usize = 8;
/// The pin assignment for the output of inverter 5.
pub const Y5: usize = 10;
/// The pin assignment for the output of inverter 6.
pub const Y6: usize = 12;
/// The pin assignment for the +5V power supply.
pub const VCC: usize = 14;
/// The pin assignment for the ground.
pub const GND: usize = 7;
}
use std::{cell::RefCell, rc::Rc};
use crate::{
components::{
device::{Device, DeviceRef, LevelChange, DUMMY},
pin::{
Mode::{Input, Output, Unconnected},
Pin,
},
},
vectors::RefVec,
};
use self::constants::*;
const INPUTS: [usize; 6] = [A1, A2, A3, A4, A5, A6];
/// An emulation of the 7406 hex inverter.
///
/// The 7406 is one of the 7400-series TTL logic chips, consisting of six single-input
/// inverters. An inverter is the simplest of logic gates: if the input is low, the output
/// is high, and vice versa.
///
/// | An | Yn |
/// | :---: | :---: |
/// | L | **H** |
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that
/// arrive in the opposite sense from what their consumers expect, such as the 6567's AEC
/// signal being turned into the inverse AEC signal for the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment to its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
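// A hypothetical alternative mapping (illustrative sketch, not used by the
// emulation): on this package the output always sits adjacent to its input,
// one pin up on the GND side (pins 1-5) and one pin down on the Vcc side
// (pins 9-13), so the lookup can also be done arithmetically.
#[allow(dead_code)]
fn output_for_arithmetic(input: usize) -> usize {
    if !INPUTS.contains(&input) {
        // Mirror `output_for`: non-input pins map to the dummy index.
        0
    } else if input < GND {
        input + 1
    } else {
        input - 1
    }
}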
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
Vec::new()
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn input_high() {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]); | assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
#[test]
fn input_low() {
let (_, tr) = before_each();
clear!(tr[A1]);
assert!(high!(tr[Y1]), "Y1 should be high when A1 is low");
clear!(tr[A2]);
assert!(high!(tr[Y2]), "Y2 should be high when A2 is low");
clear!(tr[A3]);
assert!(high!(tr[Y3]), "Y3 should be high when A3 is low");
clear!(tr[A4]);
assert!(high!(tr[Y4]), "Y4 should be high when A4 is low");
clear!(tr[A5]);
assert!(high!(tr[Y5]), "Y5 should be high when A5 is low");
clear!(tr[A6]);
assert!(high!(tr[Y6]), "Y6 should be high when A6 is low");
}
// Duplicate tests using no macros. These use the non-macro creation function as well
// because I like the symmetry. Only this struct has non-macro versions of the tests,
// and it's just for demonstration purposes.
#[test]
fn input_high_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().set();
assert!(tr[Y1].borrow().low(), "Y1 should be low when A1 is high");
tr[A2].borrow_mut().set();
assert!(tr[Y2].borrow().low(), "Y2 should be low when A2 is high");
tr[A3].borrow_mut().set();
assert!(tr[Y3].borrow().low(), "Y3 should be low when A3 is high");
tr[A4].borrow_mut().set();
assert!(tr[Y4].borrow().low(), "Y4 should be low when A4 is high");
tr[A5].borrow_mut().set();
assert!(tr[Y5].borrow().low(), "Y5 should be low when A5 is high");
tr[A6].borrow_mut().set();
assert!(tr[Y6].borrow().low(), "Y6 should be low when A6 is high");
}
#[test]
fn input_low_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().clear();
assert!(tr[Y1].borrow().high(), "Y1 should be high when A1 is low");
tr[A2].borrow_mut().clear();
assert!(tr[Y2].borrow().high(), "Y2 should be high when A2 is low");
tr[A3].borrow_mut().clear();
assert!(tr[Y3].borrow().high(), "Y3 should be high when A3 is low");
tr[A4].borrow_mut().clear();
assert!(tr[Y4].borrow().high(), "Y4 should be high when A4 is low");
tr[A5].borrow_mut().clear();
assert!(tr[Y5].borrow().high(), "Y5 should be high when A5 is low");
tr[A6].borrow_mut().clear();
assert!(tr[Y6].borrow().high(), "Y6 should be high when A6 is low");
}
} | assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]); | random_line_split |
ic7406.rs | // Copyright (c) 2021 Thomas J. Otterson
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
// Note that the imports for std::rc::Rc, std::cell::RefCell, and
// crate::components::pin::Pin are only necessary because of the demo non-macro constructor
// function.
/// Pin assignment constants for the Ic7406 struct.
pub mod constants {
/// The pin assignment for the input of inverter 1.
pub const A1: usize = 1;
/// The pin assignment for the input of inverter 2.
pub const A2: usize = 3;
/// The pin assignment for the input of inverter 3.
pub const A3: usize = 5;
/// The pin assignment for the input of inverter 4.
pub const A4: usize = 9;
/// The pin assignment for the input of inverter 5.
pub const A5: usize = 11;
/// The pin assignment for the input of inverter 6.
pub const A6: usize = 13;
/// The pin assignment for the output of inverter 1.
pub const Y1: usize = 2;
/// The pin assignment for the output of inverter 2.
pub const Y2: usize = 4;
/// The pin assignment for the output of inverter 3.
pub const Y3: usize = 6;
/// The pin assignment for the output of inverter 4.
pub const Y4: usize = 8;
/// The pin assignment for the output of inverter 5.
pub const Y5: usize = 10;
/// The pin assignment for the output of inverter 6.
pub const Y6: usize = 12;
/// The pin assignment for the +5V power supply.
pub const VCC: usize = 14;
/// The pin assignment for the ground.
pub const GND: usize = 7;
}
use std::{cell::RefCell, rc::Rc};
use crate::{
components::{
device::{Device, DeviceRef, LevelChange, DUMMY},
pin::{
Mode::{Input, Output, Unconnected},
Pin,
},
},
vectors::RefVec,
};
use self::constants::*;
const INPUTS: [usize; 6] = [A1, A2, A3, A4, A5, A6];
/// An emulation of the 7406 hex inverter.
///
/// The 7406 is one of the 7400-series TTL logic chips, consisting of six single-input
/// inverters. An inverter is the simplest of logic gates: if the input is low, the output
/// is high, and vice versa.
///
/// | An | Yn |
/// | :---: | :---: |
/// | L | **H** |
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that
/// arrive in the opposite sense from what their consumers expect, such as the 6567's AEC
/// signal being turned into the inverse AEC signal for the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment to its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
Vec::new()
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => |
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn input_high() {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]);
assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]);
assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
#[test]
fn input_low() {
let (_, tr) = before_each();
clear!(tr[A1]);
assert!(high!(tr[Y1]), "Y1 should be high when A1 is low");
clear!(tr[A2]);
assert!(high!(tr[Y2]), "Y2 should be high when A2 is low");
clear!(tr[A3]);
assert!(high!(tr[Y3]), "Y3 should be high when A3 is low");
clear!(tr[A4]);
assert!(high!(tr[Y4]), "Y4 should be high when A4 is low");
clear!(tr[A5]);
assert!(high!(tr[Y5]), "Y5 should be high when A5 is low");
clear!(tr[A6]);
assert!(high!(tr[Y6]), "Y6 should be high when A6 is low");
}
// Duplicate tests using no macros. These use the non-macro creation function as well
// because I like the symmetry. Only this struct has non-macro versions of the tests,
// and it's just for demonstration purposes.
#[test]
fn input_high_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().set();
assert!(tr[Y1].borrow().low(), "Y1 should be low when A1 is high");
tr[A2].borrow_mut().set();
assert!(tr[Y2].borrow().low(), "Y2 should be low when A2 is high");
tr[A3].borrow_mut().set();
assert!(tr[Y3].borrow().low(), "Y3 should be low when A3 is high");
tr[A4].borrow_mut().set();
assert!(tr[Y4].borrow().low(), "Y4 should be low when A4 is high");
tr[A5].borrow_mut().set();
assert!(tr[Y5].borrow().low(), "Y5 should be low when A5 is high");
tr[A6].borrow_mut().set();
assert!(tr[Y6].borrow().low(), "Y6 should be low when A6 is high");
}
#[test]
fn input_low_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().clear();
assert!(tr[Y1].borrow().high(), "Y1 should be high when A1 is low");
tr[A2].borrow_mut().clear();
assert!(tr[Y2].borrow().high(), "Y2 should be high when A2 is low");
tr[A3].borrow_mut().clear();
assert!(tr[Y3].borrow().high(), "Y3 should be high when A3 is low");
tr[A4].borrow_mut().clear();
assert!(tr[Y4].borrow().high(), "Y4 should be high when A4 is low");
tr[A5].borrow_mut().clear();
assert!(tr[Y5].borrow().high(), "Y5 should be high when A5 is low");
tr[A6].borrow_mut().clear();
assert!(tr[Y6].borrow().high(), "Y6 should be high when A6 is low");
}
}
| {} | conditional_block |
ic7406.rs | // Copyright (c) 2021 Thomas J. Otterson
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
// Note that the imports for std::rc::Rc, std::cell::RefCell, and
// crate::components::pin::Pin are only necessary because of the demo non-macro constructor
// function.
/// Pin assignment constants for the Ic7406 struct.
pub mod constants {
/// The pin assignment for the input of inverter 1.
pub const A1: usize = 1;
/// The pin assignment for the input of inverter 2.
pub const A2: usize = 3;
/// The pin assignment for the input of inverter 3.
pub const A3: usize = 5;
/// The pin assignment for the input of inverter 4.
pub const A4: usize = 9;
/// The pin assignment for the input of inverter 5.
pub const A5: usize = 11;
/// The pin assignment for the input of inverter 6.
pub const A6: usize = 13;
/// The pin assignment for the output of inverter 1.
pub const Y1: usize = 2;
/// The pin assignment for the output of inverter 2.
pub const Y2: usize = 4;
/// The pin assignment for the output of inverter 3.
pub const Y3: usize = 6;
/// The pin assignment for the output of inverter 4.
pub const Y4: usize = 8;
/// The pin assignment for the output of inverter 5.
pub const Y5: usize = 10;
/// The pin assignment for the output of inverter 6.
pub const Y6: usize = 12;
/// The pin assignment for the +5V power supply.
pub const VCC: usize = 14;
/// The pin assignment for the ground.
pub const GND: usize = 7;
}
use std::{cell::RefCell, rc::Rc};
use crate::{
components::{
device::{Device, DeviceRef, LevelChange, DUMMY},
pin::{
Mode::{Input, Output, Unconnected},
Pin,
},
},
vectors::RefVec,
};
use self::constants::*;
const INPUTS: [usize; 6] = [A1, A2, A3, A4, A5, A6];
/// An emulation of the 7406 hex inverter.
///
/// The 7406 is one of the 7400-series TTL logic chips, consisting of six single-input
/// inverters. An inverter is the simplest of logic gates: if the input is low, the output
/// is high, and vice versa.
///
/// | An | Yn |
/// | :---: | :---: |
/// | L | **H** |
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that
/// arrive in the opposite sense from what their consumers expect, such as the 6567's AEC
/// signal being turned into the inverse AEC signal for the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment to its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> |
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn input_high() {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]);
assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]);
assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
#[test]
fn input_low() {
let (_, tr) = before_each();
clear!(tr[A1]);
assert!(high!(tr[Y1]), "Y1 should be high when A1 is low");
clear!(tr[A2]);
assert!(high!(tr[Y2]), "Y2 should be high when A2 is low");
clear!(tr[A3]);
assert!(high!(tr[Y3]), "Y3 should be high when A3 is low");
clear!(tr[A4]);
assert!(high!(tr[Y4]), "Y4 should be high when A4 is low");
clear!(tr[A5]);
assert!(high!(tr[Y5]), "Y5 should be high when A5 is low");
clear!(tr[A6]);
assert!(high!(tr[Y6]), "Y6 should be high when A6 is low");
}
// Duplicate tests using no macros. These use the non-macro creation function as well
// because I like the symmetry. Only this struct has non-macro versions of the tests,
// and it's just for demonstration purposes.
#[test]
fn input_high_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().set();
assert!(tr[Y1].borrow().low(), "Y1 should be low when A1 is high");
tr[A2].borrow_mut().set();
assert!(tr[Y2].borrow().low(), "Y2 should be low when A2 is high");
tr[A3].borrow_mut().set();
assert!(tr[Y3].borrow().low(), "Y3 should be low when A3 is high");
tr[A4].borrow_mut().set();
assert!(tr[Y4].borrow().low(), "Y4 should be low when A4 is high");
tr[A5].borrow_mut().set();
assert!(tr[Y5].borrow().low(), "Y5 should be low when A5 is high");
tr[A6].borrow_mut().set();
assert!(tr[Y6].borrow().low(), "Y6 should be low when A6 is high");
}
#[test]
fn input_low_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().clear();
assert!(tr[Y1].borrow().high(), "Y1 should be high when A1 is low");
tr[A2].borrow_mut().clear();
assert!(tr[Y2].borrow().high(), "Y2 should be high when A2 is low");
tr[A3].borrow_mut().clear();
assert!(tr[Y3].borrow().high(), "Y3 should be high when A3 is low");
tr[A4].borrow_mut().clear();
assert!(tr[Y4].borrow().high(), "Y4 should be high when A4 is low");
tr[A5].borrow_mut().clear();
assert!(tr[Y5].borrow().high(), "Y5 should be high when A5 is low");
tr[A6].borrow_mut().clear();
assert!(tr[Y6].borrow().high(), "Y6 should be high when A6 is low");
}
}
| {
Vec::new()
} | identifier_body |
ic7406.rs | // Copyright (c) 2021 Thomas J. Otterson
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
// Note that the imports for std::rc::Rc, std::cell::RefCell, and
// crate::components::pin::Pin are only necessary because of the demo non-macro constructor
// function.
/// Pin assignment constants for the Ic7406 struct.
pub mod constants {
/// The pin assignment for the input of inverter 1.
pub const A1: usize = 1;
/// The pin assignment for the input of inverter 2.
pub const A2: usize = 3;
/// The pin assignment for the input of inverter 3.
pub const A3: usize = 5;
/// The pin assignment for the input of inverter 4.
pub const A4: usize = 9;
/// The pin assignment for the input of inverter 5.
pub const A5: usize = 11;
/// The pin assignment for the input of inverter 6.
pub const A6: usize = 13;
/// The pin assignment for the output of inverter 1.
pub const Y1: usize = 2;
/// The pin assignment for the output of inverter 2.
pub const Y2: usize = 4;
/// The pin assignment for the output of inverter 3.
pub const Y3: usize = 6;
/// The pin assignment for the output of inverter 4.
pub const Y4: usize = 8;
/// The pin assignment for the output of inverter 5.
pub const Y5: usize = 10;
/// The pin assignment for the output of inverter 6.
pub const Y6: usize = 12;
/// The pin assignment for the +5V power supply.
pub const VCC: usize = 14;
/// The pin assignment for the ground.
pub const GND: usize = 7;
}
use std::{cell::RefCell, rc::Rc};
use crate::{
components::{
device::{Device, DeviceRef, LevelChange, DUMMY},
pin::{
Mode::{Input, Output, Unconnected},
Pin,
},
},
vectors::RefVec,
};
use self::constants::*;
const INPUTS: [usize; 6] = [A1, A2, A3, A4, A5, A6];
/// An emulation of the 7406 hex inverter.
///
/// The 7406 is one of the 7400-series TTL logic chips, consisting of six single-input
/// inverters. An inverter is the simplest of logic gates: if the input is low, the output
/// is high, and vice versa.
///
/// | An | Yn |
/// | :---: | :---: |
/// | L | **H** |
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that are
/// produced in the opposite polarity from the one expected, such as turning the 6567's AEC
/// signal into the inverted AEC signal required by the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
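// Behavior sketch mirroring the truth table above (an illustrative addition; the
// runnable version lives in the test module at the bottom of this file, using the
// crate's `make_traces` helper and pin macros):
//
//     let chip = Ic7406::new();
//     let tr = make_traces(&chip);
//     set!(tr[A1]);          // drive input 1 high...
//     assert!(low!(tr[Y1])); // ...and output 1 goes low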
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment to its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
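// Compile-time sketch (an illustrative addition, not part of the original crate): on
// the DIP-14 package each inverter's output sits directly beside its input, which is
// exactly the pairing encoded by output_for above.
const _: () = {
    assert!(Y1 == A1 + 1 && Y2 == A2 + 1 && Y3 == A3 + 1);
    assert!(Y4 + 1 == A4 && Y5 + 1 == A5 && Y6 + 1 == A6);
};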
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
Vec::new()
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn | () {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]);
assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]);
assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
#[test]
fn input_low() {
let (_, tr) = before_each();
clear!(tr[A1]);
assert!(high!(tr[Y1]), "Y1 should be high when A1 is low");
clear!(tr[A2]);
assert!(high!(tr[Y2]), "Y2 should be high when A2 is low");
clear!(tr[A3]);
assert!(high!(tr[Y3]), "Y3 should be high when A3 is low");
clear!(tr[A4]);
assert!(high!(tr[Y4]), "Y4 should be high when A4 is low");
clear!(tr[A5]);
assert!(high!(tr[Y5]), "Y5 should be high when A5 is low");
clear!(tr[A6]);
assert!(high!(tr[Y6]), "Y6 should be high when A6 is low");
}
// Duplicate tests using no macros. These use the non-macro creation function as well
// because I like the symmetry. Only this struct has non-macro versions of the tests,
// and it's just for demonstration purposes.
#[test]
fn input_high_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().set();
assert!(tr[Y1].borrow().low(), "Y1 should be low when A1 is high");
tr[A2].borrow_mut().set();
assert!(tr[Y2].borrow().low(), "Y2 should be low when A2 is high");
tr[A3].borrow_mut().set();
assert!(tr[Y3].borrow().low(), "Y3 should be low when A3 is high");
tr[A4].borrow_mut().set();
assert!(tr[Y4].borrow().low(), "Y4 should be low when A4 is high");
tr[A5].borrow_mut().set();
assert!(tr[Y5].borrow().low(), "Y5 should be low when A5 is high");
tr[A6].borrow_mut().set();
assert!(tr[Y6].borrow().low(), "Y6 should be low when A6 is high");
}
#[test]
fn input_low_no_macro() {
let (_, tr) = before_each();
tr[A1].borrow_mut().clear();
assert!(tr[Y1].borrow().high(), "Y1 should be high when A1 is low");
tr[A2].borrow_mut().clear();
assert!(tr[Y2].borrow().high(), "Y2 should be high when A2 is low");
tr[A3].borrow_mut().clear();
assert!(tr[Y3].borrow().high(), "Y3 should be high when A3 is low");
tr[A4].borrow_mut().clear();
assert!(tr[Y4].borrow().high(), "Y4 should be high when A4 is low");
tr[A5].borrow_mut().clear();
assert!(tr[Y5].borrow().high(), "Y5 should be high when A5 is low");
tr[A6].borrow_mut().clear();
assert!(tr[Y6].borrow().high(), "Y6 should be high when A6 is low");
}
}
| input_high | identifier_name |
graphics.rs | use std::collections::HashMap;
use std::fs;
use wgpu::util::DeviceExt;
use crate::texture::Texture;
pub struct Graphics {
pub surface: wgpu::Surface,
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub swap_chain_descriptor: wgpu::SwapChainDescriptor,
pub swap_chain: wgpu::SwapChain,
pub size: (u32, u32),
pub models: HashMap<String, Mesh>,
pub textures: HashMap<String, wgpu::BindGroup>,
pub pipelines: HashMap<String, wgpu::RenderPipeline>,
pub uniforms: Uniforms,
pub uniform_buffer: wgpu::Buffer,
pub uniform_bind_group: wgpu::BindGroup,
texture_layout: wgpu::BindGroupLayout,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
position: [f32; 3],
normal: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
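// Layout sanity sketch (an illustrative addition): the attribute offsets above assume
// a tightly packed #[repr(C)] struct of 3 + 3 + 2 f32s, i.e. 32 bytes per vertex.
const _: () = assert!(std::mem::size_of::<Vertex>() == 32);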
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelProperties {
pub model_matrix: [[f32; 4]; 4],
}
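// Usage sketch (an illustrative addition, not part of the original code): the 0..128
// byte push-constant range reserved in new_pipeline below is sized for per-draw data
// like this model matrix; it would reach the vertex stage roughly as follows.
#[allow(dead_code)]
fn push_model_matrix(rpass: &mut wgpu::RenderPass<'_>, props: &ModelProperties) {
    rpass.set_push_constants(
        wgpu::ShaderStage::VERTEX,
        0,
        bytemuck::cast_slice(std::slice::from_ref(props)),
    );
}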
fn create_quad() -> Mesh {
    // Unit quad centered on the origin, facing +Z; texture coordinates use the
    // top-left = (0, 0) convention.
    let mut vertices = Vec::new();
    let vertex_a = Vertex {
        position: [-0.5, 0.5, 0.0], // top left
        normal: [0.0, 0.0, 1.0],
        tex_coords: [0.0, 0.0],
    };
    let vertex_b = Vertex {
        position: [0.5, 0.5, 0.0], // top right
        normal: [0.0, 0.0, 1.0],
        tex_coords: [1.0, 0.0],
    };
    let vertex_c = Vertex {
        position: [-0.5, -0.5, 0.0], // bottom left
        normal: [0.0, 0.0, 1.0],
        tex_coords: [0.0, 1.0],
    };
    let vertex_d = Vertex {
        position: [0.5, -0.5, 0.0], // bottom right
        normal: [0.0, 0.0, 1.0],
        tex_coords: [1.0, 1.0],
    };
    vertices.push(vertex_a);
    vertices.push(vertex_b);
    vertices.push(vertex_c);
    vertices.push(vertex_d);
    // Two counter-clockwise triangles, (c, b, a) and (b, c, d), matching the Ccw
    // front face and back-face culling configured in new_pipeline.
    let indices = vec![2, 1, 0, 1, 2, 3];
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
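// Update sketch (an illustrative addition): when the camera moves, the CPU-side
// Uniforms copy is refreshed and rewritten into the uniform buffer that
// Graphics::new creates with COPY_DST usage.
#[allow(dead_code)]
fn refresh_view_proj(gfx: &Graphics, view_proj: cgmath::Matrix4<f32>) {
    let mut uniforms = gfx.uniforms; // Uniforms is Copy
    uniforms.update_view_proj(view_proj);
    gfx.queue
        .write_buffer(&gfx.uniform_buffer, 0, bytemuck::cast_slice(&[uniforms]));
}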
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup |
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
            stages: wgpu::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
buffers: &[Vertex::Desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
                blend: Some(wgpu::BlendState::ALPHA_BLENDING), // alpha blending, so transparent texels composite correctly
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
            mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
            // Specify any extra GPU features. You can get a list of features supported by your device using adapter.features() or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
            // The limits field describes the limits on certain types of resources that we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
        // Define and create the swap chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
            // Defines how the swap chain's textures will be stored on the GPU
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The present_mode uses the wgpu::PresentMode enum which determines how to sync the swap chain with the display.
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_descriptor);
let texture_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let uniforms = Uniforms::new();
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}
],
label: Some("uniform_bind_group"),
});
let mut pipelines : HashMap::<String, wgpu::RenderPipeline> = HashMap::new();
let pipeline = new_pipeline(&device, swap_chain_descriptor.format, "sprite.vert.spv", "sprite.frag.spv", &texture_layout, &uniform_bind_group_layout, wgpu::PrimitiveTopology::TriangleList, wgpu::PolygonMode::Fill);
pipelines.insert("sprite".to_owned(), pipeline);
let mut models : HashMap::<String, Mesh> = HashMap::new();
        let mut quad_mesh = create_quad();
        quad_mesh.upload_to_gpu(&device);
        models.insert("quad".to_owned(), quad_mesh);
let mut textures : HashMap<String, wgpu::BindGroup> = HashMap::new();
textures.insert("spaceship.png".to_owned(), upload_texture_to_gpu("spaceship.png", &device, &queue, &texture_layout));
Self {
surface,
device,
queue,
swap_chain_descriptor,
swap_chain,
size,
models,
textures,
pipelines,
texture_layout,
uniforms,
uniform_buffer,
uniform_bind_group,
}
}
pub fn resize(&mut self, new_size: (u32, u32)) {
self.size = new_size;
self.swap_chain_descriptor.width = new_size.0;
self.swap_chain_descriptor.height = new_size.1;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_descriptor);
}
} | {
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
} | identifier_body |
graphics.rs | use std::collections::HashMap;
use std::fs;
use wgpu::util::DeviceExt;
use crate::texture::Texture;
pub struct Graphics {
pub surface: wgpu::Surface,
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub swap_chain_descriptor: wgpu::SwapChainDescriptor,
pub swap_chain: wgpu::SwapChain,
pub size: (u32, u32),
pub models: HashMap<String, Mesh>,
pub textures: HashMap<String, wgpu::BindGroup>,
pub pipelines: HashMap<String, wgpu::RenderPipeline>,
pub uniforms: Uniforms,
pub uniform_buffer: wgpu::Buffer,
pub uniform_bind_group: wgpu::BindGroup,
texture_layout: wgpu::BindGroupLayout,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
position: [f32; 3],
normal: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
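// Layout sanity sketch (an illustrative addition): the attribute offsets above assume
// a tightly packed #[repr(C)] struct of 3 + 3 + 2 f32s, i.e. 32 bytes per vertex.
const _: () = assert!(std::mem::size_of::<Vertex>() == 32);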
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelProperties {
pub model_matrix: [[f32; 4]; 4],
}
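// Usage sketch (an illustrative addition, not part of the original code): the 0..128
// byte push-constant range reserved in new_pipeline below is sized for per-draw data
// like this model matrix; it would reach the vertex stage roughly as follows.
#[allow(dead_code)]
fn push_model_matrix(rpass: &mut wgpu::RenderPass<'_>, props: &ModelProperties) {
    rpass.set_push_constants(
        wgpu::ShaderStage::VERTEX,
        0,
        bytemuck::cast_slice(std::slice::from_ref(props)),
    );
}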
fn create_quad() -> Mesh {
    // Unit quad centered on the origin, facing +Z; texture coordinates use the
    // top-left = (0, 0) convention.
    let mut vertices = Vec::new();
    let vertex_a = Vertex {
        position: [-0.5, 0.5, 0.0], // top left
        normal: [0.0, 0.0, 1.0],
        tex_coords: [0.0, 0.0],
    };
    let vertex_b = Vertex {
        position: [0.5, 0.5, 0.0], // top right
        normal: [0.0, 0.0, 1.0],
        tex_coords: [1.0, 0.0],
    };
    let vertex_c = Vertex {
        position: [-0.5, -0.5, 0.0], // bottom left
        normal: [0.0, 0.0, 1.0],
        tex_coords: [0.0, 1.0],
    };
    let vertex_d = Vertex {
        position: [0.5, -0.5, 0.0], // bottom right
        normal: [0.0, 0.0, 1.0],
        tex_coords: [1.0, 1.0],
    };
    vertices.push(vertex_a);
    vertices.push(vertex_b);
    vertices.push(vertex_c);
    vertices.push(vertex_d);
    // Two counter-clockwise triangles, (c, b, a) and (b, c, d), matching the Ccw
    // front face and back-face culling configured in new_pipeline.
    let indices = vec![2, 1, 0, 1, 2, 3];
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
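// Update sketch (an illustrative addition): when the camera moves, the CPU-side
// Uniforms copy is refreshed and rewritten into the uniform buffer that
// Graphics::new creates with COPY_DST usage.
#[allow(dead_code)]
fn refresh_view_proj(gfx: &Graphics, view_proj: cgmath::Matrix4<f32>) {
    let mut uniforms = gfx.uniforms; // Uniforms is Copy
    uniforms.update_view_proj(view_proj);
    gfx.queue
        .write_buffer(&gfx.uniform_buffer, 0, bytemuck::cast_slice(&[uniforms]));
}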
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup {
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
}
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
            stages: wgpu::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
            buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
                blend: Some(wgpu::BlendState::ALPHA_BLENDING), // alpha blending, so transparent texels composite correctly
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
            mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
            // Specify any extra GPU features. You can get a list of features supported by your device using adapter.features() or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
            // The limits field describes the limits on certain types of resources that we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
        // Define and create the swap chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
            // Defines how the swap chain's textures will be stored on the GPU
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The present_mode uses the wgpu::PresentMode enum which determines how to sync the swap chain with the display.
present_mode: wgpu::PresentMode::Fifo, | let texture_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let uniforms = Uniforms::new();
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}
],
label: Some("uniform_bind_group"),
});
let mut pipelines : HashMap::<String, wgpu::RenderPipeline> = HashMap::new();
let pipeline = new_pipeline(&device, swap_chain_descriptor.format, "sprite.vert.spv", "sprite.frag.spv", &texture_layout, &uniform_bind_group_layout, wgpu::PrimitiveTopology::TriangleList, wgpu::PolygonMode::Fill);
pipelines.insert("sprite".to_owned(), pipeline);
let mut models : HashMap::<String, Mesh> = HashMap::new();
        let mut quad_mesh = create_quad();
        quad_mesh.upload_to_gpu(&device);
        models.insert("quad".to_owned(), quad_mesh);
let mut textures : HashMap<String, wgpu::BindGroup> = HashMap::new();
textures.insert("spaceship.png".to_owned(), upload_texture_to_gpu("spaceship.png", &device, &queue, &texture_layout));
Self {
surface,
device,
queue,
swap_chain_descriptor,
swap_chain,
size,
models,
textures,
pipelines,
texture_layout,
uniforms,
uniform_buffer,
uniform_bind_group,
}
}
pub fn resize(&mut self, new_size: (u32, u32)) {
self.size = new_size;
self.swap_chain_descriptor.width = new_size.0;
self.swap_chain_descriptor.height = new_size.1;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_descriptor);
}
} | };
let swap_chain = device.create_swap_chain(&surface, &swap_chain_descriptor);
| random_line_split |
graphics.rs | use std::collections::HashMap;
use std::fs;
use wgpu::util::DeviceExt;
use crate::texture::Texture;
pub struct Graphics {
pub surface: wgpu::Surface,
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub swap_chain_descriptor: wgpu::SwapChainDescriptor,
pub swap_chain: wgpu::SwapChain,
pub size: (u32, u32),
pub models: HashMap<String, Mesh>,
pub textures: HashMap<String, wgpu::BindGroup>,
pub pipelines: HashMap<String, wgpu::RenderPipeline>,
pub uniforms: Uniforms,
pub uniform_buffer: wgpu::Buffer,
pub uniform_bind_group: wgpu::BindGroup,
texture_layout: wgpu::BindGroupLayout,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Vertex {
position: [f32; 3],
normal: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
    fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
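// Layout sanity sketch (an illustrative addition): the attribute offsets above assume
// a tightly packed #[repr(C)] struct of 3 + 3 + 2 f32s, i.e. 32 bytes per vertex.
const _: () = assert!(std::mem::size_of::<Vertex>() == 32);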
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct | {
pub model_matrix: [[f32; 4]; 4],
}
fn create_quad() -> Mesh {
    // Unit quad centered on the origin, facing +Z; texture coordinates use the
    // top-left = (0, 0) convention.
    let mut vertices = Vec::new();
    let vertex_a = Vertex {
        position: [-0.5, 0.5, 0.0], // top left
        normal: [0.0, 0.0, 1.0],
        tex_coords: [0.0, 0.0],
    };
    let vertex_b = Vertex {
        position: [0.5, 0.5, 0.0], // top right
        normal: [0.0, 0.0, 1.0],
        tex_coords: [1.0, 0.0],
    };
    let vertex_c = Vertex {
        position: [-0.5, -0.5, 0.0], // bottom left
        normal: [0.0, 0.0, 1.0],
        tex_coords: [0.0, 1.0],
    };
    let vertex_d = Vertex {
        position: [0.5, -0.5, 0.0], // bottom right
        normal: [0.0, 0.0, 1.0],
        tex_coords: [1.0, 1.0],
    };
    vertices.push(vertex_a);
    vertices.push(vertex_b);
    vertices.push(vertex_c);
    vertices.push(vertex_d);
    // Two counter-clockwise triangles, (c, b, a) and (b, c, d), matching the Ccw
    // front face and back-face culling configured in new_pipeline.
    let indices = vec![2, 1, 0, 1, 2, 3];
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
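// Update sketch (an illustrative addition): when the camera moves, the CPU-side
// Uniforms copy is refreshed and rewritten into the uniform buffer that
// Graphics::new creates with COPY_DST usage.
#[allow(dead_code)]
fn refresh_view_proj(gfx: &Graphics, view_proj: cgmath::Matrix4<f32>) {
    let mut uniforms = gfx.uniforms; // Uniforms is Copy
    uniforms.update_view_proj(view_proj);
    gfx.queue
        .write_buffer(&gfx.uniform_buffer, 0, bytemuck::cast_slice(&[uniforms]));
}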
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup {
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
}
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
            stages: wgpu::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
            buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
                blend: Some(wgpu::BlendState::ALPHA_BLENDING), // alpha blending, so transparent texels composite correctly
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
            mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
            // Specify any extra GPU features. You can get a list of features supported by your device using adapter.features() or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
            // The limits field describes the limits on certain types of resources that we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
        // Define and create the swap chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
            // Defines how the swap chain's textures will be stored on the GPU
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The present_mode uses the wgpu::PresentMode enum which determines how to sync the swap chain with the display.
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_descriptor);
let texture_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let uniforms = Uniforms::new();
let uniform_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
}
);
let uniform_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
],
label: Some("uniform_bind_group_layout"),
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buffer.as_entire_binding(),
}
],
label: Some("uniform_bind_group"),
});
let mut pipelines : HashMap::<String, wgpu::RenderPipeline> = HashMap::new();
let pipeline = new_pipeline(&device, swap_chain_descriptor.format, "sprite.vert.spv", "sprite.frag.spv", &texture_layout, &uniform_bind_group_layout, wgpu::PrimitiveTopology::TriangleList, wgpu::PolygonMode::Fill);
pipelines.insert("sprite".to_owned(), pipeline);
let mut models : HashMap::<String, Mesh> = HashMap::new();
        let mut quad_mesh = create_quad();
        quad_mesh.upload_to_gpu(&device);
        models.insert("quad".to_owned(), quad_mesh);
let mut textures : HashMap<String, wgpu::BindGroup> = HashMap::new();
textures.insert("spaceship.png".to_owned(), upload_texture_to_gpu("spaceship.png", &device, &queue, &texture_layout));
Self {
surface,
device,
queue,
swap_chain_descriptor,
swap_chain,
size,
models,
textures,
pipelines,
texture_layout,
uniforms,
uniform_buffer,
uniform_bind_group,
}
}
pub fn resize(&mut self, new_size: (u32, u32)) {
self.size = new_size;
self.swap_chain_descriptor.width = new_size.0;
self.swap_chain_descriptor.height = new_size.1;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_descriptor);
}
} | ModelProperties | identifier_name |
lib.rs | //! [](https://docs.rs/petgraph-graphml/)
//! [](https://crates.io/crates/petgraph-graphml/)
//! [](https://github.com/jonasbb/petgraph-graphml)
//! [](https://codecov.io/gh/jonasbb/petgraph-graphml)
//!
//! ---
//!
//! This crate extends [petgraph][] with [GraphML][graphmlwebsite] output support.
//!
//! This crate exports a single type [`GraphMl`] which combines a build-pattern for configuration and provides creating strings ([`GraphMl::to_string`]) and writing to writers ([`GraphMl::to_writer`]).
//!
//! # Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! petgraph-graphml = "3.0.0"
//! ```
//!
//! # Example
//!
//! For a simple graph like this is the generated GraphML output.
//!
//! ```
//! # use petgraph::Graph;
//! # use petgraph_graphml::GraphMl;
//! # fn make_graph() -> Graph<u32, ()> {
//! # let mut graph = Graph::new();
//! # let n0 = graph.add_node(0);
//! # let n1 = graph.add_node(1);
//! # let n2 = graph.add_node(2);
//! # graph.update_edge(n0, n1, ());
//! # graph.update_edge(n1, n2, ());
//! # graph
//! # }
//! # fn main() {
//! let graph = make_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
    /// Enable or disable pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each edge can be converted into an arbitrary number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each node can be converted into an arbitrary number of attributes.
/// Each attribute is a key-value pair, represented as tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
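    ///
    /// # Example
    ///
    /// A minimal sketch (the `graph.graphml` file name is hypothetical):
    ///
    /// ```no_run
    /// # use petgraph::Graph;
    /// # use petgraph_graphml::GraphMl;
    /// # fn main() -> std::io::Result<()> {
    /// let graph: Graph<(), ()> = Graph::new();
    /// let file = std::fs::File::create("graph.graphml")?;
    /// GraphMl::new(&graph).to_writer(file)?;
    /// # Ok(())
    /// # }
    /// ```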
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!(""),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() {
"directed"
} else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write( | .attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphMl")
.field("graph", &self.graph)
.field("pretty_print", &self.pretty_print)
.field("export_edges", &self.export_edges.is_some())
.field("export_nodes", &self.export_nodes.is_some())
.finish()
}
}
impl<G> Display for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
G: GraphProp,
G: NodeIndexable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buff = Cursor::new(Vec::new());
self.to_writer(&mut buff)
.expect("Writing to a Cursor should never create IO errors.");
let s = String::from_utf8(buff.into_inner()).unwrap();
write!(f, "{}", &s)
}
} | XmlEvent::start_element("key") | random_line_split |
lib.rs | //! [](https://docs.rs/petgraph-graphml/)
//! [](https://crates.io/crates/petgraph-graphml/)
//! [](https://github.com/jonasbb/petgraph-graphml)
//! [](https://codecov.io/gh/jonasbb/petgraph-graphml)
//!
//! ---
//!
//! This crate extends [petgraph][] with [GraphML][graphmlwebsite] output support.
//!
//! This crate exports a single type [`GraphMl`] which combines a build-pattern for configuration and provides creating strings ([`GraphMl::to_string`]) and writing to writers ([`GraphMl::to_writer`]).
//!
//! # Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! petgraph-graphml = "3.0.0"
//! ```
//!
//! # Example
//!
//! For a simple graph like the one below, this is the generated GraphML output.
//!
//! ```
//! # use petgraph::Graph;
//! # use petgraph_graphml::GraphMl;
//! # fn make_graph() -> Graph<u32, ()> {
//! # let mut graph = Graph::new();
//! # let n0 = graph.add_node(0);
//! # let n1 = graph.add_node(1);
//! # let n2 = graph.add_node(2);
//! # graph.update_edge(n0, n1, ());
//! # graph.update_edge(n1, n2, ());
//! # graph
//! # }
//! # fn main() {
//! let graph = make_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
    /// Enable or disable pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each edge can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each node can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
            _ => panic!("unexpected non-I/O error while writing GraphML"),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() | else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write(
XmlEvent::start_element("key")
.attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphMl")
.field("graph", &self.graph)
.field("pretty_print", &self.pretty_print)
.field("export_edges", &self.export_edges.is_some())
.field("export_nodes", &self.export_nodes.is_some())
.finish()
}
}
impl<G> Display for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
G: GraphProp,
G: NodeIndexable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buff = Cursor::new(Vec::new());
self.to_writer(&mut buff)
.expect("Writing to a Cursor should never create IO errors.");
let s = String::from_utf8(buff.into_inner()).unwrap();
write!(f, "{}", &s)
}
}
| {
"directed"
} | conditional_block |
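Splicing this row's conditional_block middle back into the split line restores the edgedefault selection:

    if self.graph.is_directed() {
        "directed"
    } else {
        "undirected"
    }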
lib.rs | //! [](https://docs.rs/petgraph-graphml/)
//! [](https://crates.io/crates/petgraph-graphml/)
//! [](https://github.com/jonasbb/petgraph-graphml)
//! [](https://codecov.io/gh/jonasbb/petgraph-graphml)
//!
//! ---
//!
//! This crate extends [petgraph][] with [GraphML][graphmlwebsite] output support.
//!
//! This crate exports a single type [`GraphMl`], which combines a builder pattern for configuration with methods for creating strings ([`GraphMl::to_string`]) and writing to writers ([`GraphMl::to_writer`]).
//!
//! # Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! petgraph-graphml = "3.0.0"
//! ```
//!
//! # Example
//!
//! For a simple graph like this, the following GraphML output is generated.
//!
//! ```
//! # use petgraph::Graph;
//! # use petgraph_graphml::GraphMl;
//! # fn make_graph() -> Graph<u32, ()> {
//! # let mut graph = Graph::new();
//! # let n0 = graph.add_node(0);
//! # let n1 = graph.add_node(1);
//! # let n2 = graph.add_node(2);
//! # graph.update_edge(n0, n1, ());
//! # graph.update_edge(n1, n2, ());
//! # graph
//! # }
//! # fn main() {
//! let graph = make_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
    /// Enable or disable pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each edge can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self |
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each node can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
            _ => panic!("unexpected non-I/O error while writing GraphML"),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() {
"directed"
} else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write(
XmlEvent::start_element("key")
.attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphMl")
.field("graph", &self.graph)
.field("pretty_print", &self.pretty_print)
.field("export_edges", &self.export_edges.is_some())
.field("export_nodes", &self.export_nodes.is_some())
.finish()
}
}
impl<G> Display for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
G: GraphProp,
G: NodeIndexable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buff = Cursor::new(Vec::new());
self.to_writer(&mut buff)
.expect("Writing to a Cursor should never create IO errors.");
let s = String::from_utf8(buff.into_inner()).unwrap();
write!(f, "{}", &s)
}
}
| {
self.export_edges = Some(edge_weight);
self
} | identifier_body |
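Splicing this row's identifier_body middle back after the split signature restores the builder method:

    pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
        self.export_edges = Some(edge_weight);
        self
    }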
lib.rs | //! [](https://docs.rs/petgraph-graphml/)
//! [](https://crates.io/crates/petgraph-graphml/)
//! [](https://github.com/jonasbb/petgraph-graphml)
//! [](https://codecov.io/gh/jonasbb/petgraph-graphml)
//!
//! ---
//!
//! This crate extends [petgraph][] with [GraphML][graphmlwebsite] output support.
//!
//! This crate exports a single type [`GraphMl`], which combines a builder pattern for configuration with methods for creating strings ([`GraphMl::to_string`]) and writing to writers ([`GraphMl::to_writer`]).
//!
//! # Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! petgraph-graphml = "3.0.0"
//! ```
//!
//! # Example
//!
//! For a simple graph like this, the following GraphML output is generated.
//!
//! ```
//! # use petgraph::Graph;
//! # use petgraph_graphml::GraphMl;
//! # fn make_graph() -> Graph<u32, ()> {
//! # let mut graph = Graph::new();
//! # let n0 = graph.add_node(0);
//! # let n1 = graph.add_node(1);
//! # let n2 = graph.add_node(2);
//! # graph.update_edge(n0, n1, ());
//! # graph.update_edge(n1, n2, ());
//! # graph
//! # }
//! # fn main() {
//! let graph = make_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
    /// Enable or disable pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each edge can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each node can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
            _ => panic!("unexpected non-I/O error while writing GraphML"),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() {
"directed"
} else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write(
XmlEvent::start_element("key")
.attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn | (&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphMl")
.field("graph", &self.graph)
.field("pretty_print", &self.pretty_print)
.field("export_edges", &self.export_edges.is_some())
.field("export_nodes", &self.export_nodes.is_some())
.finish()
}
}
impl<G> Display for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
G: GraphProp,
G: NodeIndexable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buff = Cursor::new(Vec::new());
self.to_writer(&mut buff)
.expect("Writing to a Cursor should never create IO errors.");
let s = String::from_utf8(buff.into_inner()).unwrap();
write!(f, "{}", &s)
}
}
| fmt | identifier_name |
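Splicing this row's identifier_name middle back into the split line restores the method header:

    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {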
main.rs | #![feature(rustc_private)]
extern crate im;
extern crate pretty;
extern crate rustc_ast;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_hir;
extern crate rustc_interface;
extern crate rustc_metadata;
extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
mod ast_to_rustspec;
mod hir_to_rustspec;
mod name_resolution;
mod rustspec;
mod rustspec_to_coq;
mod rustspec_to_easycrypt;
mod rustspec_to_fstar;
mod typechecker;
mod util;
use itertools::Itertools;
use rustc_driver::{Callbacks, Compilation, RunCompiler};
use rustc_errors::emitter::{ColorConfig, HumanReadableErrorType};
use rustc_errors::DiagnosticId;
use rustc_interface::{
interface::{Compiler, Config},
Queries,
};
use rustc_session::Session;
use rustc_session::{config::ErrorOutputType, search_paths::SearchPath};
use rustc_span::MultiSpan;
use serde::Deserialize;
use serde_json;
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct HacspecCallbacks {
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
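// A minimal usage sketch (the function and span below are hypothetical, not
// part of this crate): both helpers attach the DiagnosticId "Hacspec", which
// keeps hacspec diagnostics distinguishable from rustc's own.
//
//     fn report_unsupported(sess: &Session, span: rustc_span::Span) {
//         sess.span_rustspec_err(span, "this construct is not in the hacspec subset");
//     }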
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
            .filter(|(x, _)| x != "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
            if imported_crates.is_empty() {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
// ===
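// A hedged sketch of how these structs map onto `cargo metadata
// --format-version 1` output; the JSON below is a hand-written minimal
// example (crate names and paths are made up), not captured tool output:
//
//     let json = r#"{
//         "packages": [{
//             "name": "my_crate",
//             "targets": [{ "name": "my_crate", "kind": ["lib"],
//                           "crate_types": ["lib"], "src_path": "/p/src/lib.rs" }],
//             "dependencies": [{ "name": "hacspec-lib", "kind": null }]
//         }],
//         "target_directory": "/p/target"
//     }"#;
//     let manifest: Manifest = serde_json::from_str(json).unwrap();
//     assert_eq!(manifest.packages[0].name, "my_crate");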
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
) {
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
        if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error);
}
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
        serde_json::from_str(&json_string).expect(" ⚠️ Error parsing manifest")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages | .find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}'...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
    // Add the dependencies as --extern for the hacspec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
fn main() -> Result<(), usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output_file_index = args.iter().position(|a| a == "-o");
let output_file = match output_file_index {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Optionally an input file can be passed in. This should be mostly used for
// testing.
let input_file = match args.iter().position(|a| a == "-f") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --manifest-path argument if present.
let manifest = match args.iter().position(|a| a == "--manifest-path") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
    // Read the --sysroot. It must be present.
log::trace!("args: {:?}", args);
match args.iter().position(|a| a.starts_with("--sysroot")) {
Some(i) => {
compiler_args.push(args.remove(i));
}
None => panic!(" ⚠️ --sysroot is missing. Please report this issue."),
}
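    // Taken together, the argument handling above accepts an invocation shaped
    // like this (the binary name and paths are hypothetical placeholders):
    //
    //     hacspec --sysroot /path/to/sysroot -o my_crate.fst my_package
    //
    // where -f <file> bypasses cargo metadata entirely and --manifest-path
    // <path> points at a non-default Cargo.toml.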
let mut callbacks = HacspecCallbacks {
output_file,
        // This falls back to the default target directory.
target_directory: env::current_dir().unwrap().to_str().unwrap().to_owned()
+ "/../target/debug/deps",
};
match input_file {
Some(input_file) => {
compiler_args.push(input_file);
// If only a file is provided we add the default dependencies only.
compiler_args.extend_from_slice(&[
"--extern=abstract_integers".to_string(),
"--extern=hacspec_derive".to_string(),
"--extern=hacspec_lib".to_string(),
"--extern=secret_integers".to_string(),
]);
}
None => {
let package_name = args.pop();
log::trace!("package name to analyze: {:?}", package_name);
read_crate(manifest, package_name, &mut compiler_args, &mut callbacks);
}
}
compiler_args.push("--crate-type=lib".to_string());
compiler_args.push("--edition=2021".to_string());
log::trace!("compiler_args: {:?}", compiler_args);
let compiler = RunCompiler::new(&compiler_args, &mut callbacks);
match compiler.run() {
Ok(_) => Ok(()),
Err(_) => Err(1),
}
} | .iter() | random_line_split |
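Splicing this row's random_line_split middle back between the split lines restores the iterator chain:

    manifest
        .packages
        .iter()
        .find(|p| p.name == package_name)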
main.rs | #![feature(rustc_private)]
extern crate im;
extern crate pretty;
extern crate rustc_ast;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_hir;
extern crate rustc_interface;
extern crate rustc_metadata;
extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
mod ast_to_rustspec;
mod hir_to_rustspec;
mod name_resolution;
mod rustspec;
mod rustspec_to_coq;
mod rustspec_to_easycrypt;
mod rustspec_to_fstar;
mod typechecker;
mod util;
use itertools::Itertools;
use rustc_driver::{Callbacks, Compilation, RunCompiler};
use rustc_errors::emitter::{ColorConfig, HumanReadableErrorType};
use rustc_errors::DiagnosticId;
use rustc_interface::{
interface::{Compiler, Config},
Queries,
};
use rustc_session::Session;
use rustc_session::{config::ErrorOutputType, search_paths::SearchPath};
use rustc_span::MultiSpan;
use serde::Deserialize;
use serde_json;
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct | {
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
            .filter(|(x, _)| x != "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
            if imported_crates.is_empty() {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
// ===
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
) {
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
        if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error);
}
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
        serde_json::from_str(&json_string).expect(" ⚠️ Error parsing manifest")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages
.iter()
.find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}'...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
    // Add the dependencies as --extern for the hacspec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
fn main() -> Result<(), usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output_file_index = args.iter().position(|a| a == "-o");
let output_file = match output_file_index {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Optionally an input file can be passed in. This should be mostly used for
// testing.
let input_file = match args.iter().position(|a| a == "-f") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --manifest-path argument if present.
let manifest = match args.iter().position(|a| a == "--manifest-path") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
    // Read the --sysroot. It must be present.
log::trace!("args: {:?}", args);
match args.iter().position(|a| a.starts_with("--sysroot")) {
Some(i) => {
compiler_args.push(args.remove(i));
}
None => panic!(" ⚠️ --sysroot is missing. Please report this issue."),
}
let mut callbacks = HacspecCallbacks {
output_file,
        // This falls back to the default target directory.
target_directory: env::current_dir().unwrap().to_str().unwrap().to_owned()
+ "/../target/debug/deps",
};
match input_file {
Some(input_file) => {
compiler_args.push(input_file);
// If only a file is provided we add the default dependencies only.
compiler_args.extend_from_slice(&[
"--extern=abstract_integers".to_string(),
"--extern=hacspec_derive".to_string(),
"--extern=hacspec_lib".to_string(),
"--extern=secret_integers".to_string(),
]);
}
None => {
let package_name = args.pop();
log::trace!("package name to analyze: {:?}", package_name);
read_crate(manifest, package_name, &mut compiler_args, &mut callbacks);
}
}
compiler_args.push("--crate-type=lib".to_string());
compiler_args.push("--edition=2021".to_string());
log::trace!("compiler_args: {:?}", compiler_args);
let compiler = RunCompiler::new(&compiler_args, &mut callbacks);
match compiler.run() {
Ok(_) => Ok(()),
Err(_) => Err(1),
}
}
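Splicing this row's identifier_name middle back into the split line restores the declaration:

    struct HacspecCallbacks {
        output_file: Option<String>,
        target_directory: String,
    }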
| HacspecCallbacks | identifier_name |