file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 values)
---|---|---|---|---|
elf.rs | use std::convert::TryInto;
use std::ffi::CStr;
use std::mem;
use std::ptr;
use std::slice;
use failure::{bail, format_err, Error, Fail, ResultExt};
use goblin::elf::{
header::{EM_BPF, ET_REL},
section_header::{SectionHeader, SHT_PROGBITS, SHT_REL},
sym::{Sym, STB_GLOBAL},
};
use ebpf_core::{
ffi, prog, Attach, Insn, Map, Object, Opcode, Program, Type, BPF_LICENSE_SEC, BPF_MAPS_SEC,
BPF_VERSION_SEC, BTF_ELF_SEC, BTF_EXT_ELF_SEC,
};
use crate::parser::Parser;
use crate::prog::prog_type_by_name;
impl<'a> Parser<goblin::elf::Elf<'a>> {
pub fn parse(&self, buf: &[u8]) -> Result<Object, Error> {
if self.obj.header.e_type != ET_REL || self.obj.header.e_machine != EM_BPF {
bail!("not an eBPF object file");
}
if self.obj.header.endianness()? != scroll::NATIVE {
bail!("endianness mismatch.")
}
let mut license = None;
let mut version = None;
let mut programs = vec![];
let mut maps_section = None;
let mut text_section = None;
for (idx, sec) in self.obj.section_headers.iter().enumerate() {
let name = self.resolve_name(sec.sh_name)?;
trace!("parse `{}` section: {:?}", name, sec);
let section_data = || {
buf.get(sec.file_range()).ok_or_else(|| {
format_err!(
"`{}` section data {:?} out of bounds",
name,
sec.file_range()
)
})
};
match name {
BPF_LICENSE_SEC if sec.sh_type == SHT_PROGBITS => {
license = Some(
CStr::from_bytes_with_nul(section_data()?)?
.to_str()?
.to_owned(),
);
debug!("kernel license: {}", license.as_ref().unwrap());
}
BPF_VERSION_SEC if sec.sh_type == SHT_PROGBITS => {
version = Some(u32::from_ne_bytes(section_data()?.try_into()?));
debug!("kernel version: {:x}", version.as_ref().unwrap());
}
BPF_MAPS_SEC => {
debug!("`{}` section", name);
maps_section = Some((idx, sec));
}
BTF_ELF_SEC => {
// TODO btf__new
debug!("`{}` section", name);
}
BTF_EXT_ELF_SEC => {
// TODO btf_ext_data
debug!("`{}` section", name);
}
_ if sec.sh_type == SHT_PROGBITS && sec.is_executable() && sec.sh_size > 0 => {
if name == ".text" {
text_section = Some(idx);
}
// If type is not specified, try to guess it based on section name.
let (ty, attach) = match self.prog_type {
Some(ty) if ty != Type::Unspec => (ty, self.expected_attach_type),
_ => prog_type_by_name(name)
.ok_or_else(|| format_err!("unexpected section name: {}", name))?,
};
let insns = unsafe {
let data = buf.as_ptr().add(sec.sh_offset as usize);
let len = sec.sh_size as usize / mem::size_of::<Insn>();
slice::from_raw_parts(data as *const _, len)
};
debug!(
"{:?} kernel program #{} @ section `{}` with {} insns",
ty,
idx,
name,
insns.len()
);
programs.push((name, ty, attach, idx, insns.to_vec()));
}
_ if sec.sh_type == SHT_REL => {}
_ => {
trace!("ignore `{}` section", name);
}
}
}
let maps = if let Some((idx, sec)) = maps_section {
self.init_maps(buf, idx, sec)?
} else {
Vec::new()
};
let mut programs = self
.resolve_program_names(programs, text_section)
.context("resolve program names")?;
self.relocate_programs(
&mut programs,
&maps,
maps_section.map(|(idx, _)| idx),
text_section,
)?;
Ok(Object {
license,
version,
programs,
maps,
})
}
fn init_maps(&self, buf: &[u8], idx: usize, sec: &SectionHeader) -> Result<Vec<Map>, Error> {
let mut maps = Vec::new();
let data = buf.get(sec.file_range()).ok_or_else(|| {
format_err!("`maps` section data {:?} out of bounds", sec.file_range())
})?;
let nr_maps = self
.obj
.syms
.iter()
.filter(|sym| sym.st_shndx == idx)
.count();
let map_def_sz = data.len() / nr_maps;
for sym in self.obj.syms.iter().filter(|sym| sym.st_shndx == idx) {
let name = self
.obj
.strtab
.get(sym.st_name)
.transpose()?
.ok_or_else(|| format_err!("resolve map name failed, idx={:x}", sym.st_name))?;
let mut map_def: ffi::bpf_map_def = unsafe { mem::zeroed() };
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr() as *const u8,
&mut map_def as *mut _ as *mut u8,
mem::size_of::<ffi::bpf_map_def>().min(map_def_sz),
)
}
if map_def_sz > mem::size_of::<ffi::bpf_map_def>()
&& data[mem::size_of::<ffi::bpf_map_def>()..]
.iter()
.any(|&b| b != 0)
{
bail!("maps section has unrecognized, non-zero options");
}
let map = Map::with_def(name, sym.st_value as usize, self.ifindex, &map_def)?;
debug!(
"#{} map `{}` @ section `{}`: {:?}",
maps.len(),
name,
self.resolve_name(sec.sh_name)?,
map
);
maps.push(map)
}
maps.sort_by_cached_key(|map| map.offset);
Ok(maps)
}
fn resolve_program_names(
&self,
programs: impl IntoIterator<Item = (&'a str, Type, Option<Attach>, usize, Vec<Insn>)>,
text_section: Option<usize>,
) -> Result<Vec<Program>, Error> {
programs
.into_iter()
.map(|(title, ty, attach, idx, insns)| {
let name = self
.resolve_symbol(|sym| sym.st_shndx == idx && sym.st_bind() == STB_GLOBAL)
.and_then(|sym| self.resolve_name(sym.st_name))
.or_else(|_| {
if text_section == Some(idx) {
Ok(".text")
} else {
Err(format_err!("program `{}` symbol not found", title))
}
})?;
debug!(
"#{} `{:?}` program `{}` @ section `{}` with {} insns",
idx,
ty,
name,
title,
insns.len()
);
Ok(Program::new(name, ty, attach, title, idx, insns))
})
.collect::<Result<Vec<_>, _>>()
}
fn resolve_symbol<P: FnMut(&Sym) -> bool>(&self, predicate: P) -> Result<Sym, Error> {
self.obj
.syms
.iter()
.find(predicate)
.ok_or_else(|| format_err!("symbol not found"))
}
fn resolve_name(&self, idx: usize) -> Result<&str, Error> {
self.obj
.strtab
.get(idx)
.ok_or_else(|| format_err!("index out of bounds"))?
.map_err(|err| err.context("read string").into())
}
fn | (
&self,
programs: &mut [Program],
maps: &[Map],
maps_idx: Option<usize>,
text_idx: Option<usize>,
) -> Result<(), Error> {
for (idx, sec) in &self.obj.shdr_relocs {
if let Some(prog) = programs.iter_mut().find(|prog| prog.idx == *idx) {
trace!("relocate program #{} `{}`", prog.idx, prog.name);
for reloc in sec.iter() {
let sym = self.resolve_symbol(|sym| sym.st_shndx == reloc.r_sym)?;
trace!(
"reloc for #{}, value = {}, name = {}",
reloc.r_sym,
sym.st_value,
sym.st_name
);
if Some(sym.st_shndx) != maps_idx && Some(sym.st_shndx) != text_idx {
bail!("program '{}' contains non-map related relo data pointing to section #{}", prog.name, sym.st_shndx);
}
let insn_idx = reloc.r_offset as usize / mem::size_of::<Insn>();
trace!("reloc insn #{}", insn_idx);
if Opcode::from_bits_truncate(prog.insns[insn_idx].code)
!= Opcode::LD | Opcode::IMM | Opcode::DW
{
bail!(
"invalid relocate for insns[{}].code = {:?}",
insn_idx,
prog.insns[insn_idx].code
);
}
let map_idx = maps
.iter()
.position(|map| map.offset == sym.st_value as usize)
.ok_or_else(|| format_err!("map @ {} not found", sym.st_value))?;
prog.relocs.push(prog::Reloc::LD64 { insn_idx, map_idx })
}
}
}
Ok(())
}
}
| relocate_programs | identifier_name |
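
The row above is an `identifier_name` example: the `middle` column holds the method name `relocate_programs` elided from the `fn | (` hole, and concatenating `prefix + middle + suffix` reproduces the original `elf.rs`. Below is a minimal sketch of how such a row can be reassembled and rendered as a fill-in-the-middle training prompt. The `FimRow` helper struct and the `<fim_prefix>`/`<fim_suffix>`/`<fim_middle>` sentinel strings are assumptions for illustration (StarCoder-style PSM formatting), not something this dataset prescribes.

```rust
/// One dataset row, mirroring the columns above. Hypothetical helper
/// type for illustration; the dataset itself only stores the strings.
struct FimRow {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl FimRow {
    /// Reassemble the original file: by construction of FIM datasets,
    /// the middle slots between prefix and suffix.
    fn original(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }

    /// Render a PSM-style (prefix-suffix-middle) training prompt.
    /// The sentinel strings are assumed, StarCoder-style; substitute
    /// whatever special tokens your model actually uses.
    fn psm_prompt(&self) -> String {
        format!(
            "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
            self.prefix, self.suffix, self.middle
        )
    }
}

fn main() {
    let row = FimRow {
        file_name: "elf.rs".to_string(),
        prefix: "fn ".to_string(),
        middle: "relocate_programs".to_string(),
        suffix: "(\n    &self,\n".to_string(),
        fim_type: "identifier_name".to_string(),
    };
    assert_eq!(row.original(), "fn relocate_programs(\n    &self,\n");
    println!("{}", row.psm_prompt());
}
```
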
elf.rs | use std::convert::TryInto;
use std::ffi::CStr;
use std::mem;
use std::ptr;
use std::slice;
use failure::{bail, format_err, Error, Fail, ResultExt};
use goblin::elf::{
header::{EM_BPF, ET_REL},
section_header::{SectionHeader, SHT_PROGBITS, SHT_REL},
sym::{Sym, STB_GLOBAL},
};
use ebpf_core::{
ffi, prog, Attach, Insn, Map, Object, Opcode, Program, Type, BPF_LICENSE_SEC, BPF_MAPS_SEC,
BPF_VERSION_SEC, BTF_ELF_SEC, BTF_EXT_ELF_SEC,
};
use crate::parser::Parser;
use crate::prog::prog_type_by_name;
impl<'a> Parser<goblin::elf::Elf<'a>> {
pub fn parse(&self, buf: &[u8]) -> Result<Object, Error> {
if self.obj.header.e_type != ET_REL || self.obj.header.e_machine != EM_BPF {
bail!("not an eBPF object file");
}
if self.obj.header.endianness()? != scroll::NATIVE {
bail!("endianness mismatch.")
}
let mut license = None;
let mut version = None;
let mut programs = vec![];
let mut maps_section = None;
let mut text_section = None;
for (idx, sec) in self.obj.section_headers.iter().enumerate() {
let name = self.resolve_name(sec.sh_name)?;
trace!("parse `{}` section: {:?}", name, sec);
let section_data = || {
buf.get(sec.file_range()).ok_or_else(|| {
format_err!(
"`{}` section data {:?} out of bounds",
name,
sec.file_range()
)
})
};
match name {
BPF_LICENSE_SEC if sec.sh_type == SHT_PROGBITS => {
license = Some(
CStr::from_bytes_with_nul(section_data()?)?
.to_str()?
.to_owned(),
);
debug!("kernel license: {}", license.as_ref().unwrap());
}
BPF_VERSION_SEC if sec.sh_type == SHT_PROGBITS => {
version = Some(u32::from_ne_bytes(section_data()?.try_into()?));
debug!("kernel version: {:x}", version.as_ref().unwrap());
}
BPF_MAPS_SEC => {
debug!("`{}` section", name);
maps_section = Some((idx, sec));
}
BTF_ELF_SEC => {
// TODO btf__new
debug!("`{}` section", name);
}
BTF_EXT_ELF_SEC => {
// TODO btf_ext_data
debug!("`{}` section", name);
}
_ if sec.sh_type == SHT_PROGBITS && sec.is_executable() && sec.sh_size > 0 => {
if name == ".text" {
text_section = Some(idx);
}
// If type is not specified, try to guess it based on section name.
let (ty, attach) = match self.prog_type {
Some(ty) if ty != Type::Unspec => (ty, self.expected_attach_type),
_ => prog_type_by_name(name)
.ok_or_else(|| format_err!("unexpected section name: {}", name))?,
};
let insns = unsafe {
let data = buf.as_ptr().add(sec.sh_offset as usize);
let len = sec.sh_size as usize / mem::size_of::<Insn>();
slice::from_raw_parts(data as *const _, len)
};
| debug!(
"{:?} kernel program #{} @ section `{}` with {} insns",
ty,
idx,
name,
insns.len()
);
programs.push((name, ty, attach, idx, insns.to_vec()));
}
_ if sec.sh_type == SHT_REL => {}
_ => {
trace!("ignore `{}` section", name);
}
}
}
let maps = if let Some((idx, sec)) = maps_section {
self.init_maps(buf, idx, sec)?
} else {
Vec::new()
};
let mut programs = self
.resolve_program_names(programs, text_section)
.context("resolve program names")?;
self.relocate_programs(
&mut programs,
&maps,
maps_section.map(|(idx, _)| idx),
text_section,
)?;
Ok(Object {
license,
version,
programs,
maps,
})
}
fn init_maps(&self, buf: &[u8], idx: usize, sec: &SectionHeader) -> Result<Vec<Map>, Error> {
let mut maps = Vec::new();
let data = buf.get(sec.file_range()).ok_or_else(|| {
format_err!("`maps` section data {:?} out of bounds", sec.file_range())
})?;
let nr_maps = self
.obj
.syms
.iter()
.filter(|sym| sym.st_shndx == idx)
.count();
let map_def_sz = data.len() / nr_maps;
for sym in self.obj.syms.iter().filter(|sym| sym.st_shndx == idx) {
let name = self
.obj
.strtab
.get(sym.st_name)
.transpose()?
.ok_or_else(|| format_err!("resolve map name failed, idx={:x}", sym.st_name))?;
let mut map_def: ffi::bpf_map_def = unsafe { mem::zeroed() };
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr() as *const u8,
&mut map_def as *mut _ as *mut u8,
mem::size_of::<ffi::bpf_map_def>().min(map_def_sz),
)
}
if map_def_sz > mem::size_of::<ffi::bpf_map_def>()
&& data[mem::size_of::<ffi::bpf_map_def>()..]
.iter()
.any(|&b| b != 0)
{
bail!("maps section has unrecognized, non-zero options");
}
let map = Map::with_def(name, sym.st_value as usize, self.ifindex, &map_def)?;
debug!(
"#{} map `{}` @ section `{}`: {:?}",
maps.len(),
name,
self.resolve_name(sec.sh_name)?,
map
);
maps.push(map)
}
maps.sort_by_cached_key(|map| map.offset);
Ok(maps)
}
fn resolve_program_names(
&self,
programs: impl IntoIterator<Item = (&'a str, Type, Option<Attach>, usize, Vec<Insn>)>,
text_section: Option<usize>,
) -> Result<Vec<Program>, Error> {
programs
.into_iter()
.map(|(title, ty, attach, idx, insns)| {
let name = self
.resolve_symbol(|sym| sym.st_shndx == idx && sym.st_bind() == STB_GLOBAL)
.and_then(|sym| self.resolve_name(sym.st_name))
.or_else(|_| {
if text_section == Some(idx) {
Ok(".text")
} else {
Err(format_err!("program `{}` symbol not found", title))
}
})?;
debug!(
"#{} `{:?}` program `{}` @ section `{}` with {} insns",
idx,
ty,
name,
title,
insns.len()
);
Ok(Program::new(name, ty, attach, title, idx, insns))
})
.collect::<Result<Vec<_>, _>>()
}
fn resolve_symbol<P: FnMut(&Sym) -> bool>(&self, predicate: P) -> Result<Sym, Error> {
self.obj
.syms
.iter()
.find(predicate)
.ok_or_else(|| format_err!("symbol not found"))
}
fn resolve_name(&self, idx: usize) -> Result<&str, Error> {
self.obj
.strtab
.get(idx)
.ok_or_else(|| format_err!("index out of bounds"))?
.map_err(|err| err.context("read string").into())
}
fn relocate_programs(
&self,
programs: &mut [Program],
maps: &[Map],
maps_idx: Option<usize>,
text_idx: Option<usize>,
) -> Result<(), Error> {
for (idx, sec) in &self.obj.shdr_relocs {
if let Some(prog) = programs.iter_mut().find(|prog| prog.idx == *idx) {
trace!("relocate program #{} `{}`", prog.idx, prog.name);
for reloc in sec.iter() {
let sym = self.resolve_symbol(|sym| sym.st_shndx == reloc.r_sym)?;
trace!(
"reloc for #{}, value = {}, name = {}",
reloc.r_sym,
sym.st_value,
sym.st_name
);
if Some(sym.st_shndx) != maps_idx && Some(sym.st_shndx) != text_idx {
bail!("program '{}' contains non-map related relo data pointing to section #{}", prog.name, sym.st_shndx);
}
let insn_idx = reloc.r_offset as usize / mem::size_of::<Insn>();
trace!("reloc insn #{}", insn_idx);
if Opcode::from_bits_truncate(prog.insns[insn_idx].code)
!= Opcode::LD | Opcode::IMM | Opcode::DW
{
bail!(
"invalid relocate for insns[{}].code = {:?}",
insn_idx,
prog.insns[insn_idx].code
);
}
let map_idx = maps
.iter()
.position(|map| map.offset == sym.st_value as usize)
.ok_or_else(|| format_err!("map @ {} not found", sym.st_value))?;
prog.relocs.push(prog::Reloc::LD64 { insn_idx, map_idx })
}
}
}
Ok(())
}
} | random_line_split |
|
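
The row above is a `random_line_split` example (its `middle` appears empty in this extraction, which the schema permits: `middle` lengths start at 0): the file is cut at arbitrary line boundaries rather than at a syntactic unit. Below is a sketch of how such a split could be produced; it illustrates the idea only and is not the dataset's actual generation pipeline. It assumes the `rand` crate (0.8) as a dependency.

```rust
use rand::Rng; // assumed dependency: rand = "0.8"

/// Split `source` into (prefix, middle, suffix) at two random line
/// boundaries. Illustrative only; the dataset's real pipeline is not
/// shown in this preview.
fn random_line_split(source: &str) -> (String, String, String) {
    // Byte offsets at which a line starts, plus the end of the file.
    let mut bounds: Vec<usize> = std::iter::once(0)
        .chain(source.match_indices('\n').map(|(i, _)| i + 1))
        .collect();
    bounds.push(source.len());
    bounds.dedup(); // a trailing '\n' would otherwise duplicate the end

    let mut rng = rand::thread_rng();
    let a = rng.gen_range(0..bounds.len());
    let b = rng.gen_range(a..bounds.len());

    (
        source[..bounds[a]].to_string(),
        source[bounds[a]..bounds[b]].to_string(),
        source[bounds[b]..].to_string(),
    )
}
```
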
elf.rs | use std::convert::TryInto;
use std::ffi::CStr;
use std::mem;
use std::ptr;
use std::slice;
use failure::{bail, format_err, Error, Fail, ResultExt};
use goblin::elf::{
header::{EM_BPF, ET_REL},
section_header::{SectionHeader, SHT_PROGBITS, SHT_REL},
sym::{Sym, STB_GLOBAL},
};
use ebpf_core::{
ffi, prog, Attach, Insn, Map, Object, Opcode, Program, Type, BPF_LICENSE_SEC, BPF_MAPS_SEC,
BPF_VERSION_SEC, BTF_ELF_SEC, BTF_EXT_ELF_SEC,
};
use crate::parser::Parser;
use crate::prog::prog_type_by_name;
impl<'a> Parser<goblin::elf::Elf<'a>> {
pub fn parse(&self, buf: &[u8]) -> Result<Object, Error> {
if self.obj.header.e_type != ET_REL || self.obj.header.e_machine != EM_BPF {
bail!("not an eBPF object file");
}
if self.obj.header.endianness()? != scroll::NATIVE {
bail!("endianness mismatch.")
}
let mut license = None;
let mut version = None;
let mut programs = vec![];
let mut maps_section = None;
let mut text_section = None;
for (idx, sec) in self.obj.section_headers.iter().enumerate() {
let name = self.resolve_name(sec.sh_name)?;
trace!("parse `{}` section: {:?}", name, sec);
let section_data = || {
buf.get(sec.file_range()).ok_or_else(|| {
format_err!(
"`{}` section data {:?} out of bounds",
name,
sec.file_range()
)
})
};
match name {
BPF_LICENSE_SEC if sec.sh_type == SHT_PROGBITS => |
BPF_VERSION_SEC if sec.sh_type == SHT_PROGBITS => {
version = Some(u32::from_ne_bytes(section_data()?.try_into()?));
debug!("kernel version: {:x}", version.as_ref().unwrap());
}
BPF_MAPS_SEC => {
debug!("`{}` section", name);
maps_section = Some((idx, sec));
}
BTF_ELF_SEC => {
// TODO btf__new
debug!("`{}` section", name);
}
BTF_EXT_ELF_SEC => {
// TODO btf_ext_data
debug!("`{}` section", name);
}
_ if sec.sh_type == SHT_PROGBITS && sec.is_executable() && sec.sh_size > 0 => {
if name == ".text" {
text_section = Some(idx);
}
// If type is not specified, try to guess it based on section name.
let (ty, attach) = match self.prog_type {
Some(ty) if ty != Type::Unspec => (ty, self.expected_attach_type),
_ => prog_type_by_name(name)
.ok_or_else(|| format_err!("unexpected section name: {}", name))?,
};
let insns = unsafe {
let data = buf.as_ptr().add(sec.sh_offset as usize);
let len = sec.sh_size as usize / mem::size_of::<Insn>();
slice::from_raw_parts(data as *const _, len)
};
debug!(
"{:?} kernel program #{} @ section `{}` with {} insns",
ty,
idx,
name,
insns.len()
);
programs.push((name, ty, attach, idx, insns.to_vec()));
}
_ if sec.sh_type == SHT_REL => {}
_ => {
trace!("ignore `{}` section", name);
}
}
}
let maps = if let Some((idx, sec)) = maps_section {
self.init_maps(buf, idx, sec)?
} else {
Vec::new()
};
let mut programs = self
.resolve_program_names(programs, text_section)
.context("resolve program names")?;
self.relocate_programs(
&mut programs,
&maps,
maps_section.map(|(idx, _)| idx),
text_section,
)?;
Ok(Object {
license,
version,
programs,
maps,
})
}
fn init_maps(&self, buf: &[u8], idx: usize, sec: &SectionHeader) -> Result<Vec<Map>, Error> {
let mut maps = Vec::new();
let data = buf.get(sec.file_range()).ok_or_else(|| {
format_err!("`maps` section data {:?} out of bounds", sec.file_range())
})?;
let nr_maps = self
.obj
.syms
.iter()
.filter(|sym| sym.st_shndx == idx)
.count();
let map_def_sz = data.len() / nr_maps;
for sym in self.obj.syms.iter().filter(|sym| sym.st_shndx == idx) {
let name = self
.obj
.strtab
.get(sym.st_name)
.transpose()?
.ok_or_else(|| format_err!("resolve map name failed, idx={:x}", sym.st_name))?;
let mut map_def: ffi::bpf_map_def = unsafe { mem::zeroed() };
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr() as *const u8,
&mut map_def as *mut _ as *mut u8,
mem::size_of::<ffi::bpf_map_def>().min(map_def_sz),
)
}
if map_def_sz > mem::size_of::<ffi::bpf_map_def>()
&& data[mem::size_of::<ffi::bpf_map_def>()..]
.iter()
.any(|&b| b != 0)
{
bail!("maps section has unrecognized, non-zero options");
}
let map = Map::with_def(name, sym.st_value as usize, self.ifindex, &map_def)?;
debug!(
"#{} map `{}` @ section `{}`: {:?}",
maps.len(),
name,
self.resolve_name(sec.sh_name)?,
map
);
maps.push(map)
}
maps.sort_by_cached_key(|map| map.offset);
Ok(maps)
}
fn resolve_program_names(
&self,
programs: impl IntoIterator<Item = (&'a str, Type, Option<Attach>, usize, Vec<Insn>)>,
text_section: Option<usize>,
) -> Result<Vec<Program>, Error> {
programs
.into_iter()
.map(|(title, ty, attach, idx, insns)| {
let name = self
.resolve_symbol(|sym| sym.st_shndx == idx && sym.st_bind() == STB_GLOBAL)
.and_then(|sym| self.resolve_name(sym.st_name))
.or_else(|_| {
if text_section == Some(idx) {
Ok(".text")
} else {
Err(format_err!("program `{}` symbol not found", title))
}
})?;
debug!(
"#{} `{:?}` program `{}` @ section `{}` with {} insns",
idx,
ty,
name,
title,
insns.len()
);
Ok(Program::new(name, ty, attach, title, idx, insns))
})
.collect::<Result<Vec<_>, _>>()
}
fn resolve_symbol<P: FnMut(&Sym) -> bool>(&self, predicate: P) -> Result<Sym, Error> {
self.obj
.syms
.iter()
.find(predicate)
.ok_or_else(|| format_err!("symbol not found"))
}
fn resolve_name(&self, idx: usize) -> Result<&str, Error> {
self.obj
.strtab
.get(idx)
.ok_or_else(|| format_err!("index out of bounds"))?
.map_err(|err| err.context("read string").into())
}
fn relocate_programs(
&self,
programs: &mut [Program],
maps: &[Map],
maps_idx: Option<usize>,
text_idx: Option<usize>,
) -> Result<(), Error> {
for (idx, sec) in &self.obj.shdr_relocs {
if let Some(prog) = programs.iter_mut().find(|prog| prog.idx == *idx) {
trace!("relocate program #{} `{}`", prog.idx, prog.name);
for reloc in sec.iter() {
let sym = self.resolve_symbol(|sym| sym.st_shndx == reloc.r_sym)?;
trace!(
"reloc for #{}, value = {}, name = {}",
reloc.r_sym,
sym.st_value,
sym.st_name
);
if Some(sym.st_shndx) != maps_idx && Some(sym.st_shndx) != text_idx {
bail!("program '{}' contains non-map related relo data pointing to section #{}", prog.name, sym.st_shndx);
}
let insn_idx = reloc.r_offset as usize / mem::size_of::<Insn>();
trace!("reloc insn #{}", insn_idx);
if Opcode::from_bits_truncate(prog.insns[insn_idx].code)
!= Opcode::LD | Opcode::IMM | Opcode::DW
{
bail!(
"invalid relocate for insns[{}].code = {:?}",
insn_idx,
prog.insns[insn_idx].code
);
}
let map_idx = maps
.iter()
.position(|map| map.offset == sym.st_value as usize)
.ok_or_else(|| format_err!("map @ {} not found", sym.st_value))?;
prog.relocs.push(prog::Reloc::LD64 { insn_idx, map_idx })
}
}
}
Ok(())
}
}
| {
license = Some(
CStr::from_bytes_with_nul(section_data()?)?
.to_str()?
.to_owned(),
);
debug!("kernel license: {}", license.as_ref().unwrap());
} | conditional_block |
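
This row's `conditional_block` hole elides the entire body of the `BPF_LICENSE_SEC` match arm, so the `middle` is a complete brace-delimited block. One cheap sanity check for such rows is brace balance; a simplified sketch follows (it deliberately ignores that braces may occur inside string literals and comments, so it is a quick heuristic filter, not a parser):

```rust
/// Returns true if `middle` closes every brace it opens and never
/// closes one it did not open. Simplified: braces inside strings or
/// comments are counted too, so treat this as a heuristic filter.
fn is_brace_balanced(middle: &str) -> bool {
    let mut depth: i64 = 0;
    for c in middle.chars() {
        match c {
            '{' => depth += 1,
            '}' => {
                depth -= 1;
                if depth < 0 {
                    return false; // closed a brace that was never opened
                }
            }
            _ => {}
        }
    }
    depth == 0
}

fn main() {
    assert!(is_brace_balanced("{ license = Some(x); }"));
    assert!(!is_brace_balanced("} else {"));
}
```
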
list.rs | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! A row or column with run-time adjustable contents
use std::ops::{Index, IndexMut};
use kas::dir::{Down, Right};
use kas::layout::{self, RulesSetter, RulesSolver};
use kas::{event, prelude::*};
/// A generic row widget
///
/// See documentation of [`List`] type.
pub type Row<W> = List<Right, W>;
/// A generic column widget
///
/// See documentation of [`List`] type.
pub type Column<W> = List<Down, W>;
/// A row of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxRow<M> = BoxList<Right, M>;
/// A column of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxColumn<M> = BoxList<Down, M>;
/// A row/column of boxed widgets
///
/// This is parameterised over directionality and handler message type.
///
/// See documentation of [`List`] type.
pub type BoxList<D, M> = List<D, Box<dyn Widget<Msg = M>>>;
/// A generic row/column widget
///
/// This type is generic over both directionality and the type of child widgets.
/// Essentially, it is a [`Vec`] which also implements the [`Widget`] trait.
///
/// [`Row`] and [`Column`] are parameterisations with set directionality.
///
/// [`BoxList`] (and its derivatives [`BoxRow`], [`BoxColumn`]) parameterise
/// `W = Box<dyn Widget>`, thus supporting individually boxed child widgets.
/// This allows use of multiple types of child widget at the cost of extra
/// allocation, and requires dynamic dispatch of methods.
///
/// Configuring and resizing elements is O(n) in the number of children.
/// Drawing and event handling is O(log n) in the number of children (assuming
/// only a small number are visible at any one time).
///
/// For fixed configurations of child widgets, [`make_widget`] can be used
/// instead. [`make_widget`] has the advantage that it can support child widgets
/// of multiple types without allocation and via static dispatch, but the
/// disadvantage that drawing and event handling are O(n) in the number of
/// children.
///
/// [`make_widget`]: ../macros/index.html#the-make_widget-macro
#[derive(Clone, Default, Debug, Widget)]
#[handler(send=noauto, msg=(usize, <W as event::Handler>::Msg))]
#[widget(children=noauto)]
pub struct List<D: Directional, W: Widget> {
first_id: WidgetId,
#[widget_core]
core: CoreData,
widgets: Vec<W>,
data: layout::DynRowStorage,
direction: D,
}
impl<D: Directional, W: Widget> WidgetChildren for List<D, W> {
#[inline]
fn first_id(&self) -> WidgetId {
self.first_id
}
fn record_first_id(&mut self, id: WidgetId) {
self.first_id = id;
}
#[inline]
fn num_children(&self) -> usize {
self.widgets.len()
}
#[inline]
fn get_child(&self, index: usize) -> Option<&dyn WidgetConfig> {
self.widgets.get(index).map(|w| w.as_widget())
}
#[inline]
fn get_child_mut(&mut self, index: usize) -> Option<&mut dyn WidgetConfig> {
self.widgets.get_mut(index).map(|w| w.as_widget_mut())
}
}
impl<D: Directional, W: Widget> Layout for List<D, W> {
fn size_rules(&mut self, size_handle: &mut dyn SizeHandle, axis: AxisInfo) -> SizeRules {
let dim = (self.direction, self.widgets.len());
let mut solver = layout::RowSolver::new(axis, dim, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
solver.for_child(&mut self.data, n, |axis| {
child.size_rules(size_handle, axis)
});
}
solver.finish(&mut self.data)
}
fn set_rect(&mut self, mgr: &mut Manager, rect: Rect, align: AlignHints) {
self.core.rect = rect;
let dim = (self.direction, self.widgets.len());
let mut setter = layout::RowSetter::<D, Vec<i32>, _>::new(rect, dim, align, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
child.set_rect(mgr, setter.child_rect(&mut self.data, n), align);
}
}
fn spatial_nav(&self, reverse: bool, from: Option<usize>) -> Option<usize> {
if self.num_children() == 0 {
return None;
}
let last = self.num_children() - 1;
let reverse = reverse ^ self.direction.is_reversed();
if let Some(index) = from {
match reverse {
false if index < last => Some(index + 1),
true if 0 < index => Some(index - 1),
_ => None,
}
} else {
match reverse {
false => Some(0),
true => Some(last),
}
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
if !self.rect().contains(coord) {
return None;
}
let solver = layout::RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) |
Some(self.id())
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &event::ManagerState, disabled: bool) {
let disabled = disabled || self.is_disabled();
let solver = layout::RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.get_clip_rect(), |w| {
w.draw(draw_handle, mgr, disabled)
});
}
}
impl<D: Directional, W: Widget> event::SendEvent for List<D, W> {
fn send(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
if !self.is_disabled() {
for (i, child) in self.widgets.iter_mut().enumerate() {
if id <= child.id() {
let r = child.send(mgr, id, event);
return match Response::try_from(r) {
Ok(r) => r,
Err(msg) => {
log::trace!(
"Received by {} from {}: {:?}",
self.id(),
id,
kas::util::TryFormat(&msg)
);
Response::Msg((i, msg))
}
};
}
}
}
Response::Unhandled
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
impl<W: Widget> List<Direction, W> {
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.direction = direction;
// Note: most of the time SET_SIZE would be enough, but margins can be different
TkAction::RESIZE
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.direction.as_direction()
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.widgets.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self) -> TkAction {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
self.widgets.clear();
action
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, widget: W) -> TkAction {
self.widgets.push(widget);
TkAction::RECONFIGURE
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn pop(&mut self) -> (Option<W>, TkAction) {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
(self.widgets.pop(), action)
}
/// Inserts a child widget at position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, index: usize, widget: W) -> TkAction {
self.widgets.insert(index, widget);
TkAction::RECONFIGURE
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, index: usize) -> (W, TkAction) {
let r = self.widgets.remove(index);
(r, TkAction::RECONFIGURE)
}
/// Replace the child at `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
// TODO: in theory it is possible to avoid a reconfigure where both widgets
// have no children and have compatible size. Is this a good idea and can
// we somehow test "has compatible size"?
pub fn replace(&mut self, index: usize, mut widget: W) -> (W, TkAction) {
std::mem::swap(&mut widget, &mut self.widgets[index]);
(widget, TkAction::RECONFIGURE)
}
/// Append child widgets from an iterator
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are added.
pub fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) -> TkAction {
let len = self.widgets.len();
self.widgets.extend(iter);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Resize, using the given closure to construct new widgets
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn resize_with<F: Fn(usize) -> W>(&mut self, len: usize, f: F) -> TkAction {
let l0 = self.widgets.len();
if l0 == len {
return TkAction::empty();
} else if l0 > len {
self.widgets.truncate(len);
} else {
self.widgets.reserve(len);
for i in l0..len {
self.widgets.push(f(i));
}
}
TkAction::RECONFIGURE
}
/// Retain only widgets satisfying predicate `f`
///
/// See documentation of [`Vec::retain`].
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are removed.
pub fn retain<F: FnMut(&W) -> bool>(&mut self, f: F) -> TkAction {
let len = self.widgets.len();
self.widgets.retain(f);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &W> {
ListIter {
list: &self.widgets,
}
}
/// Get the index of the child which is an ancestor of `id`, if any
pub fn find_child_index(&self, id: WidgetId) -> Option<usize> {
if id >= self.first_id {
for (i, child) in self.widgets.iter().enumerate() {
if id <= child.id() {
return Some(i);
}
}
}
None
}
}
impl<D: Directional, W: Widget> Index<usize> for List<D, W> {
type Output = W;
fn index(&self, index: usize) -> &Self::Output {
&self.widgets[index]
}
}
impl<D: Directional, W: Widget> IndexMut<usize> for List<D, W> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.widgets[index]
}
}
struct ListIter<'a, W: Widget> {
list: &'a [W],
}
impl<'a, W: Widget> Iterator for ListIter<'a, W> {
type Item = &'a W;
fn next(&mut self) -> Option<Self::Item> {
if !self.list.is_empty() {
let item = &self.list[0];
self.list = &self.list[1..];
Some(item)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<'a, W: Widget> ExactSizeIterator for ListIter<'a, W> {
fn len(&self) -> usize {
self.list.len()
}
}
| {
return child.find_id(coord);
} | conditional_block |
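
Only three of the four `fim_type` classes advertised in the header (`large_stringclasses 4`) are visible in this preview: `identifier_name`, `conditional_block`, and `random_line_split`; the fourth does not appear here. A small validation pass over parsed rows, reusing the hypothetical `FimRow` struct sketched after the first row, might look like this (the label list is intentionally limited to the classes visible above):

```rust
/// Validate a batch of rows: the fim_type must be a label we know and
/// the reassembled document must be non-empty. `FimRow` is the helper
/// struct sketched earlier; the label set is incomplete by design,
/// since the fourth class does not appear in this preview.
fn validate(rows: &[FimRow]) -> Result<(), String> {
    const KNOWN: [&str; 3] = [
        "identifier_name",
        "conditional_block",
        "random_line_split",
    ];
    for (n, row) in rows.iter().enumerate() {
        if !KNOWN.contains(&row.fim_type.as_str()) {
            return Err(format!("row {n}: unknown fim_type `{}`", row.fim_type));
        }
        if row.original().is_empty() {
            return Err(format!("row {n}: empty document after reassembly"));
        }
    }
    Ok(())
}
```
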
list.rs | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! A row or column with run-time adjustable contents
use std::ops::{Index, IndexMut};
use kas::dir::{Down, Right};
use kas::layout::{self, RulesSetter, RulesSolver};
use kas::{event, prelude::*};
/// A generic row widget
///
/// See documentation of [`List`] type.
pub type Row<W> = List<Right, W>;
/// A generic column widget
///
/// See documentation of [`List`] type.
pub type Column<W> = List<Down, W>;
/// A row of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxRow<M> = BoxList<Right, M>;
/// A column of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxColumn<M> = BoxList<Down, M>;
/// A row/column of boxed widgets
///
/// This is parameterised over directionality and handler message type.
///
/// See documentation of [`List`] type.
pub type BoxList<D, M> = List<D, Box<dyn Widget<Msg = M>>>;
/// A generic row/column widget
///
/// This type is generic over both directionality and the type of child widgets.
/// Essentially, it is a [`Vec`] which also implements the [`Widget`] trait.
///
/// [`Row`] and [`Column`] are parameterisations with set directionality.
///
/// [`BoxList`] (and its derivatives [`BoxRow`], [`BoxColumn`]) parameterise
/// `W = Box<dyn Widget>`, thus supporting individually boxed child widgets.
/// This allows use of multiple types of child widget at the cost of extra
/// allocation, and requires dynamic dispatch of methods.
///
/// Configuring and resizing elements is O(n) in the number of children.
/// Drawing and event handling is O(log n) in the number of children (assuming
/// only a small number are visible at any one time).
///
/// For fixed configurations of child widgets, [`make_widget`] can be used
/// instead. [`make_widget`] has the advantage that it can support child widgets
/// of multiple types without allocation and via static dispatch, but the
/// disadvantage that drawing and event handling are O(n) in the number of
/// children.
///
/// [`make_widget`]: ../macros/index.html#the-make_widget-macro
#[derive(Clone, Default, Debug, Widget)]
#[handler(send=noauto, msg=(usize, <W as event::Handler>::Msg))]
#[widget(children=noauto)]
pub struct List<D: Directional, W: Widget> {
first_id: WidgetId,
#[widget_core]
core: CoreData,
widgets: Vec<W>,
data: layout::DynRowStorage,
direction: D,
}
impl<D: Directional, W: Widget> WidgetChildren for List<D, W> {
#[inline]
fn first_id(&self) -> WidgetId {
self.first_id
}
fn record_first_id(&mut self, id: WidgetId) {
self.first_id = id;
}
#[inline]
fn num_children(&self) -> usize {
self.widgets.len()
}
#[inline]
fn get_child(&self, index: usize) -> Option<&dyn WidgetConfig> {
self.widgets.get(index).map(|w| w.as_widget())
}
#[inline]
fn | (&mut self, index: usize) -> Option<&mut dyn WidgetConfig> {
self.widgets.get_mut(index).map(|w| w.as_widget_mut())
}
}
impl<D: Directional, W: Widget> Layout for List<D, W> {
fn size_rules(&mut self, size_handle: &mut dyn SizeHandle, axis: AxisInfo) -> SizeRules {
let dim = (self.direction, self.widgets.len());
let mut solver = layout::RowSolver::new(axis, dim, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
solver.for_child(&mut self.data, n, |axis| {
child.size_rules(size_handle, axis)
});
}
solver.finish(&mut self.data)
}
fn set_rect(&mut self, mgr: &mut Manager, rect: Rect, align: AlignHints) {
self.core.rect = rect;
let dim = (self.direction, self.widgets.len());
let mut setter = layout::RowSetter::<D, Vec<i32>, _>::new(rect, dim, align, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
child.set_rect(mgr, setter.child_rect(&mut self.data, n), align);
}
}
fn spatial_nav(&self, reverse: bool, from: Option<usize>) -> Option<usize> {
if self.num_children() == 0 {
return None;
}
let last = self.num_children() - 1;
let reverse = reverse ^ self.direction.is_reversed();
if let Some(index) = from {
match reverse {
false if index < last => Some(index + 1),
true if 0 < index => Some(index - 1),
_ => None,
}
} else {
match reverse {
false => Some(0),
true => Some(last),
}
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
if !self.rect().contains(coord) {
return None;
}
let solver = layout::RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) {
return child.find_id(coord);
}
Some(self.id())
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &event::ManagerState, disabled: bool) {
let disabled = disabled || self.is_disabled();
let solver = layout::RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.get_clip_rect(), |w| {
w.draw(draw_handle, mgr, disabled)
});
}
}
impl<D: Directional, W: Widget> event::SendEvent for List<D, W> {
fn send(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
if !self.is_disabled() {
for (i, child) in self.widgets.iter_mut().enumerate() {
if id <= child.id() {
let r = child.send(mgr, id, event);
return match Response::try_from(r) {
Ok(r) => r,
Err(msg) => {
log::trace!(
"Received by {} from {}: {:?}",
self.id(),
id,
kas::util::TryFormat(&msg)
);
Response::Msg((i, msg))
}
};
}
}
}
Response::Unhandled
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
impl<W: Widget> List<Direction, W> {
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.direction = direction;
// Note: most of the time SET_SIZE would be enough, but margins can be different
TkAction::RESIZE
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.direction.as_direction()
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.widgets.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self) -> TkAction {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
self.widgets.clear();
action
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, widget: W) -> TkAction {
self.widgets.push(widget);
TkAction::RECONFIGURE
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn pop(&mut self) -> (Option<W>, TkAction) {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
(self.widgets.pop(), action)
}
/// Inserts a child widget at position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, index: usize, widget: W) -> TkAction {
self.widgets.insert(index, widget);
TkAction::RECONFIGURE
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, index: usize) -> (W, TkAction) {
let r = self.widgets.remove(index);
(r, TkAction::RECONFIGURE)
}
/// Replace the child at `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
// TODO: in theory it is possible to avoid a reconfigure where both widgets
// have no children and have compatible size. Is this a good idea and can
// we somehow test "has compatible size"?
pub fn replace(&mut self, index: usize, mut widget: W) -> (W, TkAction) {
std::mem::swap(&mut widget, &mut self.widgets[index]);
(widget, TkAction::RECONFIGURE)
}
/// Append child widgets from an iterator
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are added.
pub fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) -> TkAction {
let len = self.widgets.len();
self.widgets.extend(iter);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Resize, using the given closure to construct new widgets
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn resize_with<F: Fn(usize) -> W>(&mut self, len: usize, f: F) -> TkAction {
let l0 = self.widgets.len();
if l0 == len {
return TkAction::empty();
} else if l0 > len {
self.widgets.truncate(len);
} else {
self.widgets.reserve(len);
for i in l0..len {
self.widgets.push(f(i));
}
}
TkAction::RECONFIGURE
}
/// Retain only widgets satisfying predicate `f`
///
/// See documentation of [`Vec::retain`].
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are removed.
pub fn retain<F: FnMut(&W) -> bool>(&mut self, f: F) -> TkAction {
let len = self.widgets.len();
self.widgets.retain(f);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &W> {
ListIter {
list: &self.widgets,
}
}
/// Get the index of the child which is an ancestor of `id`, if any
pub fn find_child_index(&self, id: WidgetId) -> Option<usize> {
if id >= self.first_id {
for (i, child) in self.widgets.iter().enumerate() {
if id <= child.id() {
return Some(i);
}
}
}
None
}
}
impl<D: Directional, W: Widget> Index<usize> for List<D, W> {
type Output = W;
fn index(&self, index: usize) -> &Self::Output {
&self.widgets[index]
}
}
impl<D: Directional, W: Widget> IndexMut<usize> for List<D, W> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.widgets[index]
}
}
struct ListIter<'a, W: Widget> {
list: &'a [W],
}
impl<'a, W: Widget> Iterator for ListIter<'a, W> {
type Item = &'a W;
fn next(&mut self) -> Option<Self::Item> {
if !self.list.is_empty() {
let item = &self.list[0];
self.list = &self.list[1..];
Some(item)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<'a, W: Widget> ExactSizeIterator for ListIter<'a, W> {
fn len(&self) -> usize {
self.list.len()
}
}
| get_child_mut | identifier_name |
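
An `identifier_name` row like this one (middle = `get_child_mut`) has a target only a few tokens long. For such short holes, some FIM training setups use SPM (suffix-prefix-middle) ordering in addition to PSM. The sketch below extends the hypothetical `FimRow` from earlier with the naive SPM rendering; exact sentinel placement varies between models, so take the format as an assumption.

```rust
impl FimRow {
    /// Naive SPM (suffix-prefix-middle) prompt. As with `psm_prompt`,
    /// the sentinel strings are assumed; some models place sentinels
    /// differently in SPM mode, so check your tokenizer's convention.
    fn spm_prompt(&self) -> String {
        format!(
            "<fim_suffix>{}<fim_prefix>{}<fim_middle>{}",
            self.suffix, self.prefix, self.middle
        )
    }
}
```
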
list.rs | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! A row or column with run-time adjustable contents
use std::ops::{Index, IndexMut};
use kas::dir::{Down, Right};
use kas::layout::{self, RulesSetter, RulesSolver};
use kas::{event, prelude::*};
/// A generic row widget
///
/// See documentation of [`List`] type.
pub type Row<W> = List<Right, W>;
/// A generic column widget
///
/// See documentation of [`List`] type.
pub type Column<W> = List<Down, W>;
/// A row of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxRow<M> = BoxList<Right, M>;
/// A column of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxColumn<M> = BoxList<Down, M>;
/// A row/column of boxed widgets
///
/// This is parameterised over directionality and handler message type.
///
/// See documentation of [`List`] type.
pub type BoxList<D, M> = List<D, Box<dyn Widget<Msg = M>>>;
/// A generic row/column widget
///
/// This type is generic over both directionality and the type of child widgets.
/// Essentially, it is a [`Vec`] which also implements the [`Widget`] trait.
///
/// [`Row`] and [`Column`] are parameterisations with set directionality.
///
/// [`BoxList`] (and its derivatives [`BoxRow`], [`BoxColumn`]) parameterise
/// `W = Box<dyn Widget>`, thus supporting individually boxed child widgets.
/// This allows use of multiple types of child widget at the cost of extra
/// allocation, and requires dynamic dispatch of methods.
///
/// Configuring and resizing elements is O(n) in the number of children.
/// Drawing and event handling is O(log n) in the number of children (assuming
/// only a small number are visible at any one time).
///
/// For fixed configurations of child widgets, [`make_widget`] can be used
/// instead. [`make_widget`] has the advantage that it can support child widgets
/// of multiple types without allocation and via static dispatch, but the
/// disadvantage that drawing and event handling are O(n) in the number of
/// children.
///
/// [`make_widget`]: ../macros/index.html#the-make_widget-macro
#[derive(Clone, Default, Debug, Widget)]
#[handler(send=noauto, msg=(usize, <W as event::Handler>::Msg))]
#[widget(children=noauto)]
pub struct List<D: Directional, W: Widget> {
first_id: WidgetId,
#[widget_core]
core: CoreData,
widgets: Vec<W>,
data: layout::DynRowStorage,
direction: D,
}
impl<D: Directional, W: Widget> WidgetChildren for List<D, W> {
#[inline]
fn first_id(&self) -> WidgetId {
self.first_id
}
fn record_first_id(&mut self, id: WidgetId) {
self.first_id = id;
}
#[inline]
fn num_children(&self) -> usize {
self.widgets.len()
}
#[inline]
fn get_child(&self, index: usize) -> Option<&dyn WidgetConfig> {
self.widgets.get(index).map(|w| w.as_widget())
}
#[inline]
fn get_child_mut(&mut self, index: usize) -> Option<&mut dyn WidgetConfig> {
self.widgets.get_mut(index).map(|w| w.as_widget_mut())
}
}
impl<D: Directional, W: Widget> Layout for List<D, W> {
fn size_rules(&mut self, size_handle: &mut dyn SizeHandle, axis: AxisInfo) -> SizeRules {
let dim = (self.direction, self.widgets.len());
let mut solver = layout::RowSolver::new(axis, dim, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
solver.for_child(&mut self.data, n, |axis| {
child.size_rules(size_handle, axis)
});
}
solver.finish(&mut self.data)
}
fn set_rect(&mut self, mgr: &mut Manager, rect: Rect, align: AlignHints) {
self.core.rect = rect;
let dim = (self.direction, self.widgets.len());
let mut setter = layout::RowSetter::<D, Vec<i32>, _>::new(rect, dim, align, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
child.set_rect(mgr, setter.child_rect(&mut self.data, n), align);
}
}
fn spatial_nav(&self, reverse: bool, from: Option<usize>) -> Option<usize> {
if self.num_children() == 0 {
return None;
}
let last = self.num_children() - 1;
let reverse = reverse ^ self.direction.is_reversed();
if let Some(index) = from {
match reverse {
false if index < last => Some(index + 1),
true if 0 < index => Some(index - 1),
_ => None,
}
} else {
match reverse {
false => Some(0),
true => Some(last),
}
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
if !self.rect().contains(coord) {
return None;
}
let solver = layout::RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) {
return child.find_id(coord);
}
Some(self.id())
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &event::ManagerState, disabled: bool) {
let disabled = disabled || self.is_disabled();
let solver = layout::RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.get_clip_rect(), |w| {
w.draw(draw_handle, mgr, disabled)
});
}
}
impl<D: Directional, W: Widget> event::SendEvent for List<D, W> {
fn send(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
if !self.is_disabled() {
for (i, child) in self.widgets.iter_mut().enumerate() {
if id <= child.id() {
let r = child.send(mgr, id, event);
return match Response::try_from(r) {
Ok(r) => r,
Err(msg) => {
log::trace!(
"Received by {} from {}: {:?}",
self.id(),
id,
kas::util::TryFormat(&msg)
);
Response::Msg((i, msg))
}
};
}
}
}
Response::Unhandled
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
impl<W: Widget> List<Direction, W> {
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.direction = direction;
// Note: most of the time SET_SIZE would be enough, but margins can be different
TkAction::RESIZE
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.direction.as_direction()
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.widgets.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self) -> TkAction {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
self.widgets.clear();
action
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, widget: W) -> TkAction {
self.widgets.push(widget);
TkAction::RECONFIGURE
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, this
/// triggers a reconfigure before the next draw operation.
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn pop(&mut self) -> (Option<W>, TkAction) {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
(self.widgets.pop(), action)
}
/// Inserts a child widget at position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, index: usize, widget: W) -> TkAction {
self.widgets.insert(index, widget);
TkAction::RECONFIGURE
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, index: usize) -> (W, TkAction) {
let r = self.widgets.remove(index);
(r, TkAction::RECONFIGURE)
}
/// Replace the child at `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
// TODO: in theory it is possible to avoid a reconfigure where both widgets
// have no children and have compatible size. Is this a good idea and can
// we somehow test "has compatible size"?
pub fn replace(&mut self, index: usize, mut widget: W) -> (W, TkAction) {
std::mem::swap(&mut widget, &mut self.widgets[index]);
(widget, TkAction::RECONFIGURE)
}
/// Append child widgets from an iterator
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are added.
pub fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) -> TkAction {
let len = self.widgets.len();
self.widgets.extend(iter);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Resize, using the given closure to construct new widgets
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn resize_with<F: Fn(usize) -> W>(&mut self, len: usize, f: F) -> TkAction {
let l0 = self.widgets.len();
if l0 == len {
return TkAction::empty();
} else if l0 > len {
self.widgets.truncate(len);
} else {
self.widgets.reserve(len);
for i in l0..len {
self.widgets.push(f(i));
}
}
TkAction::RECONFIGURE
}
/// Retain only widgets satisfying predicate `f`
///
/// See documentation of [`Vec::retain`].
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are removed.
pub fn retain<F: FnMut(&W) -> bool>(&mut self, f: F) -> TkAction {
let len = self.widgets.len();
self.widgets.retain(f);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &W> {
ListIter {
list: &self.widgets,
}
}
/// Get the index of the child which is an ancestor of `id`, if any
pub fn find_child_index(&self, id: WidgetId) -> Option<usize> {
if id >= self.first_id {
for (i, child) in self.widgets.iter().enumerate() {
if id <= child.id() {
return Some(i);
}
}
}
None
}
}
impl<D: Directional, W: Widget> Index<usize> for List<D, W> {
type Output = W;
fn index(&self, index: usize) -> &Self::Output {
&self.widgets[index]
}
}
impl<D: Directional, W: Widget> IndexMut<usize> for List<D, W> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.widgets[index]
}
}
struct ListIter<'a, W: Widget> {
list: &'a [W],
} | self.list = &self.list[1..];
Some(item)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<'a, W: Widget> ExactSizeIterator for ListIter<'a, W> {
fn len(&self) -> usize {
self.list.len()
}
} | impl<'a, W: Widget> Iterator for ListIter<'a, W> {
type Item = &'a W;
fn next(&mut self) -> Option<Self::Item> {
if !self.list.is_empty() {
let item = &self.list[0]; | random_line_split |
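
Note how this `random_line_split` middle starts at the `impl<'a, W: Widget> Iterator` header and stops inside the body of `next`: line splits are free to cross syntactic units, unlike `conditional_block` holes. If the line-boundary property itself needs checking, a predicate over the hypothetical `FimRow` suffices (assuming, as before, that the three columns concatenate to the original file):

```rust
/// True if both cuts (prefix/middle and middle/suffix) fall at line
/// boundaries: the start of the file or just after a newline.
fn cuts_on_line_boundaries(row: &FimRow) -> bool {
    let at_boundary = |left: &str| left.is_empty() || left.ends_with('\n');
    let before_suffix = format!("{}{}", row.prefix, row.middle);
    at_boundary(&row.prefix) && at_boundary(&before_suffix)
}
```
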
list.rs | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! A row or column with run-time adjustable contents
use std::ops::{Index, IndexMut};
use kas::dir::{Down, Right};
use kas::layout::{self, RulesSetter, RulesSolver};
use kas::{event, prelude::*};
/// A generic row widget
///
/// See documentation of [`List`] type.
pub type Row<W> = List<Right, W>;
/// A generic column widget
///
/// See documentation of [`List`] type.
pub type Column<W> = List<Down, W>;
/// A row of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxRow<M> = BoxList<Right, M>;
/// A column of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxColumn<M> = BoxList<Down, M>;
/// A row/column of boxed widgets
///
/// This is parameterised over directionality and handler message type.
///
/// See documentation of [`List`] type.
pub type BoxList<D, M> = List<D, Box<dyn Widget<Msg = M>>>;
/// A generic row/column widget
///
/// This type is generic over both directionality and the type of child widgets.
/// Essentially, it is a [`Vec`] which also implements the [`Widget`] trait.
///
/// [`Row`] and [`Column`] are parameterisations with set directionality.
///
/// [`BoxList`] (and its derivatives [`BoxRow`], [`BoxColumn`]) parameterise
/// `W = Box<dyn Widget>`, thus supporting individually boxed child widgets.
/// This allows use of multiple types of child widget at the cost of extra
/// allocation, and requires dynamic dispatch of methods.
///
/// Configuring and resizing elements is O(n) in the number of children.
/// Drawing and event handling is O(log n) in the number of children (assuming
/// only a small number are visible at any one time).
///
/// For fixed configurations of child widgets, [`make_widget`] can be used
/// instead. [`make_widget`] has the advantage that it can support child widgets
/// of multiple types without allocation and via static dispatch, but the
/// disadvantage that drawing and event handling are O(n) in the number of
/// children.
///
/// [`make_widget`]: ../macros/index.html#the-make_widget-macro
#[derive(Clone, Default, Debug, Widget)]
#[handler(send=noauto, msg=(usize, <W as event::Handler>::Msg))]
#[widget(children=noauto)]
pub struct List<D: Directional, W: Widget> {
first_id: WidgetId,
#[widget_core]
core: CoreData,
widgets: Vec<W>,
data: layout::DynRowStorage,
direction: D,
}
impl<D: Directional, W: Widget> WidgetChildren for List<D, W> {
#[inline]
fn first_id(&self) -> WidgetId {
self.first_id
}
fn record_first_id(&mut self, id: WidgetId) {
self.first_id = id;
}
#[inline]
fn num_children(&self) -> usize {
self.widgets.len()
}
#[inline]
fn get_child(&self, index: usize) -> Option<&dyn WidgetConfig> {
self.widgets.get(index).map(|w| w.as_widget())
}
#[inline]
fn get_child_mut(&mut self, index: usize) -> Option<&mut dyn WidgetConfig> {
self.widgets.get_mut(index).map(|w| w.as_widget_mut())
}
}
impl<D: Directional, W: Widget> Layout for List<D, W> {
fn size_rules(&mut self, size_handle: &mut dyn SizeHandle, axis: AxisInfo) -> SizeRules {
let dim = (self.direction, self.widgets.len());
let mut solver = layout::RowSolver::new(axis, dim, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
solver.for_child(&mut self.data, n, |axis| {
child.size_rules(size_handle, axis)
});
}
solver.finish(&mut self.data)
}
fn set_rect(&mut self, mgr: &mut Manager, rect: Rect, align: AlignHints) {
self.core.rect = rect;
let dim = (self.direction, self.widgets.len());
let mut setter = layout::RowSetter::<D, Vec<i32>, _>::new(rect, dim, align, &mut self.data);
for (n, child) in self.widgets.iter_mut().enumerate() {
child.set_rect(mgr, setter.child_rect(&mut self.data, n), align);
}
}
fn spatial_nav(&self, reverse: bool, from: Option<usize>) -> Option<usize> {
if self.num_children() == 0 {
return None;
}
let last = self.num_children() - 1;
let reverse = reverse ^ self.direction.is_reversed();
if let Some(index) = from {
match reverse {
false if index < last => Some(index + 1),
true if 0 < index => Some(index - 1),
_ => None,
}
} else {
match reverse {
false => Some(0),
true => Some(last),
}
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
        if !self.rect().contains(coord) {
return None;
}
let solver = layout::RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) {
return child.find_id(coord);
}
Some(self.id())
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &event::ManagerState, disabled: bool) {
let disabled = disabled || self.is_disabled();
let solver = layout::RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.get_clip_rect(), |w| {
w.draw(draw_handle, mgr, disabled)
});
}
}
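// Standalone sketch of the index-stepping logic in `spatial_nav` above, free
// of kas types: `reverse` is XORed with the widget's own reversed flag before
// stepping, so a reversed row navigates right-to-left through the same path.
fn spatial_nav_model(len: usize, reverse: bool, from: Option<usize>) -> Option<usize> {
    if len == 0 {
        return None;
    }
    let last = len - 1;
    match from {
        Some(index) => match reverse {
            false if index < last => Some(index + 1),
            true if 0 < index => Some(index - 1),
            _ => None,
        },
        None if reverse => Some(last),
        None => Some(0),
    }
}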
impl<D: Directional, W: Widget> event::SendEvent for List<D, W> {
fn send(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
        if !self.is_disabled() {
for (i, child) in self.widgets.iter_mut().enumerate() {
if id <= child.id() {
let r = child.send(mgr, id, event);
return match Response::try_from(r) {
Ok(r) => r,
Err(msg) => {
log::trace!(
"Received by {} from {}: {:?}",
self.id(),
id,
kas::util::TryFormat(&msg)
);
Response::Msg((i, msg))
}
};
}
}
}
Response::Unhandled
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
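// Hedged usage sketch: constructing a `Row` from a vector of children.
// `MyButton` is a hypothetical widget type standing in for any `W: Widget`;
// no concrete kas widget is assumed here.
//
// let row: Row<MyButton> = Row::new(vec![
//     MyButton::new("a"),
//     MyButton::new("b"),
// ]);
// assert_eq!(row.len(), 2);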
impl<W: Widget> List<Direction, W> {
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.direction = direction;
// Note: most of the time SET_SIZE would be enough, but margins can be different
TkAction::RESIZE
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
first_id: Default::default(),
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.direction.as_direction()
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize |
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self) -> TkAction {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
self.widgets.clear();
action
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, widget: W) -> TkAction {
self.widgets.push(widget);
TkAction::RECONFIGURE
}
/// Remove the last child widget
///
    /// Returns `None` if there are no children. Otherwise, the last child is
    /// removed and returned, and a [reconfigure action](Manager::send_action)
    /// is triggered.
pub fn pop(&mut self) -> (Option<W>, TkAction) {
let action = match self.widgets.is_empty() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
};
(self.widgets.pop(), action)
}
    /// Inserts a child widget at position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, index: usize, widget: W) -> TkAction {
self.widgets.insert(index, widget);
TkAction::RECONFIGURE
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, index: usize) -> (W, TkAction) {
let r = self.widgets.remove(index);
(r, TkAction::RECONFIGURE)
}
/// Replace the child at `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
// TODO: in theory it is possible to avoid a reconfigure where both widgets
// have no children and have compatible size. Is this a good idea and can
// we somehow test "has compatible size"?
pub fn replace(&mut self, index: usize, mut widget: W) -> (W, TkAction) {
std::mem::swap(&mut widget, &mut self.widgets[index]);
(widget, TkAction::RECONFIGURE)
}
/// Append child widgets from an iterator
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are added.
pub fn extend<T: IntoIterator<Item = W>>(&mut self, iter: T) -> TkAction {
let len = self.widgets.len();
self.widgets.extend(iter);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
/// Resize, using the given closure to construct new widgets
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn resize_with<F: Fn(usize) -> W>(&mut self, len: usize, f: F) -> TkAction {
let l0 = self.widgets.len();
if l0 == len {
return TkAction::empty();
} else if l0 > len {
self.widgets.truncate(len);
} else {
self.widgets.reserve(len);
for i in l0..len {
self.widgets.push(f(i));
}
}
TkAction::RECONFIGURE
}
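    // Hedged usage sketch for `resize_with`; `make_child` is hypothetical:
    //
    // let action = list.resize_with(8, |i| make_child(i)); // grows, RECONFIGURE
    // let action = list.resize_with(8, |i| make_child(i)); // same len, empty()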
/// Retain only widgets satisfying predicate `f`
///
/// See documentation of [`Vec::retain`].
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are removed.
pub fn retain<F: FnMut(&W) -> bool>(&mut self, f: F) -> TkAction {
let len = self.widgets.len();
self.widgets.retain(f);
match len == self.widgets.len() {
true => TkAction::empty(),
false => TkAction::RECONFIGURE,
}
}
    /// Iterate over children
pub fn iter(&self) -> impl Iterator<Item = &W> {
ListIter {
list: &self.widgets,
}
}
/// Get the index of the child which is an ancestor of `id`, if any
pub fn find_child_index(&self, id: WidgetId) -> Option<usize> {
if id >= self.first_id {
for (i, child) in self.widgets.iter().enumerate() {
if id <= child.id() {
return Some(i);
}
}
}
None
}
}
impl<D: Directional, W: Widget> Index<usize> for List<D, W> {
type Output = W;
fn index(&self, index: usize) -> &Self::Output {
&self.widgets[index]
}
}
impl<D: Directional, W: Widget> IndexMut<usize> for List<D, W> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.widgets[index]
}
}
struct ListIter<'a, W: Widget> {
list: &'a [W],
}
impl<'a, W: Widget> Iterator for ListIter<'a, W> {
type Item = &'a W;
fn next(&mut self) -> Option<Self::Item> {
        if !self.list.is_empty() {
let item = &self.list[0];
self.list = &self.list[1..];
Some(item)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<'a, W: Widget> ExactSizeIterator for ListIter<'a, W> {
fn len(&self) -> usize {
self.list.len()
}
}
| {
self.widgets.capacity()
} | identifier_body |
lib.rs | // This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # WARNING
//!
//! **DO NOT USE ON VALUE-BEARING CHAINS. THIS PALLET IS ONLY INTENDED FOR TESTING USAGE.**
//!
//! # Glutton Pallet
//!
//! Pallet that consumes `ref_time` and `proof_size` of a block. Based on the
//! `Compute` and `Storage` parameters the pallet consumes the adequate amount
//! of weight.
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub mod weights;
use blake2::{Blake2b512, Digest};
use frame_support::{pallet_prelude::*, weights::WeightMeter, DefaultNoBound};
use frame_system::pallet_prelude::*;
use sp_io::hashing::twox_256;
use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64};
use sp_std::{vec, vec::Vec};
pub use pallet::*;
pub use weights::WeightInfo;
/// The size of each value in the `TrashData` storage in bytes.
pub const VALUE_SIZE: usize = 1024;
/// Max number of entries for the `TrashData` map.
pub const MAX_TRASH_DATA_ENTRIES: u32 = 65_000;
/// Hard limit for any other resource limit (in units).
pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10);
#[frame_support::pallet]
pub mod pallet {
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// The admin origin that can set computational limits and initialize the pallet.
type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
/// Weight information for this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::pallet]
pub struct Pallet<T>(_);
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// The pallet has been (re)initialized.
PalletInitialized {
/// Whether the pallet has been re-initialized.
reinit: bool,
},
/// The computation limit has been updated.
ComputationLimitSet {
/// The computation limit.
compute: FixedU64,
},
/// The storage limit has been updated.
StorageLimitSet {
/// The storage limit.
storage: FixedU64,
},
}
#[pallet::error]
pub enum Error<T> {
/// The pallet was already initialized.
///
/// Set `witness_count` to `Some` to bypass this error.
AlreadyInitialized,
/// The limit was over [`crate::RESOURCE_HARD_LIMIT`].
InsaneLimit,
}
/// The proportion of the remaining `ref_time` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// The proportion of the remaining `proof_size` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// Storage map used for wasting proof size.
///
/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
/// pallet would also work out of the box with more entries, but its benchmarked proof weight
/// would possibly be underestimated in that case.
#[pallet::storage]
pub(super) type TrashData<T: Config> = StorageMap<
Hasher = Twox64Concat,
Key = u32,
Value = [u8; VALUE_SIZE],
QueryKind = OptionQuery,
MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
>;
/// The current number of entries in `TrashData`.
#[pallet::storage]
pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
/// The compute limit.
pub compute: FixedU64,
/// The storage limit.
pub storage: FixedU64,
/// The amount of trash data for wasting proof size.
pub trash_data_count: u32,
#[serde(skip)]
/// The required configuration field.
pub _config: sp_std::marker::PhantomData<T>,
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
assert!(
self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
"number of TrashData entries cannot be bigger than {:?}",
MAX_TRASH_DATA_ENTRIES
);
(0..self.trash_data_count)
.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));
TrashDataCount::<T>::set(self.trash_data_count);
assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
<Compute<T>>::put(self.compute);
assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
<Storage<T>>::put(self.storage);
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
assert!(
!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
assert!(
!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
}
fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
let mut meter = WeightMeter::from_limit(remaining_weight);
if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
return T::WeightInfo::empty_on_idle()
}
let proof_size_limit =
Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
let computation_weight_limit =
Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
let mut meter = WeightMeter::from_limit(Weight::from_parts(
computation_weight_limit,
proof_size_limit,
));
Self::waste_at_most_proof_size(&mut meter);
Self::waste_at_most_ref_time(&mut meter);
meter.consumed()
}
}
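    // Standalone model of the scaling above: `Compute` and `Storage` are
    // fractions (FixedU64) of the *remaining* block weight, so e.g. a value
    // of 0.5 consumes at most half of whatever is still unused after the
    // mandatory work. Plain integers stand in for Weight parts; thousandths
    // stand in for FixedU64.
    fn scaled_limit_model(remaining: u64, fraction_milli: u64) -> u64 {
        // 500 models 0.5, 1_000 models 1.0, 10_000 models the 10x hard limit.
        remaining.saturating_mul(fraction_milli) / 1_000
    }
    // e.g. scaled_limit_model(2_000_000, 500) == 1_000_000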
#[pallet::call(weight = T::WeightInfo)]
impl<T: Config> Pallet<T> {
/// Initialize the pallet. Should be called once, if no genesis state was provided.
///
        /// `witness_count` is the current number of elements in `TrashData`. This can be set to
/// `None` when the pallet is first initialized.
///
/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
#[pallet::call_index(0)]
#[pallet::weight(
T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
)]
pub fn initialize_pallet(
origin: OriginFor<T>,
new_count: u32,
witness_count: Option<u32>,
) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
let current_count = TrashDataCount::<T>::get();
ensure!(
current_count == witness_count.unwrap_or_default(),
Error::<T>::AlreadyInitialized
);
if new_count > current_count {
(current_count..new_count)
.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
} else {
(new_count..current_count).for_each(TrashData::<T>::remove);
}
Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
TrashDataCount::<T>::set(new_count);
Ok(())
}
/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(1)]
pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Compute::<T>::set(compute);
Self::deposit_event(Event::ComputationLimitSet { compute });
Ok(())
}
/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
///
/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
/// results that are used here are likely an over-estimation. 100% intended consumption will
/// therefore translate to less than 100% actual consumption.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(2)]
pub fn set_storage(origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(storage <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Storage::<T>::set(storage);
Self::deposit_event(Event::StorageLimitSet { storage });
Ok(())
}
}
impl<T: Config> Pallet<T> {
/// Waste at most the remaining proof size of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_proof_size(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_proof_size_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_proof_size_some(n));
(0..n).for_each(|i| {
TrashData::<T>::get(i);
});
}
/// Calculate how many times `waste_proof_size_some` should be called to fill up `meter`.
fn calculate_proof_size_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_proof_size_some(0);
let slope = T::WeightInfo::waste_proof_size_some(1).saturating_sub(base);
let remaining = meter.remaining().saturating_sub(base);
let iter_by_proof_size =
remaining.proof_size().checked_div(slope.proof_size()).ok_or(())?;
let iter_by_ref_time = remaining.ref_time().checked_div(slope.ref_time()).ok_or(())?;
if iter_by_proof_size > 0 && iter_by_proof_size <= iter_by_ref_time {
Ok(iter_by_proof_size as u32)
} else |
}
/// Waste at most the remaining ref time weight of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_ref_time(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_ref_time_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_ref_time_iter(n));
let clobber = Self::waste_ref_time_iter(vec![0u8; 64], n);
// By casting it into a vec we can hopefully prevent the compiler from optimizing it
// out. Note that `Blake2b512` produces 64 bytes, this is therefore impossible - but the
// compiler does not know that (hopefully).
debug_assert!(clobber.len() == 64);
if clobber.len() == 65 {
TrashData::<T>::insert(0, [clobber[0] as u8; VALUE_SIZE]);
}
}
/// Wastes some `ref_time`. Receives the previous result as an argument.
///
/// The ref_time of one iteration should be in the order of 1-10 ms.
pub(crate) fn waste_ref_time_iter(clobber: Vec<u8>, i: u32) -> Vec<u8> {
let mut hasher = Blake2b512::new();
// Blake2 has a very high speed of hashing so we make multiple hashes with it to
// waste more `ref_time` at once.
(0..i).for_each(|_| {
hasher.update(clobber.as_slice());
});
hasher.finalize().to_vec()
}
/// Calculate how many times `waste_ref_time_iter` should be called to fill up `meter`.
fn calculate_ref_time_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_ref_time_iter(0);
let slope = T::WeightInfo::waste_ref_time_iter(1).saturating_sub(base);
            if !slope.proof_size().is_zero() || !base.proof_size().is_zero() {
return Err(())
}
match meter
.remaining()
.ref_time()
.saturating_sub(base.ref_time())
.checked_div(slope.ref_time())
{
Some(0) | None => Err(()),
Some(i) => Ok(i as u32),
}
}
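        // Worked example of the linear model above with made-up numbers: if
        // one call costs `base` and each iteration adds `slope`, the count
        // that fits is (remaining - base) / slope, rounded down:
        // remaining = 10_000, base = 1_000, slope = 2_000  =>  4 iterations.
        fn ref_time_iters_model(remaining: u64, base: u64, slope: u64) -> Option<u64> {
            match remaining.saturating_sub(base).checked_div(slope) {
                Some(0) | None => None, // nothing fits, or slope is zero
                Some(i) => Some(i),
            }
        }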
/// Generate a pseudo-random deterministic value from a `seed`.
pub(crate) fn gen_value(seed: u32) -> [u8; VALUE_SIZE] {
let mut ret = [0u8; VALUE_SIZE];
for i in 0u32..(VALUE_SIZE as u32 / 32) {
let hash = (seed, i).using_encoded(twox_256);
ret[i as usize * 32..(i + 1) as usize * 32].copy_from_slice(&hash);
}
ret
}
}
}
| {
Err(())
} | conditional_block |
lib.rs | // This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # WARNING
//!
//! **DO NOT USE ON VALUE-BEARING CHAINS. THIS PALLET IS ONLY INTENDED FOR TESTING USAGE.**
//!
//! # Glutton Pallet
//!
//! Pallet that consumes `ref_time` and `proof_size` of a block. Based on the
//! `Compute` and `Storage` parameters the pallet consumes the adequate amount
//! of weight.
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub mod weights;
use blake2::{Blake2b512, Digest};
use frame_support::{pallet_prelude::*, weights::WeightMeter, DefaultNoBound};
use frame_system::pallet_prelude::*;
use sp_io::hashing::twox_256;
use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64};
use sp_std::{vec, vec::Vec};
pub use pallet::*;
pub use weights::WeightInfo;
/// The size of each value in the `TrashData` storage in bytes.
pub const VALUE_SIZE: usize = 1024;
/// Max number of entries for the `TrashData` map.
pub const MAX_TRASH_DATA_ENTRIES: u32 = 65_000;
/// Hard limit for any other resource limit (in units).
pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10);
#[frame_support::pallet]
pub mod pallet {
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// The admin origin that can set computational limits and initialize the pallet.
type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
/// Weight information for this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::pallet]
pub struct Pallet<T>(_);
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// The pallet has been (re)initialized.
PalletInitialized {
/// Whether the pallet has been re-initialized.
reinit: bool,
},
/// The computation limit has been updated.
ComputationLimitSet {
/// The computation limit.
compute: FixedU64,
},
/// The storage limit has been updated.
StorageLimitSet {
/// The storage limit.
storage: FixedU64,
},
}
#[pallet::error]
pub enum Error<T> {
/// The pallet was already initialized.
///
/// Set `witness_count` to `Some` to bypass this error.
AlreadyInitialized,
/// The limit was over [`crate::RESOURCE_HARD_LIMIT`].
InsaneLimit,
}
/// The proportion of the remaining `ref_time` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// The proportion of the remaining `proof_size` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// Storage map used for wasting proof size.
///
/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
/// pallet would also work out of the box with more entries, but its benchmarked proof weight
/// would possibly be underestimated in that case.
#[pallet::storage]
pub(super) type TrashData<T: Config> = StorageMap<
Hasher = Twox64Concat,
Key = u32,
Value = [u8; VALUE_SIZE],
QueryKind = OptionQuery,
MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
>;
/// The current number of entries in `TrashData`.
#[pallet::storage]
pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
/// The compute limit.
pub compute: FixedU64,
/// The storage limit.
pub storage: FixedU64,
/// The amount of trash data for wasting proof size.
pub trash_data_count: u32,
#[serde(skip)]
/// The required configuration field.
pub _config: sp_std::marker::PhantomData<T>,
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
assert!(
self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
"number of TrashData entries cannot be bigger than {:?}",
MAX_TRASH_DATA_ENTRIES
);
(0..self.trash_data_count)
.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));
TrashDataCount::<T>::set(self.trash_data_count);
assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
<Compute<T>>::put(self.compute);
assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
<Storage<T>>::put(self.storage);
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
assert!(
!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
assert!(
!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
}
fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
let mut meter = WeightMeter::from_limit(remaining_weight);
if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
return T::WeightInfo::empty_on_idle()
}
let proof_size_limit =
Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
let computation_weight_limit =
Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
let mut meter = WeightMeter::from_limit(Weight::from_parts(
computation_weight_limit,
proof_size_limit,
));
Self::waste_at_most_proof_size(&mut meter);
Self::waste_at_most_ref_time(&mut meter);
meter.consumed()
}
}
#[pallet::call(weight = T::WeightInfo)]
impl<T: Config> Pallet<T> {
/// Initialize the pallet. Should be called once, if no genesis state was provided.
///
        /// `witness_count` is the current number of elements in `TrashData`. This can be set to
/// `None` when the pallet is first initialized.
///
/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
#[pallet::call_index(0)]
#[pallet::weight(
T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
)]
pub fn initialize_pallet(
origin: OriginFor<T>,
new_count: u32,
witness_count: Option<u32>,
) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
let current_count = TrashDataCount::<T>::get();
ensure!(
current_count == witness_count.unwrap_or_default(),
Error::<T>::AlreadyInitialized
);
if new_count > current_count {
(current_count..new_count)
.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
} else {
(new_count..current_count).for_each(TrashData::<T>::remove);
}
Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
TrashDataCount::<T>::set(new_count);
Ok(())
}
/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(1)]
pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult |
/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
///
/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
/// results that are used here are likely an over-estimation. 100% intended consumption will
/// therefore translate to less than 100% actual consumption.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(2)]
pub fn set_storage(origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(storage <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Storage::<T>::set(storage);
Self::deposit_event(Event::StorageLimitSet { storage });
Ok(())
}
}
impl<T: Config> Pallet<T> {
/// Waste at most the remaining proof size of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_proof_size(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_proof_size_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_proof_size_some(n));
(0..n).for_each(|i| {
TrashData::<T>::get(i);
});
}
/// Calculate how many times `waste_proof_size_some` should be called to fill up `meter`.
fn calculate_proof_size_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_proof_size_some(0);
let slope = T::WeightInfo::waste_proof_size_some(1).saturating_sub(base);
let remaining = meter.remaining().saturating_sub(base);
let iter_by_proof_size =
remaining.proof_size().checked_div(slope.proof_size()).ok_or(())?;
let iter_by_ref_time = remaining.ref_time().checked_div(slope.ref_time()).ok_or(())?;
if iter_by_proof_size > 0 && iter_by_proof_size <= iter_by_ref_time {
Ok(iter_by_proof_size as u32)
} else {
Err(())
}
}
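        // Standalone model of the two-sided check above: the count is driven
        // by proof_size (the resource being wasted) but must also fit within
        // the ref_time budget, otherwise reading TrashData would overrun
        // compute. Plain integers stand in for the two Weight components.
        fn proof_size_iters_model(
            rem_proof: u64,  // remaining proof_size after the base cost
            rem_ref: u64,    // remaining ref_time after the base cost
            slope_proof: u64,
            slope_ref: u64,
        ) -> Result<u64, ()> {
            let by_proof = rem_proof.checked_div(slope_proof).ok_or(())?;
            let by_ref = rem_ref.checked_div(slope_ref).ok_or(())?;
            if by_proof > 0 && by_proof <= by_ref {
                Ok(by_proof)
            } else {
                Err(())
            }
        }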
/// Waste at most the remaining ref time weight of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_ref_time(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_ref_time_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_ref_time_iter(n));
let clobber = Self::waste_ref_time_iter(vec![0u8; 64], n);
// By casting it into a vec we can hopefully prevent the compiler from optimizing it
// out. Note that `Blake2b512` produces 64 bytes, this is therefore impossible - but the
// compiler does not know that (hopefully).
debug_assert!(clobber.len() == 64);
if clobber.len() == 65 {
TrashData::<T>::insert(0, [clobber[0] as u8; VALUE_SIZE]);
}
}
/// Wastes some `ref_time`. Receives the previous result as an argument.
///
/// The ref_time of one iteration should be in the order of 1-10 ms.
pub(crate) fn waste_ref_time_iter(clobber: Vec<u8>, i: u32) -> Vec<u8> {
let mut hasher = Blake2b512::new();
// Blake2 has a very high speed of hashing so we make multiple hashes with it to
// waste more `ref_time` at once.
(0..i).for_each(|_| {
hasher.update(clobber.as_slice());
});
hasher.finalize().to_vec()
}
/// Calculate how many times `waste_ref_time_iter` should be called to fill up `meter`.
fn calculate_ref_time_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_ref_time_iter(0);
let slope = T::WeightInfo::waste_ref_time_iter(1).saturating_sub(base);
            if !slope.proof_size().is_zero() || !base.proof_size().is_zero() {
return Err(())
}
match meter
.remaining()
.ref_time()
.saturating_sub(base.ref_time())
.checked_div(slope.ref_time())
{
Some(0) | None => Err(()),
Some(i) => Ok(i as u32),
}
}
/// Generate a pseudo-random deterministic value from a `seed`.
pub(crate) fn gen_value(seed: u32) -> [u8; VALUE_SIZE] {
let mut ret = [0u8; VALUE_SIZE];
for i in 0u32..(VALUE_SIZE as u32 / 32) {
let hash = (seed, i).using_encoded(twox_256);
ret[i as usize * 32..(i + 1) as usize * 32].copy_from_slice(&hash);
}
ret
}
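        // The loop above fills VALUE_SIZE = 1024 bytes as 32 chunks of 32
        // bytes, one twox_256 digest of (seed, chunk_index) per chunk, so an
        // entry is fully determined by its seed. Minimal sketch with a
        // hypothetical 32-byte hash function:
        fn gen_value_model(seed: u32, hash32: impl Fn(u32, u32) -> [u8; 32]) -> [u8; 1024] {
            let mut ret = [0u8; 1024];
            for i in 0..32usize {
                ret[i * 32..(i + 1) * 32].copy_from_slice(&hash32(seed, i as u32));
            }
            ret
        }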
}
}
| {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Compute::<T>::set(compute);
Self::deposit_event(Event::ComputationLimitSet { compute });
Ok(())
} | identifier_body |
lib.rs | // This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # WARNING
//!
//! **DO NOT USE ON VALUE-BEARING CHAINS. THIS PALLET IS ONLY INTENDED FOR TESTING USAGE.**
//!
//! # Glutton Pallet
//!
//! Pallet that consumes `ref_time` and `proof_size` of a block. Based on the
//! `Compute` and `Storage` parameters the pallet consumes the adequate amount
//! of weight.
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub mod weights;
use blake2::{Blake2b512, Digest};
use frame_support::{pallet_prelude::*, weights::WeightMeter, DefaultNoBound};
use frame_system::pallet_prelude::*;
use sp_io::hashing::twox_256;
use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64};
use sp_std::{vec, vec::Vec};
pub use pallet::*;
pub use weights::WeightInfo;
/// The size of each value in the `TrashData` storage in bytes.
pub const VALUE_SIZE: usize = 1024;
/// Max number of entries for the `TrashData` map.
pub const MAX_TRASH_DATA_ENTRIES: u32 = 65_000;
/// Hard limit for any other resource limit (in units).
pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10);
#[frame_support::pallet]
pub mod pallet {
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// The admin origin that can set computational limits and initialize the pallet.
type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
/// Weight information for this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::pallet]
pub struct Pallet<T>(_);
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// The pallet has been (re)initialized.
PalletInitialized {
/// Whether the pallet has been re-initialized.
reinit: bool,
},
/// The computation limit has been updated.
ComputationLimitSet {
/// The computation limit.
compute: FixedU64,
},
/// The storage limit has been updated.
StorageLimitSet {
/// The storage limit.
storage: FixedU64,
},
}
#[pallet::error]
pub enum Error<T> {
/// The pallet was already initialized.
///
/// Set `witness_count` to `Some` to bypass this error.
AlreadyInitialized,
/// The limit was over [`crate::RESOURCE_HARD_LIMIT`].
InsaneLimit,
}
/// The proportion of the remaining `ref_time` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// The proportion of the remaining `proof_size` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// Storage map used for wasting proof size.
///
/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
/// pallet would also work out of the box with more entries, but its benchmarked proof weight
/// would possibly be underestimated in that case.
#[pallet::storage]
pub(super) type TrashData<T: Config> = StorageMap<
Hasher = Twox64Concat,
Key = u32,
Value = [u8; VALUE_SIZE],
QueryKind = OptionQuery,
MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
>;
/// The current number of entries in `TrashData`.
#[pallet::storage]
pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
/// The compute limit.
pub compute: FixedU64,
/// The storage limit.
pub storage: FixedU64,
/// The amount of trash data for wasting proof size.
pub trash_data_count: u32,
#[serde(skip)]
/// The required configuration field.
pub _config: sp_std::marker::PhantomData<T>,
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
assert!(
self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
"number of TrashData entries cannot be bigger than {:?}",
MAX_TRASH_DATA_ENTRIES
);
(0..self.trash_data_count)
.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));
TrashDataCount::<T>::set(self.trash_data_count);
assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
<Compute<T>>::put(self.compute);
assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
<Storage<T>>::put(self.storage);
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
assert!(
!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
assert!(
!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
}
fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
let mut meter = WeightMeter::from_limit(remaining_weight);
if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
return T::WeightInfo::empty_on_idle()
}
let proof_size_limit =
Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
let computation_weight_limit =
Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
let mut meter = WeightMeter::from_limit(Weight::from_parts(
computation_weight_limit,
proof_size_limit,
));
Self::waste_at_most_proof_size(&mut meter);
Self::waste_at_most_ref_time(&mut meter);
meter.consumed()
}
}
#[pallet::call(weight = T::WeightInfo)]
impl<T: Config> Pallet<T> {
/// Initialize the pallet. Should be called once, if no genesis state was provided.
///
        /// `witness_count` is the current number of elements in `TrashData`. This can be set to
/// `None` when the pallet is first initialized.
///
/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
#[pallet::call_index(0)]
#[pallet::weight(
T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
)]
pub fn initialize_pallet(
origin: OriginFor<T>,
new_count: u32,
witness_count: Option<u32>,
) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
let current_count = TrashDataCount::<T>::get();
ensure!(
current_count == witness_count.unwrap_or_default(),
Error::<T>::AlreadyInitialized
);
if new_count > current_count {
(current_count..new_count)
.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
} else { | }
Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
TrashDataCount::<T>::set(new_count);
Ok(())
}
/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(1)]
pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Compute::<T>::set(compute);
Self::deposit_event(Event::ComputationLimitSet { compute });
Ok(())
}
/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
///
/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
/// results that are used here are likely an over-estimation. 100% intended consumption will
/// therefore translate to less than 100% actual consumption.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(2)]
pub fn set_storage(origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(storage <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Storage::<T>::set(storage);
Self::deposit_event(Event::StorageLimitSet { storage });
Ok(())
}
}
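    // Standalone model of the grow/shrink branch in `initialize_pallet`
    // above, with a Vec standing in for the TrashData storage map: growing
    // appends generated entries for the new indices, shrinking truncates.
    fn resize_trash_model(data: &mut Vec<u64>, new_count: usize, gen: impl Fn(usize) -> u64) {
        if new_count > data.len() {
            for i in data.len()..new_count {
                data.push(gen(i));
            }
        } else {
            data.truncate(new_count);
        }
    }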
impl<T: Config> Pallet<T> {
/// Waste at most the remaining proof size of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_proof_size(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_proof_size_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_proof_size_some(n));
(0..n).for_each(|i| {
TrashData::<T>::get(i);
});
}
/// Calculate how many times `waste_proof_size_some` should be called to fill up `meter`.
fn calculate_proof_size_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_proof_size_some(0);
let slope = T::WeightInfo::waste_proof_size_some(1).saturating_sub(base);
let remaining = meter.remaining().saturating_sub(base);
let iter_by_proof_size =
remaining.proof_size().checked_div(slope.proof_size()).ok_or(())?;
let iter_by_ref_time = remaining.ref_time().checked_div(slope.ref_time()).ok_or(())?;
if iter_by_proof_size > 0 && iter_by_proof_size <= iter_by_ref_time {
Ok(iter_by_proof_size as u32)
} else {
Err(())
}
}
/// Waste at most the remaining ref time weight of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_ref_time(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_ref_time_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_ref_time_iter(n));
let clobber = Self::waste_ref_time_iter(vec![0u8; 64], n);
// By casting it into a vec we can hopefully prevent the compiler from optimizing it
// out. Note that `Blake2b512` produces 64 bytes, this is therefore impossible - but the
// compiler does not know that (hopefully).
debug_assert!(clobber.len() == 64);
if clobber.len() == 65 {
TrashData::<T>::insert(0, [clobber[0] as u8; VALUE_SIZE]);
}
}
/// Wastes some `ref_time`. Receives the previous result as an argument.
///
/// The ref_time of one iteration should be in the order of 1-10 ms.
pub(crate) fn waste_ref_time_iter(clobber: Vec<u8>, i: u32) -> Vec<u8> {
let mut hasher = Blake2b512::new();
// Blake2 has a very high speed of hashing so we make multiple hashes with it to
// waste more `ref_time` at once.
(0..i).for_each(|_| {
hasher.update(clobber.as_slice());
});
hasher.finalize().to_vec()
}
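        // The same pattern outside the pallet, using the blake2 crate exactly
        // as above (a sketch; `rounds` is hypothetical): feeding the 64-byte
        // digest back in `i` times amortizes hasher setup across iterations.
        //
        // fn burn(mut clobber: Vec<u8>, rounds: u32, i: u32) -> Vec<u8> {
        //     for _ in 0..rounds {
        //         let mut hasher = Blake2b512::new();
        //         (0..i).for_each(|_| hasher.update(clobber.as_slice()));
        //         clobber = hasher.finalize().to_vec();
        //     }
        //     clobber
        // }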
/// Calculate how many times `waste_ref_time_iter` should be called to fill up `meter`.
fn calculate_ref_time_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_ref_time_iter(0);
let slope = T::WeightInfo::waste_ref_time_iter(1).saturating_sub(base);
            if !slope.proof_size().is_zero() || !base.proof_size().is_zero() {
return Err(())
}
match meter
.remaining()
.ref_time()
.saturating_sub(base.ref_time())
.checked_div(slope.ref_time())
{
Some(0) | None => Err(()),
Some(i) => Ok(i as u32),
}
}
/// Generate a pseudo-random deterministic value from a `seed`.
pub(crate) fn gen_value(seed: u32) -> [u8; VALUE_SIZE] {
let mut ret = [0u8; VALUE_SIZE];
for i in 0u32..(VALUE_SIZE as u32 / 32) {
let hash = (seed, i).using_encoded(twox_256);
ret[i as usize * 32..(i + 1) as usize * 32].copy_from_slice(&hash);
}
ret
}
}
} | (new_count..current_count).for_each(TrashData::<T>::remove); | random_line_split |
lib.rs | // This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # WARNING
//!
//! **DO NOT USE ON VALUE-BEARING CHAINS. THIS PALLET IS ONLY INTENDED FOR TESTING USAGE.**
//!
//! # Glutton Pallet
//!
//! Pallet that consumes `ref_time` and `proof_size` of a block. Based on the
//! `Compute` and `Storage` parameters the pallet consumes the adequate amount
//! of weight.
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub mod weights;
use blake2::{Blake2b512, Digest};
use frame_support::{pallet_prelude::*, weights::WeightMeter, DefaultNoBound};
use frame_system::pallet_prelude::*;
use sp_io::hashing::twox_256;
use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64};
use sp_std::{vec, vec::Vec};
pub use pallet::*;
pub use weights::WeightInfo;
/// The size of each value in the `TrashData` storage in bytes.
pub const VALUE_SIZE: usize = 1024;
/// Max number of entries for the `TrashData` map.
pub const MAX_TRASH_DATA_ENTRIES: u32 = 65_000;
/// Hard limit for any other resource limit (in units).
pub const RESOURCE_HARD_LIMIT: FixedU64 = FixedU64::from_u32(10);
#[frame_support::pallet]
pub mod pallet {
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// The admin origin that can set computational limits and initialize the pallet.
type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
/// Weight information for this pallet.
type WeightInfo: WeightInfo;
}
#[pallet::pallet]
pub struct Pallet<T>(_);
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event {
/// The pallet has been (re)initialized.
PalletInitialized {
/// Whether the pallet has been re-initialized.
reinit: bool,
},
/// The computation limit has been updated.
ComputationLimitSet {
/// The computation limit.
compute: FixedU64,
},
/// The storage limit has been updated.
StorageLimitSet {
/// The storage limit.
storage: FixedU64,
},
}
#[pallet::error]
pub enum Error<T> {
/// The pallet was already initialized.
///
/// Set `witness_count` to `Some` to bypass this error.
AlreadyInitialized,
/// The limit was over [`crate::RESOURCE_HARD_LIMIT`].
InsaneLimit,
}
/// The proportion of the remaining `ref_time` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Compute<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// The proportion of the remaining `proof_size` to consume during `on_idle`.
///
/// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to
/// over `1.0` could stall the chain.
#[pallet::storage]
pub(crate) type Storage<T: Config> = StorageValue<_, FixedU64, ValueQuery>;
/// Storage map used for wasting proof size.
///
/// It contains no meaningful data - hence the name "Trash". The maximal number of entries is
/// set to 65k, which is just below the next jump at 16^4. This is important to reduce the proof
/// size benchmarking overestimate. The assumption here is that we won't have more than 65k *
/// 1KiB = 65MiB of proof size wasting in practice. However, this limit is not enforced, so the
/// pallet would also work out of the box with more entries, but its benchmarked proof weight
/// would possibly be underestimated in that case.
#[pallet::storage]
pub(super) type TrashData<T: Config> = StorageMap<
Hasher = Twox64Concat,
Key = u32,
Value = [u8; VALUE_SIZE],
QueryKind = OptionQuery,
MaxValues = ConstU32<MAX_TRASH_DATA_ENTRIES>,
>;
/// The current number of entries in `TrashData`.
#[pallet::storage]
pub(crate) type TrashDataCount<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
/// The compute limit.
pub compute: FixedU64,
/// The storage limit.
pub storage: FixedU64,
/// The amount of trash data for wasting proof size.
pub trash_data_count: u32,
#[serde(skip)]
/// The required configuration field.
pub _config: sp_std::marker::PhantomData<T>,
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
assert!(
self.trash_data_count <= MAX_TRASH_DATA_ENTRIES,
"number of TrashData entries cannot be bigger than {:?}",
MAX_TRASH_DATA_ENTRIES
);
(0..self.trash_data_count)
.for_each(|i| TrashData::<T>::insert(i, Pallet::<T>::gen_value(i)));
TrashDataCount::<T>::set(self.trash_data_count);
assert!(self.compute <= RESOURCE_HARD_LIMIT, "Compute limit is insane");
<Compute<T>>::put(self.compute);
assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane");
<Storage<T>>::put(self.storage);
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn integrity_test() {
assert!(
!T::WeightInfo::waste_ref_time_iter(1).ref_time().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
assert!(
!T::WeightInfo::waste_proof_size_some(1).proof_size().is_zero(),
"Weight zero; would get stuck in an infinite loop"
);
}
fn on_idle(_: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
let mut meter = WeightMeter::from_limit(remaining_weight);
if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() {
return T::WeightInfo::empty_on_idle()
}
let proof_size_limit =
Storage::<T>::get().saturating_mul_int(meter.remaining().proof_size());
let computation_weight_limit =
Compute::<T>::get().saturating_mul_int(meter.remaining().ref_time());
let mut meter = WeightMeter::from_limit(Weight::from_parts(
computation_weight_limit,
proof_size_limit,
));
Self::waste_at_most_proof_size(&mut meter);
Self::waste_at_most_ref_time(&mut meter);
meter.consumed()
}
}
#[pallet::call(weight = T::WeightInfo)]
impl<T: Config> Pallet<T> {
/// Initialize the pallet. Should be called once, if no genesis state was provided.
///
        /// `witness_count` is the current number of elements in `TrashData`. This can be set to
/// `None` when the pallet is first initialized.
///
/// Only callable by Root or `AdminOrigin`. A good default for `new_count` is `5_000`.
#[pallet::call_index(0)]
#[pallet::weight(
T::WeightInfo::initialize_pallet_grow(witness_count.unwrap_or_default())
.max(T::WeightInfo::initialize_pallet_shrink(witness_count.unwrap_or_default()))
)]
pub fn initialize_pallet(
origin: OriginFor<T>,
new_count: u32,
witness_count: Option<u32>,
) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
let current_count = TrashDataCount::<T>::get();
ensure!(
current_count == witness_count.unwrap_or_default(),
Error::<T>::AlreadyInitialized
);
if new_count > current_count {
(current_count..new_count)
.for_each(|i| TrashData::<T>::insert(i, Self::gen_value(i)));
} else {
(new_count..current_count).for_each(TrashData::<T>::remove);
}
Self::deposit_event(Event::PalletInitialized { reinit: witness_count.is_some() });
TrashDataCount::<T>::set(new_count);
Ok(())
}
/// Set how much of the remaining `ref_time` weight should be consumed by `on_idle`.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(1)]
pub fn set_compute(origin: OriginFor<T>, compute: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(compute <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Compute::<T>::set(compute);
Self::deposit_event(Event::ComputationLimitSet { compute });
Ok(())
}
/// Set how much of the remaining `proof_size` weight should be consumed by `on_idle`.
///
/// `1.0` means that all remaining `proof_size` will be consumed. The PoV benchmarking
/// results that are used here are likely an over-estimation. 100% intended consumption will
/// therefore translate to less than 100% actual consumption.
///
/// Only callable by Root or `AdminOrigin`.
#[pallet::call_index(2)]
pub fn | (origin: OriginFor<T>, storage: FixedU64) -> DispatchResult {
T::AdminOrigin::ensure_origin_or_root(origin)?;
ensure!(storage <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit);
Storage::<T>::set(storage);
Self::deposit_event(Event::StorageLimitSet { storage });
Ok(())
}
}
impl<T: Config> Pallet<T> {
/// Waste at most the remaining proof size of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_proof_size(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_proof_size_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_proof_size_some(n));
(0..n).for_each(|i| {
TrashData::<T>::get(i);
});
}
/// Calculate how many times `waste_proof_size_some` should be called to fill up `meter`.
fn calculate_proof_size_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_proof_size_some(0);
let slope = T::WeightInfo::waste_proof_size_some(1).saturating_sub(base);
let remaining = meter.remaining().saturating_sub(base);
let iter_by_proof_size =
remaining.proof_size().checked_div(slope.proof_size()).ok_or(())?;
let iter_by_ref_time = remaining.ref_time().checked_div(slope.ref_time()).ok_or(())?;
if iter_by_proof_size > 0 && iter_by_proof_size <= iter_by_ref_time {
Ok(iter_by_proof_size as u32)
} else {
Err(())
}
}
/// Waste at most the remaining ref time weight of `meter`.
///
/// Tries to come as close to the limit as possible.
pub(crate) fn waste_at_most_ref_time(meter: &mut WeightMeter) {
let Ok(n) = Self::calculate_ref_time_iters(&meter) else { return };
meter.consume(T::WeightInfo::waste_ref_time_iter(n));
let clobber = Self::waste_ref_time_iter(vec![0u8; 64], n);
// By casting it into a vec we can hopefully prevent the compiler from optimizing it
// out. Note that `Blake2b512` produces 64 bytes, this is therefore impossible - but the
// compiler does not know that (hopefully).
debug_assert!(clobber.len() == 64);
if clobber.len() == 65 {
TrashData::<T>::insert(0, [clobber[0] as u8; VALUE_SIZE]);
}
}
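        // Design note: the impossible length check above only creates a data
        // dependency on `clobber` so the hash loop cannot be optimized away.
        // A more direct alternative (assuming a toolchain with
        // `core::hint::black_box`; not what this pallet uses) would be:
        //
        // let clobber = core::hint::black_box(
        //     Self::waste_ref_time_iter(vec![0u8; 64], n),
        // );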
/// Wastes some `ref_time`. Receives the previous result as an argument.
///
/// The ref_time of one iteration should be in the order of 1-10 ms.
pub(crate) fn waste_ref_time_iter(clobber: Vec<u8>, i: u32) -> Vec<u8> {
let mut hasher = Blake2b512::new();
// Blake2 has a very high speed of hashing so we make multiple hashes with it to
// waste more `ref_time` at once.
(0..i).for_each(|_| {
hasher.update(clobber.as_slice());
});
hasher.finalize().to_vec()
}
/// Calculate how many times `waste_ref_time_iter` should be called to fill up `meter`.
fn calculate_ref_time_iters(meter: &WeightMeter) -> Result<u32, ()> {
let base = T::WeightInfo::waste_ref_time_iter(0);
let slope = T::WeightInfo::waste_ref_time_iter(1).saturating_sub(base);
            if !slope.proof_size().is_zero() || !base.proof_size().is_zero() {
return Err(())
}
match meter
.remaining()
.ref_time()
.saturating_sub(base.ref_time())
.checked_div(slope.ref_time())
{
Some(0) | None => Err(()),
Some(i) => Ok(i as u32),
}
}
/// Generate a pseudo-random deterministic value from a `seed`.
pub(crate) fn gen_value(seed: u32) -> [u8; VALUE_SIZE] {
let mut ret = [0u8; VALUE_SIZE];
for i in 0u32..(VALUE_SIZE as u32 / 32) {
let hash = (seed, i).using_encoded(twox_256);
ret[i as usize * 32..(i + 1) as usize * 32].copy_from_slice(&hash);
}
ret
}
}
}
| set_storage | identifier_name |
freelist.rs | use types::{txid_t, pgid_t};
use page::{Page, get_page_header_size, merge_pgids, merge_pgids_raw, FREELIST_PAGE_FLAG};
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::mem;
use std::slice;
// FreeList represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
pub struct FreeList {
pub ids: Vec<pgid_t>, // all free and available free page ids
pub pending: HashMap<txid_t, Vec<pgid_t>>, // mapping of soon-to-be free page ids by tx
pub cache: HashSet<pgid_t>, // fast lookup of all free and pending page ids
}
impl FreeList {
pub fn new() -> FreeList {
FreeList{
ids: vec![],
pending: HashMap::new(),
cache: HashSet::new(),
}
}
// returns the size of the page after serialization.
pub fn size(&self) -> usize {
let mut n = self.count();
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n += 1;
}
get_page_header_size() + mem::size_of::<pgid_t>() * n
}
pub fn count(&self) -> usize {
self.free_count() + self.pending_count()
}
pub fn free_count(&self) -> usize {
self.ids.len()
}
pub fn pending_count(&self) -> usize {
let mut count: usize = 0;
for (_, val) in self.pending.iter() {
count += val.len()
}
count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
pub fn copyall(&self, dst: *mut pgid_t) {
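// Safety: dst must point to at least self.count() writable pgid_t slots.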
let mut m = Vec::with_capacity(self.pending_count());
for (_, list) in self.pending.iter() {
let mut copy_list = list.to_vec();
m.append(&mut copy_list);
}
m.sort();
merge_pgids_raw(dst, &self.ids, &m);
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
pub fn allocate(&mut self, n: usize) -> pgid_t {
if self.ids.len() == 0 {
return 0;
}
let mut initial: pgid_t = 0;
let mut previd: pgid_t = 0;
let mut found_index: Option<usize> = None;
for i in 0..self.ids.len() {
let id = self.ids[i];
if id <= 1 {
panic!("invalid page allocation: {}", id);
}
// Reset initial page if this is not contiguous.
if previd == 0 || id - previd != 1 {
initial = id;
}
// If we found a contiguous block then remove it and return it.
if (id - initial) + 1 == n as pgid_t {
found_index = Some(i);
break;
}
previd = id
}
match found_index {
None => 0,
Some(idx) => {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if idx + 1 == n {
self.ids.drain(..idx+1);
} else {
self.ids.drain(idx-n+1..idx+1);
}
// Remove from the free cache
for i in 0..n as pgid_t {
self.cache.remove(&(initial + i));
}
initial
}
}
}
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
pub fn free(&mut self, txid: txid_t, p: Rc<RefCell<Page>>) {
let pgid = p.borrow().id;
if pgid <= 1 {
panic!("cannot free page 0 or 1: {}", pgid);
}
// Free page and all its overflow pages.
if !self.pending.contains_key(&txid) {
self.pending.insert(txid, Vec::new());
}
let ids_option = self.pending.get_mut(&txid);
match ids_option {
None => panic!("pending should not be None"),
Some(ids) => {
for id in pgid..pgid + 1 + p.borrow().overflow as pgid_t {
// Verify that page is not already free.
if self.cache.contains(&id) {
panic!("page {} already freed")
}
// Add to the freelist and cache.
ids.push(id);
self.cache.insert(id);
}
},
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
pub fn release(&mut self, txid: txid_t) {
let mut m: Vec<pgid_t> = Vec::new();
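// Drain every pending list whose transaction id is <= txid, collecting its page ids into m.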
self.pending.retain(|tid, ids| {
if *tid <= txid {
m.append(&mut ids.to_vec());
return false;
}
true
});
m.sort();
let mut new_ids: Vec<pgid_t> = Vec::with_capacity(self.ids.len() + m.len());
merge_pgids(&mut new_ids, &self.ids, &m);
self.ids = new_ids;
}
// rollback removes the pages from a given pending tx.
pub fn rollback(&mut self, txid: txid_t) {
// Remove page ids from cache.
for id in &self.pending[&txid] {
self.cache.remove(id);
}
// Remove pages from pending list
self.pending.remove(&txid);
}
// freed returns whether a given page is in the free list
pub fn freed(&self, pgid: pgid_t) -> bool {
self.cache.contains(&pgid)
}
// read initializes the freelist from a freelist page.
pub fn read(&mut self, p: &Page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
let mut idx: usize = 0;
let mut count: usize = p.count as usize;
if count == 0xFFFF {
idx = 1;
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
count = unsafe { (*pgid_ptr) as usize };
}
// Copy the list of page ids from the freelist
if count == 0 | else {
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
self.ids.reserve(count - idx);
let pgids_slice = unsafe {
slice::from_raw_parts(pgid_ptr.offset(idx as isize), count - idx)
};
self.ids.append(&mut pgids_slice.to_vec());
// Make sure they're sorted.
self.ids.sort();
}
// Rebuild the page cache.
self.reindex();
}
// writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
pub fn write(&self, p: &mut Page) {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= FREELIST_PAGE_FLAG;
// The page.count can only hold up to 64k elementes so if we overflow that
// number then we handle it by putting the size in the first element.
let lenids = self.count();
if lenids == 0 {
p.count = lenids as u16;
} else if lenids < 0xFFFF {
p.count = lenids as u16;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr, 0, lenids)
};
*/
self.copyall(pgid_ptr);
} else {
p.count = 0xFFFF;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
unsafe {*pgid_ptr = lenids as u64;}
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr.offset(1), 0, lenids)
};
*/
self.copyall(unsafe {pgid_ptr.offset(1)});
}
}
// reload reads the freelist from a page and filters out pending items.
pub fn reload(&mut self, p: &Page) {
self.read(p);
// Build a cache of only pending pages.
let mut pcache: HashSet<pgid_t> = HashSet::new();
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
pcache.insert(*pending_id);
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
let mut a: Vec<pgid_t> = Vec::new();
for id in &self.ids {
if !pcache.contains(id) {
a.push(*id);
}
}
self.ids = a;
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
self.reindex();
}
// reindex rebuilds the free cache based on available and pending free lists.
pub fn reindex(&mut self) {
self.cache.clear();
self.cache.reserve(self.ids.len());
for id in &self.ids {
self.cache.insert(*id);
}
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
self.cache.insert(*pending_id);
}
}
}
}
#[cfg(test)]
mod tests {
use freelist::FreeList;
use std::rc::Rc;
use std::cell::RefCell;
use page::{Page, FREELIST_PAGE_FLAG};
use std::collections::{HashMap, HashSet};
use types::pgid_t;
use test::Bencher;
extern crate rand;
#[test]
fn freelist_free() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12]);
}
#[test]
fn freelist_free_overflow() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 3,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12,13,14,15]);
}
#[test]
fn freelist_release() {
let mut f = FreeList::new();
let page1 = Rc::new(RefCell::new(Page {
id: 12,
flags: 0,
count: 0,
overflow: 1,
ptr: 0,
}));
f.free(100, Rc::clone(&page1));
let page2 = Rc::new(RefCell::new(Page {
id: 9,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page2));
let page3 = Rc::new(RefCell::new(Page {
id: 39,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(102, Rc::clone(&page3));
f.release(100);
f.release(101);
assert_eq!(f.ids, vec![9,12,13]);
f.release(102);
assert_eq!(f.ids, vec![9,12,13, 39]);
}
#[test]
fn freelist_allocate() {
let mut f = FreeList {
ids: vec![3,4,5,6,7,9,12,13,18],
pending: HashMap::new(),
cache: HashSet::new(),
};
assert_eq!(f.allocate(3), 3);
assert_eq!(f.allocate(1), 6);
assert_eq!(f.allocate(3), 0);
assert_eq!(f.allocate(2), 12);
assert_eq!(f.allocate(1), 7);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.ids, vec![9,18]);
assert_eq!(f.allocate(1), 9);
assert_eq!(f.allocate(1), 18);
assert_eq!(f.allocate(1), 0);
assert_eq!(f.ids, vec![]);
}
#[test]
fn freelist_read() {
// Create a page.
let mut buf: [u8; 4096] = [0; 4096];
let page: *mut Page = buf.as_mut_ptr() as *mut Page;
unsafe {
(*page).flags = FREELIST_PAGE_FLAG;
(*page).count = 2;
}
// Insert 2 page ids
let ids_ptr: *mut pgid_t = unsafe {
&mut (*page).ptr as *mut usize as *mut pgid_t
};
unsafe {
*ids_ptr = 23;
*ids_ptr.offset(1) = 50;
}
// Deserialize page into a freelist.
let mut f = FreeList::new();
unsafe {
f.read(&(*page));
}
// Ensure that there are two page ids in the freelist.
assert_eq!(f.ids, vec![23, 50]);
}
#[test]
fn freelist_write() {
// Create a freelist and write it to a page.
let mut buf: [u8; 4096] = [0; 4096];
let page: *mut Page = buf.as_mut_ptr() as *mut Page;
let mut f = FreeList {
ids: vec![12, 39],
pending: HashMap::new(),
cache: HashSet::new(),
};
f.pending.insert(100, vec![28, 11]);
f.pending.insert(101, vec![3]);
unsafe { f.write(page.as_mut().unwrap()); };
// Read the page back out
let mut f2 = FreeList::new();
let p_const = page as *const Page;
unsafe {
f2.read(&(*p_const));
}
// Ensure that the freelist is correct.
// All pages should be present and sorted in ascending order.
assert_eq!(f2.ids, vec![3, 11, 12, 28, 39]);
}
#[bench]
fn bench_freelist_release_10k(b: &mut Bencher) {
bench_freelist_release(b, 10000);
}
#[bench]
fn bench_freelist_release_100k(b: &mut Bencher) {
bench_freelist_release(b, 100000);
}
#[bench]
fn bench_freelist_release_1000k(b: &mut Bencher) {
bench_freelist_release(b, 1000000);
}
#[bench]
fn bench_freelist_release_10000k(b: &mut Bencher) {
bench_freelist_release(b, 10000000);
}
fn bench_freelist_release(b: &mut Bencher, size: usize) {
let ids = random_pgids(size);
let pending = random_pgids(ids.len() / 400);
b.iter(|| {
let mut f = FreeList::new();
f.ids.append(&mut ids.to_vec());
f.pending.insert(1, pending.to_vec());
f.release(1)
});
}
fn random_pgids(n: usize) -> Vec<pgid_t> {
let mut result: Vec<pgid_t> = Vec::with_capacity(n);
for _ in 0..n {
result.push(rand::random::<pgid_t>());
}
result
}
}
| {
self.ids.clear();
} | conditional_block |
freelist.rs | use types::{txid_t, pgid_t};
use page::{Page, get_page_header_size, merge_pgids, merge_pgids_raw, FREELIST_PAGE_FLAG};
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::mem;
use std::slice;
// FreeList represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
pub struct FreeList {
pub ids: Vec<pgid_t>, // all free and available free page ids
pub pending: HashMap<txid_t, Vec<pgid_t>>, // mapping of soon-to-be free page ids by tx
pub cache: HashSet<pgid_t>, // fast lookup of all free and pending page ids
}
impl FreeList {
pub fn new() -> FreeList {
FreeList{
ids: vec![],
pending: HashMap::new(),
cache: HashSet::new(),
}
}
// returns the size of the page after serialization.
pub fn size(&self) -> usize {
let mut n = self.count();
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n += 1;
}
get_page_header_size() + mem::size_of::<pgid_t>() * n
}
pub fn count(&self) -> usize {
self.free_count() + self.pending_count()
}
pub fn free_count(&self) -> usize {
self.ids.len()
}
pub fn pending_count(&self) -> usize {
let mut count: usize = 0;
for (_, val) in self.pending.iter() {
count += val.len()
}
count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
pub fn copyall(&self, dst: *mut pgid_t) {
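// Safety: dst must point to at least self.count() writable pgid_t slots.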
let mut m = Vec::with_capacity(self.pending_count());
for (_, list) in self.pending.iter() {
let mut copy_list = list.to_vec();
m.append(&mut copy_list);
}
m.sort();
merge_pgids_raw(dst, &self.ids, &m);
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
pub fn allocate(&mut self, n: usize) -> pgid_t {
if self.ids.len() == 0 {
return 0;
}
let mut initial: pgid_t = 0;
let mut previd: pgid_t = 0;
let mut found_index: Option<usize> = None;
for i in 0..self.ids.len() {
let id = self.ids[i];
if id <= 1 {
panic!("invalid page allocation: {}", id);
}
// Reset initial page if this is not contiguous.
if previd == 0 || id - previd != 1 {
initial = id;
}
// If we found a contiguous block then remove it and return it.
if (id - initial) + 1 == n as pgid_t {
found_index = Some(i);
break;
}
previd = id
}
match found_index {
None => 0,
Some(idx) => {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if idx + 1 == n {
self.ids.drain(..idx+1);
} else {
self.ids.drain(idx-n+1..idx+1);
}
// Remove from the free cache
for i in 0..n as pgid_t {
self.cache.remove(&(initial + i));
}
initial
}
}
}
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
pub fn free(&mut self, txid: txid_t, p: Rc<RefCell<Page>>) {
let pgid = p.borrow().id;
if pgid <= 1 {
panic!("cannot free page 0 or 1: {}", pgid);
}
// Free page and all its overflow pages.
if !self.pending.contains_key(&txid) {
self.pending.insert(txid, Vec::new());
}
let ids_option = self.pending.get_mut(&txid);
match ids_option {
None => panic!("pending should not be None"),
Some(ids) => {
for id in pgid..pgid + 1 + p.borrow().overflow as pgid_t {
// Verify that page is not already free.
if self.cache.contains(&id) {
panic!("page {} already freed")
}
// Add to the freelist and cache.
ids.push(id);
self.cache.insert(id);
}
},
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
pub fn release(&mut self, txid: txid_t) {
let mut m: Vec<pgid_t> = Vec::new();
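// Drain every pending list whose transaction id is <= txid, collecting its page ids into m.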
self.pending.retain(|tid, ids| {
if *tid <= txid {
m.append(&mut ids.to_vec());
return false;
}
true
});
m.sort();
let mut new_ids: Vec<pgid_t> = Vec::with_capacity(self.ids.len() + m.len());
merge_pgids(&mut new_ids, &self.ids, &m);
self.ids = new_ids;
}
// rollback removes the pages from a given pending tx.
pub fn rollback(&mut self, txid: txid_t) {
// Remove page ids from cache.
for id in &self.pending[&txid] {
self.cache.remove(id);
}
// Remove pages from pending list
self.pending.remove(&txid);
}
// freed returns whether a given page is in the free list
pub fn freed(&self, pgid: pgid_t) -> bool {
self.cache.contains(&pgid)
}
// read initializes the freelist from a freelist page.
pub fn read(&mut self, p: &Page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
let mut idx: usize = 0;
let mut count: usize = p.count as usize;
if count == 0xFFFF {
idx = 1;
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
count = unsafe { (*pgid_ptr) as usize };
}
// Copy the list of page ids from the freelist
if count == 0 {
self.ids.clear();
} else {
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
self.ids.reserve(count - idx);
let pgids_slice = unsafe {
slice::from_raw_parts(pgid_ptr.offset(idx as isize), count - idx)
};
self.ids.append(&mut pgids_slice.to_vec());
// Make sure they're sorted.
self.ids.sort();
}
// Rebuild the page cache.
self.reindex();
}
// writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
pub fn write(&self, p: &mut Page) | } else {
p.count = 0xFFFF;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
unsafe {*pgid_ptr = lenids as u64;}
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr.offset(1), 0, lenids)
};
*/
self.copyall(unsafe {pgid_ptr.offset(1)});
}
}
// reload reads the freelist from a page and filters out pending items.
pub fn reload(&mut self, p: &Page) {
self.read(p);
// Build a cache of only pending pages.
let mut pcache: HashSet<pgid_t> = HashSet::new();
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
pcache.insert(*pending_id);
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
let mut a: Vec<pgid_t> = Vec::new();
for id in &self.ids {
if !pcache.contains(id) {
a.push(*id);
}
}
self.ids = a;
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
self.reindex();
}
// reindex rebuilds the free cache based on available and pending free lists.
pub fn reindex(&mut self) {
self.cache.clear();
self.cache.reserve(self.ids.len());
for id in &self.ids {
self.cache.insert(*id);
}
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
self.cache.insert(*pending_id);
}
}
}
}
#[cfg(test)]
mod tests {
use freelist::FreeList;
use std::rc::Rc;
use std::cell::RefCell;
use page::{Page, FREELIST_PAGE_FLAG};
use std::collections::{HashMap, HashSet};
use types::pgid_t;
use test::Bencher;
extern crate rand;
#[test]
fn freelist_free() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12]);
}
#[test]
fn freelist_free_overflow() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 3,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12,13,14,15]);
}
#[test]
fn freelist_release() {
let mut f = FreeList::new();
let page1 = Rc::new(RefCell::new(Page {
id: 12,
flags: 0,
count: 0,
overflow: 1,
ptr: 0,
}));
f.free(100, Rc::clone(&page1));
let page2 = Rc::new(RefCell::new(Page {
id: 9,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page2));
let page3 = Rc::new(RefCell::new(Page {
id: 39,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(102, Rc::clone(&page3));
f.release(100);
f.release(101);
assert_eq!(f.ids, vec![9,12,13]);
f.release(102);
assert_eq!(f.ids, vec![9,12,13, 39]);
}
#[test]
fn freelist_allocate() {
let mut f = FreeList {
ids: vec![3,4,5,6,7,9,12,13,18],
pending: HashMap::new(),
cache: HashSet::new(),
};
assert_eq!(f.allocate(3), 3);
assert_eq!(f.allocate(1), 6);
assert_eq!(f.allocate(3), 0);
assert_eq!(f.allocate(2), 12);
assert_eq!(f.allocate(1), 7);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.ids, vec![9,18]);
assert_eq!(f.allocate(1), 9);
assert_eq!(f.allocate(1), 18);
assert_eq!(f.allocate(1), 0);
assert_eq!(f.ids, vec![]);
}
#[test]
fn freelist_read() {
// Create a page.
let mut buf: [u8; 4096] = [0; 4096];
let page: *mut Page = buf.as_mut_ptr() as *mut Page;
unsafe {
(*page).flags = FREELIST_PAGE_FLAG;
(*page).count = 2;
}
// Insert 2 page ids
let ids_ptr: *mut pgid_t = unsafe {
&mut (*page).ptr as *mut usize as *mut pgid_t
};
unsafe {
*ids_ptr = 23;
*ids_ptr.offset(1) = 50;
}
// Deserialize page into a freelist.
let mut f = FreeList::new();
unsafe {
f.read(&(*page));
}
// Ensure that there are two page ids in the freelist.
assert_eq!(f.ids, vec![23, 50]);
}
#[test]
fn freelist_write() {
// Create a freelist and write it to a page.
let mut buf: [u8; 4096] = [0; 4096];
let page: *mut Page = buf.as_mut_ptr() as *mut Page;
let mut f = FreeList {
ids: vec![12, 39],
pending: HashMap::new(),
cache: HashSet::new(),
};
f.pending.insert(100, vec![28, 11]);
f.pending.insert(101, vec![3]);
unsafe { f.write(page.as_mut().unwrap()); };
// Read the page back out
let mut f2 = FreeList::new();
let p_const = page as *const Page;
unsafe {
f2.read(&(*p_const));
}
// Ensure that the freelist is correct.
// All pages should be present and sorted in ascending order.
assert_eq!(f2.ids, vec![3, 11, 12, 28, 39]);
}
#[bench]
fn bench_freelist_release_10k(b: &mut Bencher) {
bench_freelist_release(b, 10000);
}
#[bench]
fn bench_freelist_release_100k(b: &mut Bencher) {
bench_freelist_release(b, 100000);
}
#[bench]
fn bench_freelist_release_1000k(b: &mut Bencher) {
bench_freelist_release(b, 1000000);
}
#[bench]
fn bench_freelist_release_10000k(b: &mut Bencher) {
bench_freelist_release(b, 10000000);
}
fn bench_freelist_release(b: &mut Bencher, size: usize) {
let ids = random_pgids(size);
let pending = random_pgids(ids.len() / 400);
b.iter(|| {
let mut f = FreeList::new();
f.ids.append(&mut ids.to_vec());
f.pending.insert(1, pending.to_vec());
f.release(1)
});
}
fn random_pgids(n: usize) -> Vec<pgid_t> {
let mut result: Vec<pgid_t> = Vec::with_capacity(n);
for _ in 0..n {
result.push(rand::random::<pgid_t>());
}
result
}
}
| {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= FREELIST_PAGE_FLAG;
// The page.count can only hold up to 64k elementes so if we overflow that
// number then we handle it by putting the size in the first element.
let lenids = self.count();
if lenids == 0 {
p.count = lenids as u16;
} else if lenids < 0xFFFF {
p.count = lenids as u16;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr, 0, lenids)
};
*/
self.copyall(pgid_ptr); | identifier_body |
freelist.rs | use types::{txid_t, pgid_t};
use page::{Page, get_page_header_size, merge_pgids, merge_pgids_raw, FREELIST_PAGE_FLAG};
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::mem;
use std::slice;
// FreeList represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
pub struct FreeList {
pub ids: Vec<pgid_t>, // all free and available free page ids
pub pending: HashMap<txid_t, Vec<pgid_t>>, // mapping of soon-to-be free page ids by tx
pub cache: HashSet<pgid_t>, // fast lookup of all free and pending page ids
}
impl FreeList {
pub fn new() -> FreeList {
FreeList{
ids: vec![],
pending: HashMap::new(),
cache: HashSet::new(),
}
}
// returns the size of the page after serialization.
pub fn size(&self) -> usize {
let mut n = self.count();
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n += 1;
}
get_page_header_size() + mem::size_of::<pgid_t>() * n
}
pub fn count(&self) -> usize {
self.free_count() + self.pending_count()
}
pub fn free_count(&self) -> usize {
self.ids.len()
}
pub fn pending_count(&self) -> usize {
let mut count: usize = 0;
for (_, val) in self.pending.iter() {
count += val.len()
}
count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
pub fn copyall(&self, dst: *mut pgid_t) {
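// Safety: dst must point to at least self.count() writable pgid_t slots.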
let mut m = Vec::with_capacity(self.pending_count());
for (_, list) in self.pending.iter() {
let mut copy_list = list.to_vec();
m.append(&mut copy_list);
}
m.sort();
merge_pgids_raw(dst, &self.ids, &m);
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
pub fn allocate(&mut self, n: usize) -> pgid_t {
if self.ids.len() == 0 {
return 0;
}
let mut initial: pgid_t = 0;
let mut previd: pgid_t = 0;
let mut found_index: Option<usize> = None;
for i in 0..self.ids.len() {
let id = self.ids[i];
if id <= 1 {
panic!("invalid page allocation: {}", id);
}
// Reset initial page if this is not contiguous.
if previd == 0 || id - previd != 1 {
initial = id;
}
// If we found a contiguous block then remove it and return it.
if (id - initial) + 1 == n as pgid_t {
found_index = Some(i);
break;
}
previd = id
}
match found_index {
None => 0,
Some(idx) => {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if idx + 1 == n {
self.ids.drain(..idx+1);
} else {
self.ids.drain(idx-n+1..idx+1);
}
// Remove from the free cache
for i in 0..n as pgid_t {
self.cache.remove(&(initial + i));
}
initial
}
}
}
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
pub fn free(&mut self, txid: txid_t, p: Rc<RefCell<Page>>) {
let pgid = p.borrow().id;
if pgid <= 1 {
panic!("cannot free page 0 or 1: {}", pgid);
}
// Free page and all its overflow pages.
if !self.pending.contains_key(&txid) {
self.pending.insert(txid, Vec::new());
}
let ids_option = self.pending.get_mut(&txid);
match ids_option {
None => panic!("pending should not be None"),
Some(ids) => {
for id in pgid..pgid + 1 + p.borrow().overflow as pgid_t {
// Verify that page is not already free.
if self.cache.contains(&id) {
panic!("page {} already freed")
}
// Add to the freelist and cache.
ids.push(id);
self.cache.insert(id);
}
},
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
pub fn release(&mut self, txid: txid_t) {
let mut m: Vec<pgid_t> = Vec::new();
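// Drain every pending list whose transaction id is <= txid, collecting its page ids into m.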
self.pending.retain(|tid, ids| {
if *tid <= txid {
m.append(&mut ids.to_vec());
return false;
}
true
});
m.sort();
let mut new_ids: Vec<pgid_t> = Vec::with_capacity(self.ids.len() + m.len());
merge_pgids(&mut new_ids, &self.ids, &m);
self.ids = new_ids;
}
// rollback removes the pages from a given pending tx.
pub fn rollback(&mut self, txid: txid_t) {
// Remove page ids from cache.
for id in &self.pending[&txid] {
self.cache.remove(id);
}
// Remove pages from pending list
self.pending.remove(&txid);
}
// freed returns whether a given page is in the free list
pub fn freed(&self, pgid: pgid_t) -> bool {
self.cache.contains(&pgid)
}
// read initializes the freelist from a freelist page.
pub fn read(&mut self, p: &Page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
let mut idx: usize = 0;
let mut count: usize = p.count as usize;
if count == 0xFFFF {
idx = 1;
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
count = unsafe { (*pgid_ptr) as usize };
}
// Copy the list of page ids from the freelist
if count == 0 {
self.ids.clear();
} else {
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
self.ids.reserve(count - idx);
let pgids_slice = unsafe {
slice::from_raw_parts(pgid_ptr.offset(idx as isize), count - idx)
};
self.ids.append(&mut pgids_slice.to_vec());
// Make sure they're sorted.
self.ids.sort();
}
// Rebuild the page cache.
self.reindex();
}
// writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
pub fn write(&self, p: &mut Page) {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= FREELIST_PAGE_FLAG;
// The page.count can only hold up to 64k elementes so if we overflow that
// number then we handle it by putting the size in the first element.
let lenids = self.count();
if lenids == 0 {
p.count = lenids as u16;
} else if lenids < 0xFFFF {
p.count = lenids as u16;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr, 0, lenids)
};
*/
self.copyall(pgid_ptr);
} else {
p.count = 0xFFFF;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
unsafe {*pgid_ptr = lenids as u64;}
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr.offset(1), 0, lenids)
};
*/
self.copyall(unsafe {pgid_ptr.offset(1)});
}
}
// reload reads the freelist from a page and filters out pending items.
pub fn reload(&mut self, p: &Page) {
self.read(p);
// Build a cache of only pending pages.
let mut pcache: HashSet<pgid_t> = HashSet::new();
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
pcache.insert(*pending_id);
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
let mut a: Vec<pgid_t> = Vec::new();
for id in &self.ids {
if !pcache.contains(id) {
a.push(*id);
}
}
self.ids = a;
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
self.reindex();
}
// reindex rebuilds the free cache based on available and pending free lists.
pub fn reindex(&mut self) {
self.cache.clear();
self.cache.reserve(self.ids.len());
for id in &self.ids {
self.cache.insert(*id);
}
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
self.cache.insert(*pending_id);
}
}
}
}
#[cfg(test)]
mod tests {
use freelist::FreeList;
use std::rc::Rc;
use std::cell::RefCell;
use page::{Page, FREELIST_PAGE_FLAG};
use std::collections::{HashMap, HashSet};
use types::pgid_t;
use test::Bencher;
extern crate rand;
#[test]
fn freelist_free() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12]);
}
#[test]
fn freelist_free_overflow() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 3,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12,13,14,15]);
}
#[test]
fn freelist_release() {
let mut f = FreeList::new();
let page1 = Rc::new(RefCell::new(Page {
id: 12,
flags: 0,
count: 0,
overflow: 1,
ptr: 0,
}));
f.free(100, Rc::clone(&page1));
let page2 = Rc::new(RefCell::new(Page {
id: 9,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page2));
let page3 = Rc::new(RefCell::new(Page {
id: 39,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(102, Rc::clone(&page3));
f.release(100);
f.release(101);
assert_eq!(f.ids, vec![9,12,13]);
f.release(102);
assert_eq!(f.ids, vec![9,12,13, 39]);
}
#[test]
fn freelist_allocate() {
let mut f = FreeList {
ids: vec![3,4,5,6,7,9,12,13,18],
pending: HashMap::new(),
cache: HashSet::new(),
};
assert_eq!(f.allocate(3), 3);
assert_eq!(f.allocate(1), 6);
assert_eq!(f.allocate(3), 0);
assert_eq!(f.allocate(2), 12);
assert_eq!(f.allocate(1), 7);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.ids, vec![9,18]);
assert_eq!(f.allocate(1), 9);
assert_eq!(f.allocate(1), 18);
assert_eq!(f.allocate(1), 0);
assert_eq!(f.ids, vec![]);
}
#[test]
fn | () {
// Create a page.
let mut buf: [u8; 4096] = [0; 4096];
let page: *mut Page = buf.as_mut_ptr() as *mut Page;
unsafe {
(*page).flags = FREELIST_PAGE_FLAG;
(*page).count = 2;
}
// Insert 2 page ids
let ids_ptr: *mut pgid_t = unsafe {
&mut (*page).ptr as *mut usize as *mut pgid_t
};
unsafe {
*ids_ptr = 23;
*ids_ptr.offset(1) = 50;
}
// Deserialize page into a freelist.
let mut f = FreeList::new();
unsafe {
f.read(&(*page));
}
// Ensure that there are two page ids in the freelist.
assert_eq!(f.ids, vec![23, 50]);
}
#[test]
fn freelist_write() {
// Create a freelist and write it to a page.
let mut buf: [u8; 4096] = [0; 4096];
let page: *mut Page = buf.as_mut_ptr() as *mut Page;
let mut f = FreeList {
ids: vec![12, 39],
pending: HashMap::new(),
cache: HashSet::new(),
};
f.pending.insert(100, vec![28, 11]);
f.pending.insert(101, vec![3]);
unsafe { f.write(page.as_mut().unwrap()); };
// Read the page back out
let mut f2 = FreeList::new();
let p_const = page as *const Page;
unsafe {
f2.read(&(*p_const));
}
// Ensure that the freelist is correct.
// All pages should be present and in reverse order.
assert_eq!(f2.ids, vec![3, 11, 12, 28, 39]);
}
#[bench]
fn bench_freelist_release_10k(b: &mut Bencher) {
bench_freelist_release(b, 10000);
}
#[bench]
fn bench_freelist_release_100k(b: &mut Bencher) {
bench_freelist_release(b, 100000);
}
#[bench]
fn bench_freelist_release_1000k(b: &mut Bencher) {
bench_freelist_release(b, 1000000);
}
#[bench]
fn bench_freelist_release_10000k(b: &mut Bencher) {
bench_freelist_release(b, 10000000);
}
fn bench_freelist_release(b: &mut Bencher, size: usize) {
let ids = random_pgids(size);
let pending = random_pgids(ids.len() / 400);
b.iter(|| {
let mut f = FreeList::new();
f.ids.append(&mut ids.to_vec());
f.pending.insert(1, pending.to_vec());
f.release(1)
});
}
fn random_pgids(n: usize) -> Vec<pgid_t> {
let mut result: Vec<pgid_t> = Vec::with_capacity(n);
for _ in 0..n {
result.push(rand::random::<pgid_t>());
}
result
}
}
| freelist_read | identifier_name |
freelist.rs | use types::{txid_t, pgid_t};
use page::{Page, get_page_header_size, merge_pgids, merge_pgids_raw, FREELIST_PAGE_FLAG};
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::mem;
use std::slice;
// FreeList represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
pub struct FreeList {
pub ids: Vec<pgid_t>, // all free and available free page ids
pub pending: HashMap<txid_t, Vec<pgid_t>>, // mapping of soon-to-be free page ids by tx
pub cache: HashSet<pgid_t>, // fast lookup of all free and pending page ids
}
impl FreeList {
pub fn new() -> FreeList {
FreeList{
ids: vec![],
pending: HashMap::new(),
cache: HashSet::new(),
}
}
// returns the size of the page after serialization.
pub fn size(&self) -> usize {
let mut n = self.count();
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n += 1;
}
get_page_header_size() + mem::size_of::<pgid_t>() * n
}
pub fn count(&self) -> usize {
self.free_count() + self.pending_count()
}
pub fn free_count(&self) -> usize {
self.ids.len()
}
pub fn pending_count(&self) -> usize {
let mut count: usize = 0;
for (_, val) in self.pending.iter() {
count += val.len()
}
count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
pub fn copyall(&self, dst: *mut pgid_t) {
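// Safety: dst must point to at least self.count() writable pgid_t slots.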
let mut m = Vec::with_capacity(self.pending_count());
for (_, list) in self.pending.iter() {
let mut copy_list = list.to_vec();
m.append(&mut copy_list);
}
m.sort();
merge_pgids_raw(dst, &self.ids, &m);
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
pub fn allocate(&mut self, n: usize) -> pgid_t {
if self.ids.len() == 0 {
return 0;
}
let mut initial: pgid_t = 0;
let mut previd: pgid_t = 0;
let mut found_index: Option<usize> = None;
for i in 0..self.ids.len() {
let id = self.ids[i];
if id <= 1 {
panic!("invalid page allocation: {}", id);
}
// Reset initial page if this is not contiguous.
if previd == 0 || id - previd != 1 {
initial = id;
}
// If we found a contiguous block then remove it and return it.
if (id - initial) + 1 == n as pgid_t {
found_index = Some(i);
break;
}
previd = id
}
match found_index {
None => 0,
Some(idx) => {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if idx + 1 == n {
self.ids.drain(..idx+1);
} else {
self.ids.drain(idx-n+1..idx+1);
}
// Remove from the free cache
for i in 0..n as pgid_t {
self.cache.remove(&(initial + i));
}
initial
}
}
}
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
pub fn free(&mut self, txid: txid_t, p: Rc<RefCell<Page>>) {
let pgid = p.borrow().id;
if pgid <= 1 {
panic!("cannot free page 0 or 1: {}", pgid);
}
// Free page and all its overflow pages.
if !self.pending.contains_key(&txid) {
self.pending.insert(txid, Vec::new());
}
let ids_option = self.pending.get_mut(&txid);
match ids_option {
None => panic!("pending should not be None"),
Some(ids) => {
for id in pgid..pgid + 1 + p.borrow().overflow as pgid_t {
// Verify that page is not already free.
if self.cache.contains(&id) {
panic!("page {} already freed")
}
// Add to the freelist and cache.
ids.push(id);
self.cache.insert(id);
}
},
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
pub fn release(&mut self, txid: txid_t) {
let mut m: Vec<pgid_t> = Vec::new();
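// Drain every pending list whose transaction id is <= txid, collecting its page ids into m.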
self.pending.retain(|tid, ids| {
if *tid <= txid {
m.append(&mut ids.to_vec());
return false;
}
true
});
m.sort();
let mut new_ids: Vec<pgid_t> = Vec::with_capacity(self.ids.len() + m.len());
merge_pgids(&mut new_ids, &self.ids, &m);
self.ids = new_ids;
}
// rollback removes the pages from a given pending tx.
pub fn rollback(&mut self, txid: txid_t) {
// Remove page ids from cache.
for id in &self.pending[&txid] {
self.cache.remove(id);
}
// Remove pages from pending list
self.pending.remove(&txid);
}
// freed returns whether a given page is in the free list
pub fn freed(&self, pgid: pgid_t) -> bool {
self.cache.contains(&pgid)
}
// read initializes the freelist from a freelist page.
pub fn read(&mut self, p: &Page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
let mut idx: usize = 0;
let mut count: usize = p.count as usize;
if count == 0xFFFF {
idx = 1;
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
count = unsafe { (*pgid_ptr) as usize };
}
// Copy the list of page ids from the freelist
if count == 0 {
self.ids.clear();
} else {
let pgid_ptr = &p.ptr as *const usize as *const pgid_t;
self.ids.reserve(count - idx);
let pgids_slice = unsafe {
slice::from_raw_parts(pgid_ptr.offset(idx as isize), count - idx)
};
self.ids.append(&mut pgids_slice.to_vec());
// Make sure they're sorted.
self.ids.sort();
}
// Rebuild the page cache.
self.reindex(); | // writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
pub fn write(&self, p: &mut Page) {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= FREELIST_PAGE_FLAG;
// The page.count can only hold up to 64k elementes so if we overflow that
// number then we handle it by putting the size in the first element.
let lenids = self.count();
if lenids == 0 {
p.count = lenids as u16;
} else if lenids < 0xFFFF {
p.count = lenids as u16;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr, 0, lenids)
};
*/
self.copyall(pgid_ptr);
} else {
p.count = 0xFFFF;
let mut pgid_ptr = &mut p.ptr as *mut usize as *mut pgid_t;
unsafe {*pgid_ptr = lenids as u64;}
/*
let mut dst = unsafe {
Vec::from_raw_parts(pgid_ptr.offset(1), 0, lenids)
};
*/
self.copyall(unsafe {pgid_ptr.offset(1)});
}
}
// reload reads the freelist from a page and filters out pending items.
pub fn reload(&mut self, p: &Page) {
self.read(p);
// Build a cache of only pending pages.
let mut pcache: HashSet<pgid_t> = HashSet::new();
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
pcache.insert(*pending_id);
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
let mut a: Vec<pgid_t> = Vec::new();
for id in &self.ids {
if !pcache.contains(id) {
a.push(*id);
}
}
self.ids = a;
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
self.reindex();
}
// reindex rebuilds the free cache based on available and pending free lists.
pub fn reindex(&mut self) {
self.cache.clear();
self.cache.reserve(self.ids.len());
for id in &self.ids {
self.cache.insert(*id);
}
for pending_ids in self.pending.values() {
for pending_id in pending_ids {
self.cache.insert(*pending_id);
}
}
}
}
#[cfg(test)]
mod tests {
use freelist::FreeList;
use std::rc::Rc;
use std::cell::RefCell;
use page::{Page, FREELIST_PAGE_FLAG};
use std::collections::{HashMap, HashSet};
use types::pgid_t;
use test::Bencher;
extern crate rand;
#[test]
fn freelist_free() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12]);
}
#[test]
fn freelist_free_overflow() {
let mut f = FreeList::new();
let page = Rc::new(RefCell::new(Page{
id: 12,
flags: 0,
count: 0,
overflow: 3,
ptr: 0,
}));
f.free(100, Rc::clone(&page));
assert_eq!(f.pending[&100], vec![12,13,14,15]);
}
#[test]
fn freelist_release() {
let mut f = FreeList::new();
let page1 = Rc::new(RefCell::new(Page {
id: 12,
flags: 0,
count: 0,
overflow: 1,
ptr: 0,
}));
f.free(100, Rc::clone(&page1));
let page2 = Rc::new(RefCell::new(Page {
id: 9,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(100, Rc::clone(&page2));
let page3 = Rc::new(RefCell::new(Page {
id: 39,
flags: 0,
count: 0,
overflow: 0,
ptr: 0,
}));
f.free(102, Rc::clone(&page3));
f.release(100);
f.release(101);
assert_eq!(f.ids, vec![9,12,13]);
f.release(102);
assert_eq!(f.ids, vec![9,12,13, 39]);
}
#[test]
fn freelist_allocate() {
let mut f = FreeList {
ids: vec![3,4,5,6,7,9,12,13,18],
pending: HashMap::new(),
cache: HashSet::new(),
};
assert_eq!(f.allocate(3), 3);
assert_eq!(f.allocate(1), 6);
assert_eq!(f.allocate(3), 0);
assert_eq!(f.allocate(2), 12);
assert_eq!(f.allocate(1), 7);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.allocate(0), 0);
assert_eq!(f.ids, vec![9,18]);
assert_eq!(f.allocate(1), 9);
assert_eq!(f.allocate(1), 18);
assert_eq!(f.allocate(1), 0);
assert_eq!(f.ids, vec![]);
}
#[test]
fn freelist_read() {
// Create a page.
let mut buf: [u8; 4096] = [0; 4096];
let page: *mut Page = buf.as_mut_ptr() as *mut Page;
unsafe {
(*page).flags = FREELIST_PAGE_FLAG;
(*page).count = 2;
}
// Insert 2 page ids
let ids_ptr: *mut pgid_t = unsafe {
&mut (*page).ptr as *mut usize as *mut pgid_t
};
unsafe {
*ids_ptr = 23;
*ids_ptr.offset(1) = 50;
}
// Deserialize page into a freelist.
let mut f = FreeList::new();
unsafe {
f.read(&(*page));
}
// Ensure that there are two page ids in the freelist.
assert_eq!(f.ids, vec![23, 50]);
}
#[test]
fn freelist_write() {
// Create a freelist and write it to a page.
let mut buf: [u8; 4096] = [0; 4096];
let page: *mut Page = buf.as_mut_ptr() as *mut Page;
let mut f = FreeList {
ids: vec![12, 39],
pending: HashMap::new(),
cache: HashSet::new(),
};
f.pending.insert(100, vec![28, 11]);
f.pending.insert(101, vec![3]);
unsafe { f.write(page.as_mut().unwrap()); };
// Read the page back out
let mut f2 = FreeList::new();
let p_const = page as *const Page;
unsafe {
f2.read(&(*p_const));
}
// Ensure that the freelist is correct.
// All pages should be present and in reverse order.
assert_eq!(f2.ids, vec![3, 11, 12, 28, 39]);
}
#[bench]
fn bench_freelist_release_10k(b: &mut Bencher) {
bench_freelist_release(b, 10000);
}
#[bench]
fn bench_freelist_release_100k(b: &mut Bencher) {
bench_freelist_release(b, 100000);
}
#[bench]
fn bench_freelist_release_1000k(b: &mut Bencher) {
bench_freelist_release(b, 1000000);
}
#[bench]
fn bench_freelist_release_10000k(b: &mut Bencher) {
bench_freelist_release(b, 10000000);
}
fn bench_freelist_release(b: &mut Bencher, size: usize) {
let ids = random_pgids(size);
let pending = random_pgids(ids.len() / 400);
b.iter(|| {
let mut f = FreeList::new();
f.ids.append(&mut ids.to_vec());
f.pending.insert(1, pending.to_vec());
f.release(1)
});
}
fn random_pgids(n: usize) -> Vec<pgid_t> {
let mut result: Vec<pgid_t> = Vec::with_capacity(n);
for _ in 0..n {
result.push(rand::random::<pgid_t>());
}
result
}
} | }
| random_line_split |
13.1.rs | // --- Day 13: Packet Scanners ---
// You need to cross a vast firewall. The firewall consists of several layers, each with a security scanner that moves back and forth across the layer. To succeed, you must not be detected by a scanner.
// By studying the firewall briefly, you are able to record (in your puzzle input) the depth of each layer and the range of the scanning area for the scanner within it, written as depth: range. Each layer has a thickness of exactly 1. A layer at depth 0 begins immediately inside the firewall; a layer at depth 1 would start immediately after that.
// For example, suppose you've recorded the following:
// 0: 3
// 1: 2
// 4: 4
// 6: 4
// This means that there is a layer immediately inside the firewall (with range 3), a second layer immediately after that (with range 2), a third layer which begins at depth 4 (with range 4), and a fourth layer which begins at depth 6 (also with range 4). Visually, it might look like this:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Within each layer, a security scanner moves back and forth within its range. Each security scanner starts at the top and moves down until it reaches the bottom, then moves up until it reaches the top, and repeats. A security scanner takes one picosecond to move one step. Drawing scanners as S, the first few picoseconds look like this:
// Picosecond 0:
//  0   1   2   3   4   5   6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 1:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 2:
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 3:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
// Your plan is to hitch a ride on a packet about to move through the firewall. The packet will travel along the top of each layer, and it moves at one layer per picosecond. Each picosecond, the packet moves one layer forward (its first move takes it into layer 0), and then the scanners move one step. If there is a scanner at the top of the layer as your packet enters it, you are caught. (If a scanner moves into the top of its layer while you are there, you are not caught: it doesn't have time to notice you before you leave.) If you were to do this in the configuration above, marking your current position with parentheses, your passage through the firewall would look like this:
// Initial state:
//  0   1   2   3   4   5   6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 0:
//  0   1   2   3   4   5   6
// (S) [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// ( ) [ ] ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 1:
//  0   1   2   3   4   5   6
// [ ] ( ) ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] (S) ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 2:
//  0   1   2   3   4   5   6
// [ ] [S] (.) ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] (.) ... [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
// Picosecond 3:
//  0   1   2   3   4   5   6
// [ ] [ ] ... (.) [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
//  0   1   2   3   4   5   6
// [S] [S] ... (.) [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 4:
//  0   1   2   3   4   5   6
// [S] [S] ... ... ( ) ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [S]     [S]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... ( ) ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 5:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] (.) [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [S] (.) [S]
// [ ] [ ]         [ ]     [ ]
// [S]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 6:
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [S] ... (S)
// [ ] [ ]         [ ]     [ ]
// [S]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... ( )
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// In this situation, you are caught in layers 0 and 6, because your packet entered the layer when its scanner was at the top when you entered it. You are not caught in layer 1, since the scanner moved into the top of the layer once you were already there.
// The severity of getting caught on a layer is equal to its depth multiplied by its range. (Ignore layers in which you do not get caught.) The severity of the whole trip is the sum of these values. In the example above, the trip severity is 0*3 + 6*4 = 24.
// Given the details of the firewall you've recorded, if you leave immediately, what is the severity of your whole trip?
// Now, you need to pass through the firewall without being caught - easier said than done.
// You can't control the speed of the packet, but you can delay it any number of picoseconds. For each picosecond you delay the packet before beginning your trip, all security scanners move one step. You're not in the firewall during this time; you don't enter layer 0 until you stop delaying the packet.
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
#[derive(Debug)]
struct Scanners {
max_depth: i32,
max_range: i32,
scanners: HashMap<i32, i32>,
}
impl Scanners {
// The position of the scanner at depth d after it has run for time t.
// None if there is no scanner at depth d
fn pos(&self, d: &i32, t: &i32) -> Option<i32> {
if let Some(r) = self.range(&d) {
if *t < 0 {
return Some(0);
}
let mi = r - 1; /* max index of the scanner range */
let unique_positions = r * 2 - 2; /* how many different states the scanner can be in. Whenever the scanner is at an end, it can only be turning around. Whenever a scanner is in a middle position, it could be going back or forward*/
let pos = *t % unique_positions;
if pos < *r {
return Some(pos);
} else {
return Some(mi - (pos % mi));
}
}
None
}
// Does a packet at depth d collide with a scanner at d that has been running for time t?
fn collides(&self, d: &i32, t: &i32) -> bool {
if let Some(r) = self.range(d) {
// There is a collision iff t % (r * 2 - 2) == 0
return *t % (*r * 2 - 2) == 0;
}
false
}
fn range(&self, d: &i32) -> Option<&i32> {
self.scanners.get(d)
}
}
fn | (f: File) -> Scanners {
let buf = BufReader::new(f);
let mut scanners = HashMap::new();
let mut max_depth = 0;
let mut max_range = 0;
for line in buf.lines() {
let split = line.expect("io error")
.split(": ")
.map(|s| s.parse::<i32>().expect("parse int err"))
.collect::<Vec<i32>>();
scanners.insert(split[0], split[1]);
max_depth = if split[0] > max_depth {
split[0]
} else {
max_depth
};
max_range = if split[1] > max_range {
split[1]
} else {
max_range
};
}
Scanners {
max_range,
max_depth,
scanners,
}
}
/* Advent13-1 the total severity of starting at a given offset */
fn severity(offset: &i32, scanners: &Scanners) -> i32 {
let mut severity = 0;
let mut d: i32 = 0;
while d <= scanners.max_depth {
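// A scanner at depth d has advanced d + offset steps by the time the packet reaches it.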
let scanner_time = d + offset;
if let Some(pos) = scanners.pos(&d, &scanner_time) {
if pos == 0 {
let r = scanners.range(&d).unwrap();
// println!("Hit layer {} and got severity {}", d, r*d);
severity += *r * d;
}
}
// debug_print_scanners(&d, &scanner_time, &scanners);
d += 1;
}
severity
}
/* Advent13-2 does an offset result in detection? */
fn detected(offset: &i32, scanners: &Scanners) -> bool {
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if scanners.collides(&d, &scanner_time) {
return true;
}
d += 1;
}
false
}
fn main() {
let fname = "src/13/data";
// let fname = "src/13/testdata";
let f = File::open(fname).expect(&format!("Couldn't open {}", fname));
let scanners = get_scanners(f);
println!(
"Advent 13-1: severity {} at offset 0.",
severity(&0, &scanners)
);
let mut offset = 0;
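// Part 2: brute-force the smallest delay for which no scanner catches the packet.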
while detected(&offset, &scanners) {
offset += 1;
}
println!("Advent 13-2: 0 detections at offset {}.", offset);
}
// /* Advent13-2 the total number of detections starting at a given offset */
// fn detections(offset: &i32, scanners: &Scanners) -> i32 {
// let mut detections = 0;
// let mut d : i32 = 0;
// while d <= scanners.max_depth {
// let scanner_time = d + offset;
// if scanners.collides(&d, &scanner_time) {
// detections += 1;
// }
// // debug_print_scanners(&d, &scanner_time, &scanners);
// d += 1;
// }
// detections
// }
// Print the packet depth and show scanner positions after they've run for t picoseconds
// fn debug_print_scanners(packet_d: &i32, t: &i32, scanners: &Scanners) {
// println!("Picosecond {} ---", t);
// println!("{}", (0..scanners.max_depth + 1).map(|i| format!(" {} ", i)).collect::<Vec<String>>().join(" "));
// for i in 0..scanners.max_range {
// let mut cells = vec![];
// for d in 0..scanners.max_depth + 1 {
// let pos = scanners.pos(&d, &t);
// let cell = match pos {
// Some(p) => {
// if i == 0 && p == 0 && *packet_d == d {
// "[!]"
// }
// else if i == 0 && *packet_d == d {
// "[*]"
// }
// else if i == p {
// "[S]"
// } else if i >= *scanners.range(&d).unwrap() {
// " "
// } else {
// "[ ]"
// }
// },
// _ =>
// if i == 0 && *packet_d == d {
// " * "
// }
// else if i == 0 {
// "..."
// } else {
// " "
// }
// };
// cells.push(cell);
// }
// println!("{}\n", &cells.join(" "));
// }
// }
| get_scanners | identifier_name |
13.1.rs | // --- Day 13: Packet Scanners ---
// You need to cross a vast firewall. The firewall consists of several layers, each with a security scanner that moves back and forth across the layer. To succeed, you must not be detected by a scanner.
// By studying the firewall briefly, you are able to record (in your puzzle input) the depth of each layer and the range of the scanning area for the scanner within it, written as depth: range. Each layer has a thickness of exactly 1. A layer at depth 0 begins immediately inside the firewall; a layer at depth 1 would start immediately after that.
// For example, suppose you've recorded the following:
// 0: 3
// 1: 2
// 4: 4
// 6: 4
// This means that there is a layer immediately inside the firewall (with range 3), a second layer immediately after that (with range 2), a third layer which begins at depth 4 (with range 4), and a fourth layer which begins at depth 6 (also with range 4). Visually, it might look like this:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Within each layer, a security scanner moves back and forth within its range. Each security scanner starts at the top and moves down until it reaches the bottom, then moves up until it reaches the top, and repeats. A security scanner takes one picosecond to move one step. Drawing scanners as S, the first few picoseconds look like this:
// Picosecond 0:
//  0   1   2   3   4   5   6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 1:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 2:
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 3:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
// Your plan is to hitch a ride on a packet about to move through the firewall. The packet will travel along the top of each layer, and it moves at one layer per picosecond. Each picosecond, the packet moves one layer forward (its first move takes it into layer 0), and then the scanners move one step. If there is a scanner at the top of the layer as your packet enters it, you are caught. (If a scanner moves into the top of its layer while you are there, you are not caught: it doesn't have time to notice you before you leave.) If you were to do this in the configuration above, marking your current position with parentheses, your passage through the firewall would look like this:
// Initial state:
// 0 1 2 3 4 5 6
// [S] [S]...... [S]... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// Picosecond 0:
// 0 1 2 3 4 5 6
// (S) [S]...... [S]... [S]
// [ ] [ ] [ ] [ ]
// [ ] [ ] [ ]
// [ ] [ ]
// 0 1 2 3 4 5 6
// ( ) [ ]...... [ ]... [ ]
// [S] [S] [S] [S]
// [ ] [ ] [ ]
| //  0   1   2   3   4   5   6
// [ ] ( ) ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] (S) ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 2:
//  0   1   2   3   4   5   6
// [ ] [S] (.) ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] (.) ... [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
// Picosecond 3:
//  0   1   2   3   4   5   6
// [ ] [ ] ... (.) [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
//  0   1   2   3   4   5   6
// [S] [S] ... (.) [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 4:
//  0   1   2   3   4   5   6
// [S] [S] ... ... ( ) ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [S]     [S]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... ( ) ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 5:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] (.) [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [S] (.) [S]
// [ ] [ ]         [ ]     [ ]
// [S]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 6:
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [S] ... (S)
// [ ] [ ]         [ ]     [ ]
// [S]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... ( )
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// In this situation, you are caught in layers 0 and 6, because your packet entered those layers just as their scanners were at the top. You are not caught in layer 1, since the scanner moved into the top of the layer once you were already there.
// The severity of getting caught on a layer is equal to its depth multiplied by its range. (Ignore layers in which you do not get caught.) The severity of the whole trip is the sum of these values. In the example above, the trip severity is 0*3 + 6*4 = 24.
// Given the details of the firewall you've recorded, if you leave immediately, what is the severity of your whole trip?
// Now, you need to pass through the firewall without being caught - easier said than done.
// You can't control the speed of the packet, but you can delay it any number of picoseconds. For each picosecond you delay the packet before beginning your trip, all security scanners move one step. You're not in the firewall during this time; you don't enter layer 0 until you stop delaying the packet.
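// Note on the arithmetic used by the solution below: a scanner with range r
// returns to the top of its layer every 2*r - 2 picoseconds, so a packet
// delayed by w picoseconds is caught at depth d iff (d + w) % (2*r - 2) == 0.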
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
#[derive(Debug)]
struct Scanners {
max_depth: i32,
max_range: i32,
scanners: HashMap<i32, i32>,
}
impl Scanners {
// The position of the scanner at depth d after it has run for time t.
    // None if there is no scanner at depth d
fn pos(&self, d: &i32, t: &i32) -> Option<i32> {
if let Some(r) = self.range(&d) {
if *t < 0 {
return Some(0);
}
let mi = r - 1; /* max index of the scanner range */
            let unique_positions = r * 2 - 2; /* how many different states the scanner can be in. Whenever the scanner is at an end, it can only be turning around. Whenever a scanner is in a middle position, it could be going back or forward */
let pos = *t % unique_positions;
if pos < *r {
return Some(pos);
} else {
return Some(mi - (pos % mi));
}
}
None
}
// Does a packet at depth d collide with a scanner at d that has been running for time t?
fn collides(&self, d: &i32, t: &i32) -> bool {
if let Some(r) = self.range(d) {
            // There is a collision iff t % (r * 2 - 2) == 0
return *t % (*r * 2 - 2) == 0;
}
false
}
fn range(&self, d: &i32) -> Option<&i32> {
self.scanners.get(d)
}
}
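// A minimal sanity-check sketch (not part of the original solution): a
// range-3 scanner should cycle through positions 0, 1, 2, 1 with period
// 2*3 - 2 = 4.
#[cfg(test)]
mod pos_tests {
    use super::*;

    #[test]
    fn range_three_scanner_has_period_four() {
        let mut map = HashMap::new();
        map.insert(0, 3);
        let s = Scanners { max_depth: 0, max_range: 3, scanners: map };
        let expected = [0, 1, 2, 1, 0, 1, 2, 1];
        for (t, want) in expected.iter().enumerate() {
            assert_eq!(s.pos(&0, &(t as i32)), Some(*want));
        }
    }
}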
fn get_scanners(f: File) -> Scanners {
let buf = BufReader::new(f);
let mut scanners = HashMap::new();
let mut max_depth = 0;
let mut max_range = 0;
for line in buf.lines() {
let split = line.expect("io error")
.split(": ")
.map(|s| s.parse::<i32>().expect("parse int err"))
.collect::<Vec<i32>>();
scanners.insert(split[0], split[1]);
max_depth = if split[0] > max_depth {
split[0]
} else {
max_depth
};
max_range = if split[1] > max_range {
split[1]
} else {
max_range
};
}
Scanners {
max_range,
max_depth,
scanners,
}
}
/* Advent13-1 the total severity of starting at a given offset */
fn severity(offset: &i32, scanners: &Scanners) -> i32 {
let mut severity = 0;
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if let Some(pos) = scanners.pos(&d, &scanner_time) {
if pos == 0 {
let r = scanners.range(&d).unwrap();
// println!("Hit layer {} and got severity {}", d, r*d);
severity += *r * d;
}
}
// debug_print_scanners(&d, &scanner_time, &scanners);
d += 1;
}
severity
}
/* Advent13-2 does an offset result in detection? */
fn detected(offset: &i32, scanners: &Scanners) -> bool {
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if scanners.collides(&d, &scanner_time) {
return true;
}
d += 1;
}
false
}
fn main() {
let fname = "src/13/data";
// let fname = "src/13/testdata";
let f = File::open(fname).expect(&format!("Couldn't open {}", fname));
let scanners = get_scanners(f);
println!(
"Advent 13-1: severity {} at offset 0.",
severity(&0, &scanners)
);
let mut offset = 0;
while detected(&offset, &scanners) {
offset += 1;
}
println!("Advent 13-2: 0 detections at offset {}.", offset);
}
// /* Advent13-2 the total number of detections starting at a given offset */
// fn detections(offset: &i32, scanners: &Scanners) -> i32 {
// let mut detections = 0;
// let mut d : i32 = 0;
// while d <= scanners.max_depth {
// let scanner_time = d + offset;
// if scanners.collides(&d, &scanner_time) {
// detections += 1;
// }
// // debug_print_scanners(&d, &scanner_time, &scanners);
// d += 1;
// }
// detections
// }
// Print the packet depth and show scanner positions after they've run for t picoseconds
// fn debug_print_scanners(packet_d: &i32, t: &i32, scanners: &Scanners) {
// println!("Picosecond {} ---", t);
// println!("{}", (0..scanners.max_depth + 1).map(|i| format!(" {} ", i)).collect::<Vec<String>>().join(" "));
// for i in 0..scanners.max_range {
// let mut cells = vec![];
// for d in 0..scanners.max_depth + 1 {
// let pos = scanners.pos(&d, &t);
// let cell = match pos {
// Some(p) => {
// if i == 0 && p == 0 && *packet_d == d {
// "[!]"
// }
// else if i == 0 && *packet_d == d {
// "[*]"
// }
// else if i == p {
// "[S]"
// } else if i >= *scanners.range(&d).unwrap() {
// " "
// } else {
// "[ ]"
// }
// },
// _ =>
// if i == 0 && *packet_d == d {
// " * "
// }
// else if i == 0 {
// "..."
// } else {
// " "
// }
// };
// cells.push(cell);
// }
// println!("{}\n", &cells.join(" "));
// }
// } | // [ ] [ ]
// Picosecond 1:
| random_line_split |
13.1.rs | // --- Day 13: Packet Scanners ---
// You need to cross a vast firewall. The firewall consists of several layers, each with a security scanner that moves back and forth across the layer. To succeed, you must not be detected by a scanner.
// By studying the firewall briefly, you are able to record (in your puzzle input) the depth of each layer and the range of the scanning area for the scanner within it, written as depth: range. Each layer has a thickness of exactly 1. A layer at depth 0 begins immediately inside the firewall; a layer at depth 1 would start immediately after that.
// For example, suppose you've recorded the following:
// 0: 3
// 1: 2
// 4: 4
// 6: 4
// This means that there is a layer immediately inside the firewall (with range 3), a second layer immediately after that (with range 2), a third layer which begins at depth 4 (with range 4), and a fourth layer which begins at depth 6 (also with range 4). Visually, it might look like this:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Within each layer, a security scanner moves back and forth within its range. Each security scanner starts at the top and moves down until it reaches the bottom, then moves up until it reaches the top, and repeats. A security scanner takes one picosecond to move one step. Drawing scanners as S, the first few picoseconds look like this:
// Picosecond 0:
//  0   1   2   3   4   5   6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 1:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 2:
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 3:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
// Your plan is to hitch a ride on a packet about to move through the firewall. The packet will travel along the top of each layer, and it moves at one layer per picosecond. Each picosecond, the packet moves one layer forward (its first move takes it into layer 0), and then the scanners move one step. If there is a scanner at the top of the layer as your packet enters it, you are caught. (If a scanner moves into the top of its layer while you are there, you are not caught: it doesn't have time to notice you before you leave.) If you were to do this in the configuration above, marking your current position with parentheses, your passage through the firewall would look like this:
// Initial state:
//  0   1   2   3   4   5   6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 0:
//  0   1   2   3   4   5   6
// (S) [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// ( ) [ ] ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 1:
//  0   1   2   3   4   5   6
// [ ] ( ) ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] (S) ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 2:
//  0   1   2   3   4   5   6
// [ ] [S] (.) ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] (.) ... [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
// Picosecond 3:
//  0   1   2   3   4   5   6
// [ ] [ ] ... (.) [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
//  0   1   2   3   4   5   6
// [S] [S] ... (.) [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 4:
//  0   1   2   3   4   5   6
// [S] [S] ... ... ( ) ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [S]     [S]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... ( ) ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 5:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] (.) [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [S] (.) [S]
// [ ] [ ]         [ ]     [ ]
// [S]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 6:
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [S] ... (S)
// [ ] [ ]         [ ]     [ ]
// [S]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... ( )
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// In this situation, you are caught in layers 0 and 6, because your packet entered those layers just as their scanners were at the top. You are not caught in layer 1, since the scanner moved into the top of the layer once you were already there.
// The severity of getting caught on a layer is equal to its depth multiplied by its range. (Ignore layers in which you do not get caught.) The severity of the whole trip is the sum of these values. In the example above, the trip severity is 0*3 + 6*4 = 24.
// Given the details of the firewall you've recorded, if you leave immediately, what is the severity of your whole trip?
// Now, you need to pass through the firewall without being caught - easier said than done.
// You can't control the speed of the packet, but you can delay it any number of picoseconds. For each picosecond you delay the packet before beginning your trip, all security scanners move one step. You're not in the firewall during this time; you don't enter layer 0 until you stop delaying the packet.
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
#[derive(Debug)]
struct Scanners {
max_depth: i32,
max_range: i32,
scanners: HashMap<i32, i32>,
}
impl Scanners {
// The position of the scanner at depth d after it has run for time t.
    // None if there is no scanner at depth d
fn pos(&self, d: &i32, t: &i32) -> Option<i32> {
if let Some(r) = self.range(&d) {
if *t < 0 |
let mi = r - 1; /* max index of the scanner range */
            let unique_positions = r * 2 - 2; /* how many different states the scanner can be in. Whenever the scanner is at an end, it can only be turning around. Whenever a scanner is in a middle position, it could be going back or forward */
let pos = *t % unique_positions;
if pos < *r {
return Some(pos);
} else {
return Some(mi - (pos % mi));
}
}
None
}
// Does a packet at depth d collide with a scanner at d that has been running for time t?
fn collides(&self, d: &i32, t: &i32) -> bool {
if let Some(r) = self.range(d) {
            // There is a collision iff t % (r * 2 - 2) == 0
return *t % (*r * 2 - 2) == 0;
}
false
}
fn range(&self, d: &i32) -> Option<&i32> {
self.scanners.get(d)
}
}
fn get_scanners(f: File) -> Scanners {
let buf = BufReader::new(f);
let mut scanners = HashMap::new();
let mut max_depth = 0;
let mut max_range = 0;
for line in buf.lines() {
let split = line.expect("io error")
.split(": ")
.map(|s| s.parse::<i32>().expect("parse int err"))
.collect::<Vec<i32>>();
scanners.insert(split[0], split[1]);
max_depth = if split[0] > max_depth {
split[0]
} else {
max_depth
};
max_range = if split[1] > max_range {
split[1]
} else {
max_range
};
}
Scanners {
max_range,
max_depth,
scanners,
}
}
/* Advent13-1 the total severity of starting at a given offset */
fn severity(offset: &i32, scanners: &Scanners) -> i32 {
let mut severity = 0;
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if let Some(pos) = scanners.pos(&d, &scanner_time) {
if pos == 0 {
let r = scanners.range(&d).unwrap();
// println!("Hit layer {} and got severity {}", d, r*d);
severity += *r * d;
}
}
// debug_print_scanners(&d, &scanner_time, &scanners);
d += 1;
}
severity
}
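// Worked example as a sketch (not part of the original solution; uses the
// sample firewall from the comments above): layers 0 and 6 are hit, so the
// trip severity is 0*3 + 6*4 = 24.
#[cfg(test)]
mod severity_tests {
    use super::*;

    #[test]
    fn sample_trip_severity_is_24() {
        let mut map = HashMap::new();
        for &(d, r) in &[(0, 3), (1, 2), (4, 4), (6, 4)] {
            map.insert(d, r);
        }
        let s = Scanners { max_depth: 6, max_range: 4, scanners: map };
        assert_eq!(severity(&0, &s), 24);
    }
}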
/* Advent13-2 does an offset result in detection? */
fn detected(offset: &i32, scanners: &Scanners) -> bool {
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if scanners.collides(&d, &scanner_time) {
return true;
}
d += 1;
}
false
}
fn main() {
let fname = "src/13/data";
// let fname = "src/13/testdata";
let f = File::open(fname).expect(&format!("Couldn't open {}", fname));
let scanners = get_scanners(f);
println!(
"Advent 13-1: severity {} at offset 0.",
severity(&0, &scanners)
);
let mut offset = 0;
while detected(&offset, &scanners) {
offset += 1;
}
println!("Advent 13-2: 0 detections at offset {}.", offset);
}
// /* Advent13-2 the total number of detections starting at a given offset */
// fn detections(offset: &i32, scanners: &Scanners) -> i32 {
// let mut detections = 0;
// let mut d : i32 = 0;
// while d <= scanners.max_depth {
// let scanner_time = d + offset;
// if scanners.collides(&d, &scanner_time) {
// detections += 1;
// }
// // debug_print_scanners(&d, &scanner_time, &scanners);
// d += 1;
// }
// detections
// }
// Print the packet depth and show scanner positions after they've run for t picoseconds
// fn debug_print_scanners(packet_d: &i32, t: &i32, scanners: &Scanners) {
// println!("Picosecond {} ---", t);
// println!("{}", (0..scanners.max_depth + 1).map(|i| format!(" {} ", i)).collect::<Vec<String>>().join(" "));
// for i in 0..scanners.max_range {
// let mut cells = vec![];
// for d in 0..scanners.max_depth + 1 {
// let pos = scanners.pos(&d, &t);
// let cell = match pos {
// Some(p) => {
// if i == 0 && p == 0 && *packet_d == d {
// "[!]"
// }
// else if i == 0 && *packet_d == d {
// "[*]"
// }
// else if i == p {
// "[S]"
// } else if i >= *scanners.range(&d).unwrap() {
// " "
// } else {
// "[ ]"
// }
// },
// _ =>
// if i == 0 && *packet_d == d {
// " * "
// }
// else if i == 0 {
// "..."
// } else {
// " "
// }
// };
// cells.push(cell);
// }
// println!("{}\n", &cells.join(" "));
// }
// }
| {
return Some(0);
} | conditional_block |
13.1.rs | // --- Day 13: Packet Scanners ---
// You need to cross a vast firewall. The firewall consists of several layers, each with a security scanner that moves back and forth across the layer. To succeed, you must not be detected by a scanner.
// By studying the firewall briefly, you are able to record (in your puzzle input) the depth of each layer and the range of the scanning area for the scanner within it, written as depth: range. Each layer has a thickness of exactly 1. A layer at depth 0 begins immediately inside the firewall; a layer at depth 1 would start immediately after that.
// For example, suppose you've recorded the following:
// 0: 3
// 1: 2
// 4: 4
// 6: 4
// This means that there is a layer immediately inside the firewall (with range 3), a second layer immediately after that (with range 2), a third layer which begins at depth 4 (with range 4), and a fourth layer which begins at depth 6 (also with range 4). Visually, it might look like this:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Within each layer, a security scanner moves back and forth within its range. Each security scanner starts at the top and moves down until it reaches the bottom, then moves up until it reaches the top, and repeats. A security scanner takes one picosecond to move one step. Drawing scanners as S, the first few picoseconds look like this:
// Picosecond 0:
//  0   1   2   3   4   5   6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 1:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 2:
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 3:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
// Your plan is to hitch a ride on a packet about to move through the firewall. The packet will travel along the top of each layer, and it moves at one layer per picosecond. Each picosecond, the packet moves one layer forward (its first move takes it into layer 0), and then the scanners move one step. If there is a scanner at the top of the layer as your packet enters it, you are caught. (If a scanner moves into the top of its layer while you are there, you are not caught: it doesn't have time to notice you before you leave.) If you were to do this in the configuration above, marking your current position with parentheses, your passage through the firewall would look like this:
// Initial state:
//  0   1   2   3   4   5   6
// [S] [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 0:
//  0   1   2   3   4   5   6
// (S) [S] ... ... [S] ... [S]
// [ ] [ ]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// ( ) [ ] ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 1:
//  0   1   2   3   4   5   6
// [ ] ( ) ... ... [ ] ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] (S) ... ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 2:
//  0   1   2   3   4   5   6
// [ ] [S] (.) ... [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [S]             [S]     [S]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] (.) ... [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
// Picosecond 3:
//  0   1   2   3   4   5   6
// [ ] [ ] ... (.) [ ] ... [ ]
// [S] [S]         [ ]     [ ]
// [ ]             [ ]     [ ]
//                 [S]     [S]
//  0   1   2   3   4   5   6
// [S] [S] ... (.) [ ] ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [S]     [S]
//                 [ ]     [ ]
// Picosecond 4:
//  0   1   2   3   4   5   6
// [S] [S] ... ... ( ) ... [ ]
// [ ] [ ]         [ ]     [ ]
// [ ]             [S]     [S]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... ( ) ... [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 5:
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] (.) [ ]
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [S] (.) [S]
// [ ] [ ]         [ ]     [ ]
// [S]             [ ]     [ ]
//                 [ ]     [ ]
// Picosecond 6:
//  0   1   2   3   4   5   6
// [ ] [S] ... ... [S] ... (S)
// [ ] [ ]         [ ]     [ ]
// [S]             [ ]     [ ]
//                 [ ]     [ ]
//  0   1   2   3   4   5   6
// [ ] [ ] ... ... [ ] ... ( )
// [S] [S]         [S]     [S]
// [ ]             [ ]     [ ]
//                 [ ]     [ ]
// In this situation, you are caught in layers 0 and 6, because your packet entered those layers just as their scanners were at the top. You are not caught in layer 1, since the scanner moved into the top of the layer once you were already there.
// The severity of getting caught on a layer is equal to its depth multiplied by its range. (Ignore layers in which you do not get caught.) The severity of the whole trip is the sum of these values. In the example above, the trip severity is 0*3 + 6*4 = 24.
// Given the details of the firewall you've recorded, if you leave immediately, what is the severity of your whole trip?
// Now, you need to pass through the firewall without being caught - easier said than done.
// You can't control the speed of the packet, but you can delay it any number of picoseconds. For each picosecond you delay the packet before beginning your trip, all security scanners move one step. You're not in the firewall during this time; you don't enter layer 0 until you stop delaying the packet.
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
#[derive(Debug)]
struct Scanners {
max_depth: i32,
max_range: i32,
scanners: HashMap<i32, i32>,
}
impl Scanners {
// The position of the scanner at depth d after it has run for time t.
    // None if there is no scanner at depth d
fn pos(&self, d: &i32, t: &i32) -> Option<i32> {
if let Some(r) = self.range(&d) {
if *t < 0 {
return Some(0);
}
let mi = r - 1; /* max index of the scanner range */
            let unique_positions = r * 2 - 2; /* how many different states the scanner can be in. Whenever the scanner is at an end, it can only be turning around. Whenever a scanner is in a middle position, it could be going back or forward */
let pos = *t % unique_positions;
if pos < *r {
return Some(pos);
} else {
return Some(mi - (pos % mi));
}
}
None
}
// Does a packet at depth d collide with a scanner at d that has been running for time t?
fn collides(&self, d: &i32, t: &i32) -> bool {
if let Some(r) = self.range(d) {
// There is a collision iff t % r * 2 - 2 == 0
return *t % (*r * 2 - 2) == 0;
}
false
}
fn range(&self, d: &i32) -> Option<&i32> {
self.scanners.get(d)
}
}
fn get_scanners(f: File) -> Scanners {
let buf = BufReader::new(f);
let mut scanners = HashMap::new();
let mut max_depth = 0;
let mut max_range = 0;
for line in buf.lines() {
let split = line.expect("io error")
.split(": ")
.map(|s| s.parse::<i32>().expect("parse int err"))
.collect::<Vec<i32>>();
scanners.insert(split[0], split[1]);
max_depth = if split[0] > max_depth {
split[0]
} else {
max_depth
};
max_range = if split[1] > max_range {
split[1]
} else {
max_range
};
}
Scanners {
max_range,
max_depth,
scanners,
}
}
/* Advent13-1 the total severity of starting at a given offset */
fn severity(offset: &i32, scanners: &Scanners) -> i32 {
let mut severity = 0;
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if let Some(pos) = scanners.pos(&d, &scanner_time) {
if pos == 0 {
let r = scanners.range(&d).unwrap();
// println!("Hit layer {} and got severity {}", d, r*d);
severity += *r * d;
}
}
// debug_print_scanners(&d, &scanner_time, &scanners);
d += 1;
}
severity
}
/* Advent13-2 does an offset result in detection? */
fn detected(offset: &i32, scanners: &Scanners) -> bool |
fn main() {
let fname = "src/13/data";
// let fname = "src/13/testdata";
let f = File::open(fname).expect(&format!("Couldn't open {}", fname));
let scanners = get_scanners(f);
println!(
"Advent 13-1: severity {} at offset 0.",
severity(&0, &scanners)
);
let mut offset = 0;
while detected(&offset, &scanners) {
offset += 1;
}
println!("Advent 13-2: 0 detections at offset {}.", offset);
}
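// Sketch for part two (not part of the original solution; uses the sample
// firewall from the comments above): a delay of 10 picoseconds is the first
// one that sneaks past every scanner, while any shorter delay gets caught.
#[cfg(test)]
mod delay_tests {
    use super::*;

    #[test]
    fn sample_minimal_safe_delay_is_10() {
        let mut map = HashMap::new();
        for &(d, r) in &[(0, 3), (1, 2), (4, 4), (6, 4)] {
            map.insert(d, r);
        }
        let s = Scanners { max_depth: 6, max_range: 4, scanners: map };
        for w in 0..10 {
            assert!(detected(&w, &s));
        }
        assert!(!detected(&10, &s));
    }
}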
// /* Advent13-2 the total number of detections starting at a given offset */
// fn detections(offset: &i32, scanners: &Scanners) -> i32 {
// let mut detections = 0;
// let mut d : i32 = 0;
// while d <= scanners.max_depth {
// let scanner_time = d + offset;
// if scanners.collides(&d, &scanner_time) {
// detections += 1;
// }
// // debug_print_scanners(&d, &scanner_time, &scanners);
// d += 1;
// }
// detections
// }
// Print the packet depth and show scanner positions after they've run for t picoseconds
// fn debug_print_scanners(packet_d: &i32, t: &i32, scanners: &Scanners) {
// println!("Picosecond {} ---", t);
// println!("{}", (0..scanners.max_depth + 1).map(|i| format!(" {} ", i)).collect::<Vec<String>>().join(" "));
// for i in 0..scanners.max_range {
// let mut cells = vec![];
// for d in 0..scanners.max_depth + 1 {
// let pos = scanners.pos(&d, &t);
// let cell = match pos {
// Some(p) => {
// if i == 0 && p == 0 && *packet_d == d {
// "[!]"
// }
// else if i == 0 && *packet_d == d {
// "[*]"
// }
// else if i == p {
// "[S]"
// } else if i >= *scanners.range(&d).unwrap() {
// " "
// } else {
// "[ ]"
// }
// },
// _ =>
// if i == 0 && *packet_d == d {
// " * "
// }
// else if i == 0 {
// "..."
// } else {
// " "
// }
// };
// cells.push(cell);
// }
// println!("{}\n", &cells.join(" "));
// }
// }
| {
let mut d: i32 = 0;
while d <= scanners.max_depth {
let scanner_time = d + offset;
if scanners.collides(&d, &scanner_time) {
return true;
}
d += 1;
}
false
} | identifier_body |
lib.rs | //! `pdf_derive` provides a proc macro to derive the Object trait from the `pdf` crate.
//! # Usage
//! There are several ways to derive Object on a struct or enum:
//! ## 1. Struct from PDF Dictionary
//!
//! A lot of dictionary types defined in the PDF 1.7 reference have a finite amount of possible
//! fields. Each of these are usually either required or optional. The latter is achieved by using
//! a `Option<T>` or `Vec<T>` as type of a field.
//!
//! Usually, dictionary types
//! require that the entry `/Type` is some specific string. By default, `pdf_derive` assumes that
//! this should equal the name of the input struct. This can be overridden by setting the `Type`
//! attribute equal to either the expected value of the `/Type` entry, or to `false` in order to
//! omit the type check completely.
//!
//! Checks similar to that of `/Type` can also be specified in the same manner (but the `Type`
//! attribute is special because it accepts a bool).
//!
//! Examples:
//!
//! ```
//! #[derive(Object)]
//! #[pdf(Type="XObject", Subtype="Image")]
//! /// A variant of XObject
//! pub struct ImageDictionary {
//! #[pdf(key="Width")]
//! width: i32,
//! #[pdf(key="Height")]
//! height: i32,
//! // [...]
//! }
//! ```
//!
//! This enforces that the dictionary's `/Type` entry is present and equals `/XObject`, and that the
//! `/Subtype` entry is present and equals `/Image`.
//!
//! Each field in the struct needs to implement `Object`. Implementation is provided already for
//! common types like i32, f32, usize, bool, String (from Primitive::Name), Option<T> and Vec<T>.
//! The two latter are initialized to default if the entry isn't found in the input dictionary.
//! Option<T> is therefore frequently used for fields that are optional according to the PDF
//! reference. Vec<T> can also be used for optional fields that can also be arrays (there are quite
//! a few of those in the PDF specs - one or many). However, as stated, it accepts absence of the
//! entry, so **required** fields of array type aren't yet supported.
//!
//! Lastly, for each field, it's possible to define a default value by setting the `default`
//! attribute to a string that can parse as Rust code.
//!
//! Example:
//!
//! ```
//! #[derive(Object)]
//! #[pdf(Type = "XRef")]
//! pub struct XRefInfo {
//! #[pdf(key = "Filter")]
//! filter: Vec<StreamFilter>,
//! #[pdf(key = "Size")]
//! pub size: i32,
//! #[pdf(key = "Index", default = "vec![0, size]")]
//! pub index: Vec<i32>,
//! // [...]
//! }
//! ```
//!
//!
//! ## 2. Struct from PDF Stream
//! PDF Streams consist of a stream dictionary along with the stream itself. Any struct that
//! derives Object and converts from a stream primitive is assumed to have a field `info: T`,
//! where `T: Object`, and a field `data: Vec<u8>`.
//!
//! When deriving Object for a type that converts from Primitive::Stream, the flag `is_stream`
//! is required in the proc macro attributes.
//!
//! ## 3. Enum from PDF Name
//! Example:
//!
//! ```
//! #[derive(Object, Debug)]
//! pub enum StreamFilter {
//! ASCIIHexDecode,
//! ASCII85Decode,
//! LZWDecode,
//! FlateDecode,
//! JPXDecode,
//! DCTDecode,
//! }
//! ```
//!
//! In this case, `StreamFilter::from_primitive(primitive)` will return Ok(_) only if the primitive
//! is `Primitive::Name` and matches one of the enum variants
#![recursion_limit="128"]
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
use syn::*;
// Debugging:
/*
use std::fs::{OpenOptions};
use std::io::Write;
*/
#[proc_macro_derive(Object, attributes(pdf))]
pub fn object(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_object(&ast);
// Debugging
/*
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open("/tmp/proj/src/main.rs")
.unwrap();
write!(file, "{}", gen).unwrap();
*/
// Return the generated impl
gen.parse().unwrap()
}
/// Returns (key, default, skip)
fn field_attrs(field: &Field) -> (String, Option<String>, bool) {
field.attrs.iter()
.filter_map(|attr| match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
let (mut key, mut default, mut skip) = (None, None, false);
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "key"
=> key = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "default"
=> default = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "skip"
=> skip = true,
                    _ => panic!(r##"Derive error - Supported derive attributes: `key="Key"`, `default="some code"`, `skip`."##)
}
}
let key = match skip {
true => String::from(""),
false => key.expect("attr `key` missing"),
};
Some(( key, default, skip))
},
_ => None
}).next().expect("no pdf meta attribute")
}
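// Illustrative sketch (hypothetical `Demo` struct; assumes the syn 0.11 API
// already used in this file): parse a struct whose field carries the
// `key`/`default` attributes from the module docs and inspect what
// field_attrs extracts.
#[cfg(test)]
mod field_attrs_tests {
    use super::*;

    #[test]
    fn extracts_key_and_default() {
        let src = r#"
            struct Demo {
                #[pdf(key = "Index", default = "vec![0, size]")]
                index: Vec<i32>,
            }
        "#;
        let ast = syn::parse_derive_input(src).unwrap();
        if let Body::Struct(ref data) = ast.body {
            let (key, default, skip) = field_attrs(&data.fields()[0]);
            assert_eq!(key, "Index");
            assert_eq!(default, Some("vec![0, size]".to_string()));
            assert!(!skip);
        } else {
            panic!("expected a struct");
        }
    }
}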
/// Just the attributes for the whole struct
#[derive(Default)]
struct GlobalAttrs {
/// List of checks to do in the dictionary (LHS is the key, RHS is the expected value)
checks: Vec<(String, String)>,
type_name: Option<String>,
type_required: bool,
is_stream: bool,
}
impl GlobalAttrs {
/// The PDF type may be explicitly specified as an attribute with type "Type". Else, it is the name
/// of the struct.
fn from_ast(ast: &DeriveInput) -> GlobalAttrs {
let mut attrs = GlobalAttrs::default();
for attr in &ast.attrs {
match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
// Loop through list of attributes
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, ref value))
=> if ident == "Type" | else {
match *value {
Lit::Str(ref value, _) => attrs.checks.push((String::from(ident.as_ref()), value.clone())),
_ => panic!("Other checks must have RHS String."),
}
},
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "is_stream" => attrs.is_stream = true,
_ => {}
}
}
},
_ => {}
}
}
attrs
}
}
fn impl_object(ast: &DeriveInput) -> quote::Tokens {
let attrs = GlobalAttrs::from_ast(&ast);
if attrs.is_stream {
match ast.body {
Body::Struct(ref data) => impl_object_for_stream(ast, data.fields()),
Body::Enum(_) => panic!("Enum can't be a PDF stream"),
}
} else {
match ast.body {
Body::Struct(ref data) => impl_object_for_struct(ast, data.fields()),
Body::Enum(ref variants) => impl_object_for_enum(ast, variants),
}
}
}
/// Accepts Name to construct enum
fn impl_object_for_enum(ast: &DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let ser_code: Vec<_> = variants.iter().map(|var| {
quote! {
#id::#var => stringify!(#id::#var),
}
}).collect();
let from_primitive_code = impl_from_name(ast, variants);
quote! {
impl #impl_generics ::pdf::object::Object for #id #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "/{}",
match *self {
#( #ser_code )*
}
)
}
fn from_primitive(p: Primitive, _resolve: &Resolve) -> ::pdf::Result<Self> {
#from_primitive_code
}
}
}
}
/// Returns code for from_primitive that accepts Name
fn impl_from_name(ast: &syn::DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let parts: Vec<quote::Tokens> = variants.iter().map(|var| {
quote! {
stringify!(#var) => #id::#var,
}
}).collect();
quote! {
Ok(
match p {
Primitive::Name (name) => {
match name.as_str() {
#( #parts )*
s => bail!(format!("Enum {} from_primitive: no variant {}.", stringify!(#id), s)),
}
}
_ => bail!(::pdf::Error::from(::pdf::ErrorKind::UnexpectedPrimitive { expected: "Name", found: p.get_debug_name() })),
}
)
}
}
/// Accepts Dictionary to construct a struct
fn impl_object_for_struct(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let attrs = GlobalAttrs::from_ast(&ast);
let parts: Vec<_> = fields.iter()
.map(|field| {
let (key, default, skip) = field_attrs(field);
(field.ident.clone(), key, default, skip)
}).collect();
// Implement serialize()
let fields_ser = parts.iter()
.map( |&(ref field, ref key, ref _default, skip)|
if skip {
quote! {}
} else {
quote! {
write!(out, "{} ", #key)?;
self.#field.serialize(out)?;
writeln!(out, "")?;
}
}
);
let checks_code = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
writeln!(out, "/{} /{}", #key, #val)?;
}
);
// Implement from_primitive()
let from_primitive_code = impl_from_dict(ast, fields);
let pdf_type = match attrs.type_name {
Some(ref ty) => quote! { writeln!(out, "/Type /{}", #ty)?; },
None => quote! {}
};
quote! {
impl #impl_generics ::pdf::object::Object for #name #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "<<")?;
#pdf_type
#( #checks_code )*
#(#fields_ser)*
writeln!(out, ">>")?;
Ok(())
}
fn from_primitive(p: Primitive, resolve: &Resolve) -> Result<Self> {
#from_primitive_code
}
}
}
}
/// Note: the struct must have `info` and `data` fields (TODO explain in docs)
fn impl_object_for_stream(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let info_ty = fields.iter()
.filter_map(|field| {
if let Some(ident) = field.ident.as_ref() {
if ident.as_ref() == "info" {
Some(field.ty.clone())
} else {
None
}
} else {
None
}
}).next().unwrap();
quote! {
impl #impl_generics ::pdf::object::Object for #name #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, _out: &mut W) -> ::std::io::Result<()> {
unimplemented!();
/*
writeln!(out, "<<")?;
#type_code
#(#fields_ser)*
writeln!(out, ">>")?;
Ok(())
*/
}
fn from_primitive(p: Primitive, resolve: &Resolve) -> Result<Self> {
let ::pdf::primitive::PdfStream {info, data}
= p.to_stream(resolve).chain_err(|| stringify!(#name))?;
Ok(#name {
info: <#info_ty as Object>::from_primitive(::pdf::primitive::Primitive::Dictionary (info), resolve)?,
data: data,
})
}
}
}
}
/// Returns (let assignments, field assignments)
/// Example:
/// (`let name = ...;`,
/// ` name: name`)
///
fn impl_parts(fields: &[Field]) -> (Vec<quote::Tokens>, Vec<quote::Tokens>) {
(fields.iter().map(|field| {
let (key, default, skip) = field_attrs(field);
if skip {
return quote! {}; // skip this field..
}
let ref name = field.ident;
let ty = field.ty.clone();
if let Some(ref default) = default {
let default = syn::parse_token_trees(&default).expect("Could not parse `default` code as Rust.");
quote! {
let #name = {
let primitive: Option<::pdf::primitive::Primitive>
= dict.remove(#key);
let x: #ty = match primitive {
Some(primitive) => <#ty as Object>::from_primitive(primitive, resolve).chain_err( || stringify!(#name) )?,
None => #( #default )*,
};
x
};
}
} else {
quote! {
let #name = {
match dict.remove(#key) {
Some(primitive) =>
match <#ty as Object>::from_primitive(primitive, resolve) {
Ok(obj) => obj,
Err(e) => bail!(e.chain_err(|| format!("Key {}: cannot convert from primitive to type {}", #key, stringify!(#ty)))),
}
None => // Try to construct T from Primitive::Null
match <#ty as Object>::from_primitive(::pdf::primitive::Primitive::Null, resolve) {
Ok(obj) => obj,
Err(_) => bail!("Object {}, Key {} not found", stringify!(#name), #key),
},
}
// ^ By using Primitive::Null when we don't find the key, we allow 'optional'
// types like Option and Vec to be constructed from non-existing values
};
}
}
}).collect(),
fields.iter().map(|field| {
let ref name = field.ident;
quote! { #name: #name, }
}).collect())
}
/// Returns code for from_primitive that accepts Dictionary
fn impl_from_dict(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let attrs = GlobalAttrs::from_ast(&ast);
let (let_parts, field_parts) = impl_parts(&fields);
let checks: Vec<_> = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
let ty = dict.remove(#key)
.ok_or(::pdf::Error::from(::pdf::ErrorKind::EntryNotFound { key: #key }))?
.to_name()?;
            if ty != #val {
                bail!("[Dict entry /{}] != /{}", #key, #val);
}
}
).collect();
let ty_check = match (attrs.type_name, attrs.type_required) {
(Some(ty), true) => quote! {
let ty = dict.remove("Type")
.ok_or(::pdf::Error::from(::pdf::ErrorKind::EntryNotFound { key: "Type" }))?
.to_name()?;
            if ty != #ty {
                bail!("[Dict entry /{}] != /{}", "Type", #ty);
}
},
(Some(ty), false) => quote! {
match dict.remove("Type") {
                Some(ty) => if ty.to_name()? != #ty {
                    bail!("[Dict entry /{}] != /{}", "Type", #ty);
},
None => {}
}
},
(None, _) => quote!{}
};
quote! {
let mut dict = Dictionary::from_primitive(p, resolve)?;
#ty_check
#( #checks )*
#( #let_parts )*
Ok(#name {
#( #field_parts )*
})
}
}
| {
match *value {
Lit::Str(ref value, _) => {
if value.ends_with("?") {
attrs.type_name = Some(value[.. value.len()-1].to_string());
attrs.type_required = false;
} else {
attrs.type_name = Some(value.clone());
attrs.type_required = true;
}
},
_ => panic!("Value of 'Type' attribute must be a String."),
}
} | conditional_block |
lib.rs | //! `pdf_derive` provides a proc macro to derive the Object trait from the `pdf` crate.
//! # Usage
//! There are several ways to derive Object on a struct or enum:
//! ## 1. Struct from PDF Dictionary
//!
//! A lot of dictionary types defined in the PDF 1.7 reference have a finite amount of possible
//! fields. Each of these are usually either required or optional. The latter is achieved by using
//! a `Option<T>` or `Vec<T>` as type of a field.
//!
//! Usually, dictionary types
//! require that the entry `/Type` is some specific string. By default, `pdf_derive` assumes that
//! this should equal the name of the input struct. This can be overridden by setting the `Type`
//! attribute equal to either the expected value of the `/Type` entry, or to `false` in order to
//! omit the type check completely.
//!
//! Checks similar to that of `/Type` can also be specified in the same manner (but the `Type`
//! attribute is special because it accepts a bool).
//!
//! Examples:
//!
//! ```
//! #[derive(Object)]
//! #[pdf(Type="XObject", Subtype="Image")]
//! /// A variant of XObject
//! pub struct ImageDictionary {
//! #[pdf(key="Width")]
//! width: i32,
//! #[pdf(key="Height")]
//! height: i32,
//! // [...]
//! }
//! ```
//!
//! This enforces that the dictionary's `/Type` entry is present and equals `/XObject`, and that the
//! `/Subtype` entry is present and equals `/Image`.
//!
//! Each field in the struct needs to implement `Object`. Implementation is provided already for
//! common types like i32, f32, usize, bool, String (from Primitive::Name), Option<T> and Vec<T>.
//! The two latter are initialized to default if the entry isn't found in the input dictionary.
//! Option<T> is therefore frequently used for fields that are optional according to the PDF
//! reference. Vec<T> can also be used for optional fields that can also be arrays (there are quite
//! a few of those in the PDF specs - one or many). However, as stated, it accepts absence of the
//! entry, so **required** fields of array type aren't yet supported.
//!
//! Lastly, for each field, it's possible to define a default value by setting the `default`
//! attribute to a string that can parse as Rust code.
//!
//! Example:
//!
//! ```
//! #[derive(Object)]
//! #[pdf(Type = "XRef")]
//! pub struct XRefInfo {
//! #[pdf(key = "Filter")]
//! filter: Vec<StreamFilter>,
//! #[pdf(key = "Size")]
//! pub size: i32,
//! #[pdf(key = "Index", default = "vec![0, size]")]
//! pub index: Vec<i32>,
//! // [...]
//! }
//! ```
//!
//!
//! ## 2. Struct from PDF Stream
//! PDF Streams consist of a stream dictionary along with the stream itself. Any struct that
//! derives Object and converts from a stream primitive is assumed to have a field `info: T`,
//! where `T: Object`, and a field `data: Vec<u8>`.
//!
//! When deriving Object for a type that converts from Primitive::Stream, the flag `is_stream`
//! is required in the proc macro attributes.
//!
//! ## 3. Enum from PDF Name
//! Example:
//!
//! ```
//! #[derive(Object, Debug)]
//! pub enum StreamFilter {
//! ASCIIHexDecode,
//! ASCII85Decode,
//! LZWDecode,
//! FlateDecode,
//! JPXDecode,
//! DCTDecode,
//! }
//! ```
//!
//! In this case, `StreamFilter::from_primitive(primitive)` will return Ok(_) only if the primitive
//! is `Primitive::Name` and matches one of the enum variants
#![recursion_limit="128"]
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
use syn::*;
// Debugging:
/*
use std::fs::{OpenOptions};
use std::io::Write;
*/
#[proc_macro_derive(Object, attributes(pdf))]
pub fn object(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_object(&ast);
// Debugging
/*
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open("/tmp/proj/src/main.rs")
.unwrap();
write!(file, "{}", gen).unwrap();
*/
// Return the generated impl
gen.parse().unwrap()
}
/// Returns (key, default, skip)
fn field_attrs(field: &Field) -> (String, Option<String>, bool) {
field.attrs.iter()
.filter_map(|attr| match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
let (mut key, mut default, mut skip) = (None, None, false);
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "key"
=> key = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "default"
=> default = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "skip"
=> skip = true,
                    _ => panic!(r##"Derive error - Supported derive attributes: `key="Key"`, `default="some code"`, `skip`."##)
}
}
let key = match skip {
true => String::from(""),
false => key.expect("attr `key` missing"),
};
Some(( key, default, skip))
},
_ => None
}).next().expect("no pdf meta attribute")
}
/// Just the attributes for the whole struct
#[derive(Default)]
struct GlobalAttrs {
/// List of checks to do in the dictionary (LHS is the key, RHS is the expected value)
checks: Vec<(String, String)>,
type_name: Option<String>,
type_required: bool,
is_stream: bool,
}
impl GlobalAttrs {
/// The PDF type may be explicitly specified as an attribute with type "Type". Else, it is the name
/// of the struct.
fn from_ast(ast: &DeriveInput) -> GlobalAttrs {
let mut attrs = GlobalAttrs::default();
for attr in &ast.attrs {
match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
// Loop through list of attributes
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, ref value))
=> if ident == "Type" {
match *value {
Lit::Str(ref value, _) => {
if value.ends_with("?") {
attrs.type_name = Some(value[.. value.len()-1].to_string());
attrs.type_required = false;
} else {
attrs.type_name = Some(value.clone());
attrs.type_required = true;
}
},
_ => panic!("Value of 'Type' attribute must be a String."),
}
} else {
match *value {
Lit::Str(ref value, _) => attrs.checks.push((String::from(ident.as_ref()), value.clone())),
_ => panic!("Other checks must have RHS String."),
}
},
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "is_stream" => attrs.is_stream = true,
_ => {}
}
}
},
_ => {}
}
}
attrs
}
}
fn impl_object(ast: &DeriveInput) -> quote::Tokens |
/// Accepts Name to construct enum
fn impl_object_for_enum(ast: &DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let ser_code: Vec<_> = variants.iter().map(|var| {
quote! {
#id::#var => stringify!(#id::#var),
}
}).collect();
let from_primitive_code = impl_from_name(ast, variants);
quote! {
impl #impl_generics ::pdf::object::Object for #id #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "/{}",
match *self {
#( #ser_code )*
}
)
}
fn from_primitive(p: Primitive, _resolve: &Resolve) -> ::pdf::Result<Self> {
#from_primitive_code
}
}
}
}
/// Returns code for from_primitive that accepts Name
fn impl_from_name(ast: &syn::DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let parts: Vec<quote::Tokens> = variants.iter().map(|var| {
quote! {
stringify!(#var) => #id::#var,
}
}).collect();
quote! {
Ok(
match p {
Primitive::Name (name) => {
match name.as_str() {
#( #parts )*
s => bail!(format!("Enum {} from_primitive: no variant {}.", stringify!(#id), s)),
}
}
_ => bail!(::pdf::Error::from(::pdf::ErrorKind::UnexpectedPrimitive { expected: "Name", found: p.get_debug_name() })),
}
)
}
}
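// For the StreamFilter enum in the module docs, the generated from_primitive
// body expands to roughly the following (illustrative sketch, not literal
// macro output):
//
//     Ok(match p {
//         Primitive::Name(name) => match name.as_str() {
//             "ASCIIHexDecode" => StreamFilter::ASCIIHexDecode,
//             "ASCII85Decode" => StreamFilter::ASCII85Decode,
//             // ...one arm per variant...
//             s => bail!(format!("Enum StreamFilter from_primitive: no variant {}.", s)),
//         },
//         _ => bail!(/* UnexpectedPrimitive error */),
//     })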
/// Accepts Dictionary to construct a struct
fn impl_object_for_struct(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let attrs = GlobalAttrs::from_ast(&ast);
let parts: Vec<_> = fields.iter()
.map(|field| {
let (key, default, skip) = field_attrs(field);
(field.ident.clone(), key, default, skip)
}).collect();
// Implement serialize()
let fields_ser = parts.iter()
.map( |&(ref field, ref key, ref _default, skip)|
if skip {
quote! {}
} else {
quote! {
write!(out, "{} ", #key)?;
self.#field.serialize(out)?;
writeln!(out, "")?;
}
}
);
let checks_code = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
writeln!(out, "/{} /{}", #key, #val)?;
}
);
// Implement from_primitive()
let from_primitive_code = impl_from_dict(ast, fields);
let pdf_type = match attrs.type_name {
Some(ref ty) => quote! { writeln!(out, "/Type /{}", #ty)?; },
None => quote! {}
};
quote! {
impl #impl_generics ::pdf::object::Object for #name #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "<<")?;
#pdf_type
#( #checks_code )*
#(#fields_ser)*
writeln!(out, ">>")?;
Ok(())
}
fn from_primitive(p: Primitive, resolve: &Resolve) -> Result<Self> {
#from_primitive_code
}
}
}
}
/// Note: the struct must have `info` and `data` fields (TODO explain in docs)
fn impl_object_for_stream(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let info_ty = fields.iter()
.filter_map(|field| {
if let Some(ident) = field.ident.as_ref() {
if ident.as_ref() == "info" {
Some(field.ty.clone())
} else {
None
}
} else {
None
}
}).next().unwrap();
quote! {
impl #impl_generics ::pdf::object::Object for #name #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, _out: &mut W) -> ::std::io::Result<()> {
unimplemented!();
/*
writeln!(out, "<<")?;
#type_code
#(#fields_ser)*
writeln!(out, ">>")?;
Ok(())
*/
}
fn from_primitive(p: Primitive, resolve: &Resolve) -> Result<Self> {
let ::pdf::primitive::PdfStream {info, data}
= p.to_stream(resolve).chain_err(|| stringify!(#name))?;
Ok(#name {
info: <#info_ty as Object>::from_primitive(::pdf::primitive::Primitive::Dictionary (info), resolve)?,
data: data,
})
}
}
}
}
/// Returns (let assignments, field assignments)
/// Example:
/// (`let name = ...;`,
/// ` name: name`)
///
fn impl_parts(fields: &[Field]) -> (Vec<quote::Tokens>, Vec<quote::Tokens>) {
(fields.iter().map(|field| {
let (key, default, skip) = field_attrs(field);
if skip {
return quote! {}; // skip this field..
}
let ref name = field.ident;
let ty = field.ty.clone();
if let Some(ref default) = default {
let default = syn::parse_token_trees(&default).expect("Could not parse `default` code as Rust.");
quote! {
let #name = {
let primitive: Option<::pdf::primitive::Primitive>
= dict.remove(#key);
let x: #ty = match primitive {
Some(primitive) => <#ty as Object>::from_primitive(primitive, resolve).chain_err( || stringify!(#name) )?,
None => #( #default )*,
};
x
};
}
} else {
quote! {
let #name = {
match dict.remove(#key) {
Some(primitive) =>
match <#ty as Object>::from_primitive(primitive, resolve) {
Ok(obj) => obj,
Err(e) => bail!(e.chain_err(|| format!("Key {}: cannot convert from primitive to type {}", #key, stringify!(#ty)))),
}
None => // Try to construct T from Primitive::Null
match <#ty as Object>::from_primitive(::pdf::primitive::Primitive::Null, resolve) {
Ok(obj) => obj,
Err(_) => bail!("Object {}, Key {} not found", stringify!(#name), #key),
},
}
// ^ By using Primitive::Null when we don't find the key, we allow 'optional'
// types like Option and Vec to be constructed from non-existing values
};
}
}
}).collect(),
fields.iter().map(|field| {
let ref name = field.ident;
quote! { #name: #name, }
}).collect())
}
/// Returns code for from_primitive that accepts Dictionary
fn impl_from_dict(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let attrs = GlobalAttrs::from_ast(&ast);
let (let_parts, field_parts) = impl_parts(&fields);
let checks: Vec<_> = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
let ty = dict.remove(#key)
.ok_or(::pdf::Error::from(::pdf::ErrorKind::EntryNotFound { key: #key }))?
.to_name()?;
            if ty != #val {
                bail!("[Dict entry /{}] != /{}", #key, #val);
}
}
).collect();
let ty_check = match (attrs.type_name, attrs.type_required) {
(Some(ty), true) => quote! {
let ty = dict.remove("Type")
.ok_or(::pdf::Error::from(::pdf::ErrorKind::EntryNotFound { key: "Type" }))?
.to_name()?;
            if ty != #ty {
                bail!("[Dict entry /{}] != /{}", "Type", #ty);
}
},
(Some(ty), false) => quote! {
match dict.remove("Type") {
                Some(ty) => if ty.to_name()? != #ty {
                    bail!("[Dict entry /{}] != /{}", "Type", #ty);
},
None => {}
}
},
(None, _) => quote!{}
};
quote! {
let mut dict = Dictionary::from_primitive(p, resolve)?;
#ty_check
#( #checks )*
#( #let_parts )*
Ok(#name {
#( #field_parts )*
})
}
}
| {
let attrs = GlobalAttrs::from_ast(&ast);
if attrs.is_stream {
match ast.body {
Body::Struct(ref data) => impl_object_for_stream(ast, data.fields()),
Body::Enum(_) => panic!("Enum can't be a PDF stream"),
}
} else {
match ast.body {
Body::Struct(ref data) => impl_object_for_struct(ast, data.fields()),
Body::Enum(ref variants) => impl_object_for_enum(ast, variants),
}
}
} | identifier_body |
lib.rs | //! `pdf_derive` provides a proc macro to derive the Object trait from the `pdf` crate.
//! # Usage
//! There are several ways to derive Object on a struct or enum:
//! ## 1. Struct from PDF Dictionary
//!
//! A lot of dictionary types defined in the PDF 1.7 reference have a finite amount of possible
//! fields. Each of these are usually either required or optional. The latter is achieved by using
//! a `Option<T>` or `Vec<T>` as type of a field.
//!
//! Usually, dictionary types
//! require that the entry `/Type` is some specific string. By default, `pdf_derive` assumes that
//! this should equal the name of the input struct. This can be overridden by setting the `Type`
//! attribute equal to either the expected value of the `/Type` entry, or to `false` in order to
//! omit the type check completely.
//!
//! Checks similar to that of `/Type` can also be specified in the same manner (but the `Type`
//! attribute is special because it accepts a bool).
//!
//! Examples:
//!
//! ```
//! #[derive(Object)]
//! #[pdf(Type="XObject", Subtype="Image")]
//! /// A variant of XObject
//! pub struct ImageDictionary {
//! #[pdf(key="Width")]
//! width: i32,
//! #[pdf(key="Height")]
//! height: i32,
//! // [...]
//! }
//! ```
//!
//! This enforces that the dictionary's `/Type` entry is present and equals `/XObject`, and that the
//! `/Subtype` entry is present and equals `/Image`.
//!
//! Each field in the struct needs to implement `Object`. Implementation is provided already for
//! common types like i32, f32, usize, bool, String (from Primitive::Name), Option<T> and Vec<T>.
//! The two latter are initialized to default if the entry isn't found in the input dictionary.
//! Option<T> is therefore frequently used for fields that are optional according to the PDF
//! reference. Vec<T> can also be used for optional fields that can also be arrays (there are quite
//! a few of those in the PDF specs - one or many). However, as stated, it accepts absence of the
//! entry, so **required** fields of array type aren't yet supported.
//!
//! Lastly, for each field, it's possible to define a default value by setting the `default`
//! attribute to a string that can parse as Rust code.
//!
//! Example:
//!
//! ```
//! #[derive(Object)]
//! #[pdf(Type = "XRef")]
//! pub struct XRefInfo {
//! #[pdf(key = "Filter")]
//! filter: Vec<StreamFilter>,
//! #[pdf(key = "Size")]
//! pub size: i32,
//! #[pdf(key = "Index", default = "vec![0, size]")]
//! pub index: Vec<i32>,
//! // [...]
//! }
//! ```
//!
//!
//! ## 2. Struct from PDF Stream
//! PDF Streams consist of a stream dictionary along with the stream itself. Any struct that
//! derives Object and converts from a stream primitive is assumed to have a field `info: T`,
//! where `T: Object`, and a field `data: Vec<u8>`.
//!
//! When deriving Object for a type that converts from Primitive::Stream, the flag `is_stream`
//! is required in the proc macro attributes.
//!
//! ## 3. Enum from PDF Name
//! Example:
//!
//! ```
//! #[derive(Object, Debug)]
//! pub enum StreamFilter {
//! ASCIIHexDecode,
//! ASCII85Decode,
//! LZWDecode,
//! FlateDecode,
//! JPXDecode,
//! DCTDecode,
//! }
//! ```
//!
//! In this case, `StreamFilter::from_primitive(primitive)` will return Ok(_) only if the primitive
//! is `Primitive::Name` and matches one of the enum variants
#![recursion_limit="128"]
extern crate proc_macro;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
use syn::*;
// Debugging:
/*
use std::fs::{OpenOptions};
use std::io::Write;
*/
#[proc_macro_derive(Object, attributes(pdf))]
pub fn object(input: TokenStream) -> TokenStream {
// Construct a string representation of the type definition
let s = input.to_string();
// Parse the string representation
let ast = syn::parse_derive_input(&s).unwrap();
// Build the impl
let gen = impl_object(&ast);
// Debugging
/*
let mut file = OpenOptions::new()
.write(true)
.append(true)
.open("/tmp/proj/src/main.rs")
.unwrap();
write!(file, "{}", gen).unwrap();
*/
// Return the generated impl
gen.parse().unwrap()
}
/// Returns (key, default, skip)
fn field_attrs(field: &Field) -> (String, Option<String>, bool) {
field.attrs.iter()
.filter_map(|attr| match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
let (mut key, mut default, mut skip) = (None, None, false);
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "key"
=> key = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, Lit::Str(ref value, _)))
if ident == "default"
=> default = Some(value.clone()),
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "skip"
=> skip = true,
                    _ => panic!(r##"Derive error - Supported derive attributes: `key="Key"`, `default="some code"`, `skip`."##)
}
}
let key = match skip {
true => String::from(""),
false => key.expect("attr `key` missing"),
};
Some(( key, default, skip))
},
_ => None
}).next().expect("no pdf meta attribute")
}
/// Just the attributes for the whole struct
#[derive(Default)]
struct GlobalAttrs {
/// List of checks to do in the dictionary (LHS is the key, RHS is the expected value)
checks: Vec<(String, String)>,
type_name: Option<String>,
type_required: bool,
is_stream: bool,
}
impl GlobalAttrs {
/// The PDF type may be explicitly specified as an attribute with type "Type". Else, it is the name
/// of the struct.
fn from_ast(ast: &DeriveInput) -> GlobalAttrs {
let mut attrs = GlobalAttrs::default();
for attr in &ast.attrs {
match attr.value {
MetaItem::List(ref ident, ref list) if ident == "pdf" => {
// Loop through list of attributes
for meta in list {
match *meta {
NestedMetaItem::MetaItem(MetaItem::NameValue(ref ident, ref value))
=> if ident == "Type" {
match *value {
Lit::Str(ref value, _) => {
if value.ends_with("?") {
attrs.type_name = Some(value[.. value.len()-1].to_string());
attrs.type_required = false;
} else {
attrs.type_name = Some(value.clone());
attrs.type_required = true;
}
},
_ => panic!("Value of 'Type' attribute must be a String."),
}
} else {
match *value {
Lit::Str(ref value, _) => attrs.checks.push((String::from(ident.as_ref()), value.clone())),
_ => panic!("Other checks must have RHS String."),
}
},
NestedMetaItem::MetaItem(MetaItem::Word(ref ident))
if ident == "is_stream" => attrs.is_stream = true,
_ => {}
}
}
},
_ => {}
}
}
attrs
}
}
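// Example of the "Type" convention parsed above (hypothetical type names):
// #[pdf(Type = "Pages")] requires the dictionary's /Type entry to equal
// /Pages, while #[pdf(Type = "Pages?")] records the same expected name but
// marks the check optional (type_required = false).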
fn impl_object(ast: &DeriveInput) -> quote::Tokens {
let attrs = GlobalAttrs::from_ast(&ast);
if attrs.is_stream { | match ast.body {
Body::Struct(ref data) => impl_object_for_stream(ast, data.fields()),
Body::Enum(_) => panic!("Enum can't be a PDF stream"),
}
} else {
match ast.body {
Body::Struct(ref data) => impl_object_for_struct(ast, data.fields()),
Body::Enum(ref variants) => impl_object_for_enum(ast, variants),
}
}
}
/// Derives `Object` for an enum whose values are constructed from a PDF Name
fn impl_object_for_enum(ast: &DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let ser_code: Vec<_> = variants.iter().map(|var| {
quote! {
#id::#var => stringify!(#id::#var),
}
}).collect();
let from_primitive_code = impl_from_name(ast, variants);
quote! {
impl #impl_generics ::pdf::object::Object for #id #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "/{}",
match *self {
#( #ser_code )*
}
)
}
fn from_primitive(p: Primitive, _resolve: &Resolve) -> ::pdf::Result<Self> {
#from_primitive_code
}
}
}
}
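// A rough sketch of what the generated impl looks like for `enum Foo { A, B }`
// (hand-expanded here for illustration; not emitted verbatim):
//
//     impl ::pdf::object::Object for Foo {
//         fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
//             writeln!(out, "/{}", match *self { Foo::A => "Foo::A", Foo::B => "Foo::B" })
//         }
//         // from_primitive matches on the bare variant name, e.g. "A" => Foo::A.
//     }
//
// Note the asymmetry: serialize writes the stringified path (`Foo::A`) while
// from_primitive matches the bare variant name (`A`).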
/// Returns code for from_primitive that accepts Name
fn impl_from_name(ast: &syn::DeriveInput, variants: &Vec<Variant>) -> quote::Tokens {
let id = &ast.ident;
let parts: Vec<quote::Tokens> = variants.iter().map(|var| {
quote! {
stringify!(#var) => #id::#var,
}
}).collect();
quote! {
Ok(
match p {
Primitive::Name (name) => {
match name.as_str() {
#( #parts )*
s => bail!(format!("Enum {} from_primitive: no variant {}.", stringify!(#id), s)),
}
}
_ => bail!(::pdf::Error::from(::pdf::ErrorKind::UnexpectedPrimitive { expected: "Name", found: p.get_debug_name() })),
}
)
}
}
/// Accepts Dictionary to construct a struct
fn impl_object_for_struct(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let attrs = GlobalAttrs::from_ast(&ast);
let parts: Vec<_> = fields.iter()
.map(|field| {
let (key, default, skip) = field_attrs(field);
(field.ident.clone(), key, default, skip)
}).collect();
// Implement serialize()
let fields_ser = parts.iter()
.map( |&(ref field, ref key, ref _default, skip)|
if skip {
quote! {}
} else {
quote! {
write!(out, "{} ", #key)?;
self.#field.serialize(out)?;
writeln!(out, "")?;
}
}
);
let checks_code = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
writeln!(out, "/{} /{}", #key, #val)?;
}
);
// Implement from_primitive()
let from_primitive_code = impl_from_dict(ast, fields);
let pdf_type = match attrs.type_name {
Some(ref ty) => quote! { writeln!(out, "/Type /{}", #ty)?; },
None => quote! {}
};
quote! {
impl #impl_generics ::pdf::object::Object for #name #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, out: &mut W) -> ::std::io::Result<()> {
writeln!(out, "<<")?;
#pdf_type
#( #checks_code )*
#(#fields_ser)*
writeln!(out, ">>")?;
Ok(())
}
fn from_primitive(p: Primitive, resolve: &Resolve) -> Result<Self> {
#from_primitive_code
}
}
}
}
/// Note: the struct must have an `info` field (any `T: Object`) and a `data: Vec<u8>` field
/// (TODO explain in docs)
fn impl_object_for_stream(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
let info_ty = fields.iter()
.filter_map(|field| {
if let Some(ident) = field.ident.as_ref() {
if ident.as_ref() == "info" {
Some(field.ty.clone())
} else {
None
}
} else {
None
}
}).next().unwrap();
quote! {
impl #impl_generics ::pdf::object::Object for #name #ty_generics #where_clause {
fn serialize<W: ::std::io::Write>(&self, _out: &mut W) -> ::std::io::Result<()> {
unimplemented!();
/*
writeln!(out, "<<")?;
#type_code
#(#fields_ser)*
writeln!(out, ">>")?;
Ok(())
*/
}
fn from_primitive(p: Primitive, resolve: &Resolve) -> Result<Self> {
let ::pdf::primitive::PdfStream {info, data}
= p.to_stream(resolve).chain_err(|| stringify!(#name))?;
Ok(#name {
info: <#info_ty as Object>::from_primitive(::pdf::primitive::Primitive::Dictionary (info), resolve)?,
data: data,
})
}
}
}
}
/// Returns (let assignments, field assignments)
/// Example:
/// (`let name = ...;`,
/// `name: name,`)
///
fn impl_parts(fields: &[Field]) -> (Vec<quote::Tokens>, Vec<quote::Tokens>) {
(fields.iter().map(|field| {
let (key, default, skip) = field_attrs(field);
if skip {
return quote! {}; // skip this field
}
let ref name = field.ident;
let ty = field.ty.clone();
if let Some(ref default) = default {
let default = syn::parse_token_trees(&default).expect("Could not parse `default` code as Rust.");
quote! {
let #name = {
let primitive: Option<::pdf::primitive::Primitive>
= dict.remove(#key);
let x: #ty = match primitive {
Some(primitive) => <#ty as Object>::from_primitive(primitive, resolve).chain_err( || stringify!(#name) )?,
None => #( #default )*,
};
x
};
}
} else {
quote! {
let #name = {
match dict.remove(#key) {
Some(primitive) =>
match <#ty as Object>::from_primitive(primitive, resolve) {
Ok(obj) => obj,
Err(e) => bail!(e.chain_err(|| format!("Key {}: cannot convert from primitive to type {}", #key, stringify!(#ty)))),
}
None => // Try to construct T from Primitive::Null
match <#ty as Object>::from_primitive(::pdf::primitive::Primitive::Null, resolve) {
Ok(obj) => obj,
Err(_) => bail!("Object {}, Key {} not found", stringify!(#name), #key),
},
}
// ^ By using Primitive::Null when we don't find the key, we allow 'optional'
// types like Option and Vec to be constructed from non-existing values
};
}
}
}).collect(),
fields.iter().map(|field| {
let ref name = field.ident;
quote! { #name: #name, }
}).collect())
}
/// Returns code for from_primitive that accepts Dictionary
fn impl_from_dict(ast: &DeriveInput, fields: &[Field]) -> quote::Tokens {
let name = &ast.ident;
let attrs = GlobalAttrs::from_ast(&ast);
let (let_parts, field_parts) = impl_parts(&fields);
let checks: Vec<_> = attrs.checks.iter().map(|&(ref key, ref val)|
quote! {
let ty = dict.remove(#key)
.ok_or(::pdf::Error::from(::pdf::ErrorKind::EntryNotFound { key: #key }))?
.to_name()?;
if ty != #val {
bail!("[Dict entry /{}] != /{}", #key, #val);
}
}
).collect();
let ty_check = match (attrs.type_name, attrs.type_required) {
(Some(ty), true) => quote! {
let ty = dict.remove("Type")
.ok_or(::pdf::Error::from(::pdf::ErrorKind::EntryNotFound { key: "Type" }))?
.to_name()?;
if ty != #ty {
bail!("[Dict entry /{}] != /{}", "Type", #ty);
}
},
(Some(ty), false) => quote! {
match dict.remove("Type") {
Some(ty) => if ty.to_name()? != #ty {
bail!("[Dict entry /{}] != /{}", "Type", #ty);
},
None => {}
}
},
(None, _) => quote!{}
};
quote! {
let mut dict = Dictionary::from_primitive(p, resolve)?;
#ty_check
#( #checks )*
#( #let_parts )*
Ok(#name {
#( #field_parts )*
})
}
}
main.rs
error::UrlParseSnafu {
url: &config.targets_base_url,
})?,
)
.transport(transport)
.load()
.context(error::MetadataSnafu)
}
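/// Collect every update in the manifest that could apply to this host: matching variant
/// and target architecture, an update version no greater than its own max_version, and
/// (unless waves are ignored) an update wave that has already opened for this host's
/// seed. The result is sorted newest-first.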
fn applicable_updates<'a>(
manifest: &'a Manifest,
variant: &str,
ignore_waves: bool,
seed: u32,
) -> Vec<&'a Update> {
let mut updates: Vec<&Update> = manifest
.updates
.iter()
.filter(|u| {
u.variant == *variant
&& u.arch == TARGET_ARCH
&& u.version <= u.max_version
&& (ignore_waves || u.update_ready(seed, Utc::now()))
})
.collect();
// sort descending
updates.sort_unstable_by(|a, b| b.version.cmp(&a.version));
updates
}
// TODO use config if there is api-sourced configuration that could affect this
// TODO updog.toml may include settings that cause us to ignore/delay
// certain/any updates;
// Ignore Specific Target Version
// Ignore Any Target
// ...
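/// Decide which update, if any, to move to. A forced version is honored outright; a
/// version-lock other than "latest" pins us to exactly that version (None if we already
/// run it); otherwise the newest applicable update wins whenever our running version is
/// below it or above its max_version. Illustrative outcomes (hypothetical versions):
/// running 1.18.0 with updates [1.20.0, 1.19.0] and lock "latest" selects 1.20.0, while
/// lock "v1.19.0" selects 1.19.0 and returns None once 1.19.0 is running.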
fn update_required<'a>(
manifest: &'a Manifest,
version: &Version,
variant: &str,
ignore_waves: bool,
seed: u32,
version_lock: &str,
force_version: Option<Version>,
) -> Result<Option<&'a Update>> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if let Some(forced_version) = force_version {
return Ok(updates.into_iter().find(|u| u.version == forced_version));
}
if version_lock != "latest" {
// Make sure the version string from the config is a valid version string that might be prefixed with 'v'
let friendly_version_lock =
FriendlyVersion::try_from(version_lock).context(error::BadVersionConfigSnafu {
version_str: version_lock,
})?;
// Convert back to semver::Version
let semver_version_lock =
friendly_version_lock
.try_into()
.context(error::BadVersionSnafu {
version_str: version_lock,
})?;
// If the configured version-lock matches our current version, we won't update to the same version
return if semver_version_lock == *version {
Ok(None)
} else {
Ok(updates
.into_iter()
.find(|u| u.version == semver_version_lock))
};
}
for update in updates {
// If the current running version is greater than the max version ever published,
// or moves us to a valid version <= the maximum version, update.
if *version < update.version || *version > update.max_version {
return Ok(Some(update));
}
}
Ok(None)
}
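/// Fetch a single TUF target from the repository and stream it through LZ4 decompression
/// onto the given path, which in practice is a partition of the inactive set.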
fn write_target_to_disk<P: AsRef<Path>>(
repository: &Repository,
target: &str,
disk_path: P,
) -> Result<()> {
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
let reader = repository
.read_target(&target)
.context(error::MetadataSnafu)?
.context(error::TargetNotFoundSnafu {
target: target.raw(),
})?;
// Note: the file extension for the compression type we're using should be removed in
// retrieve_migrations below.
let mut reader = lz4::Decoder::new(reader).context(error::Lz4DecodeSnafu {
target: target.raw(),
})?;
let mut f = OpenOptions::new()
.write(true)
.create(true)
.open(disk_path.as_ref())
.context(error::OpenPartitionSnafu {
path: disk_path.as_ref(),
})?;
io::copy(&mut reader, &mut f).context(error::WriteUpdateSnafu)?;
Ok(())
}
/// Store required migrations for an update in persistent storage. All intermediate migrations
/// between the current version and the target version must be retrieved.
fn retrieve_migrations(
repository: &Repository,
query_params: &mut QueryParams,
manifest: &Manifest,
update: &Update,
current_version: &Version,
) -> Result<()> {
// the migrations required for foo to bar and bar to foo are
// the same; we can pretend we're always upgrading from foo to
// bar and use the same logic to obtain the migrations
let target = std::cmp::max(&update.version, current_version);
let start = std::cmp::min(&update.version, current_version);
let dir = Path::new(MIGRATION_PATH);
if !dir.exists() {
fs::create_dir(dir).context(error::DirCreateSnafu { path: &dir })?;
}
// find the list of migrations in the manifest based on our from and to versions.
let mut targets = find_migrations(start, target, manifest)?;
// we need to store the manifest so that migrator can independently and securely determine the
// migration list. this is true even if there are no migrations.
targets.push("manifest.json".to_owned());
repository
.cache(METADATA_PATH, MIGRATION_PATH, Some(&targets), true)
.context(error::RepoCacheMigrationsSnafu)?;
// Set a query parameter listing the required migrations
query_params.add("migrations", targets.join(","));
Ok(())
}
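/// Write the update's root/boot/hash images into the inactive partition set. The inactive
/// set is cleared (and that state persisted) before any image is written, and is only
/// marked valid again once all three images are in place.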
fn update_image(update: &Update, repository: &Repository) -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.clear_inactive();
// Write out the clearing of the inactive partition immediately, because we're about to
// overwrite the partition set with update data and don't want it to be used until we
// know we're done with all components.
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
let inactive = gpt_state.inactive_set();
// TODO Do we want to recover the inactive side on an error?
write_target_to_disk(repository, &update.images.root, &inactive.root)?;
write_target_to_disk(repository, &update.images.boot, &inactive.boot)?;
write_target_to_disk(repository, &update.images.hash, &inactive.hash)?;
gpt_state.mark_inactive_valid();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state
.upgrade_to_inactive()
.context(error::InactivePartitionUpgradeSnafu)?;
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn revert_update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.cancel_upgrade();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn set_common_query_params(
query_params: &mut QueryParams,
current_version: &Version,
config: &Config,
) {
query_params.add("version", current_version.to_string());
query_params.add("seed", config.seed.to_string());
}
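// The parameters added above surface on every metadata request as a query string along
// the lines of `?version=1.18.0&seed=123` (illustrative values), which the update
// infrastructure can use for metrics.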
/// List any available update that matches the current variant
fn list_updates(
manifest: &Manifest,
variant: &str,
json: bool,
ignore_waves: bool,
seed: u32,
) -> Result<()> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if json {
println!(
"{}",
serde_json::to_string_pretty(&updates).context(error::UpdateSerializeSnafu)?
);
} else {
for u in updates {
eprintln!("{}", &fmt_full_version(u));
}
}
Ok(())
}
/// Struct to hold the specified command line argument values
#[allow(clippy::struct_excessive_bools)]
struct Arguments {
subcommand: String,
log_level: LevelFilter,
json: bool,
ignore_waves: bool,
force_version: Option<Version>,
all: bool,
reboot: bool,
variant: Option<String>,
}
/// Parse the command line arguments to get the user-specified values
fn parse_args(args: std::env::Args) -> Arguments {
let mut subcommand = None;
let mut log_level = None;
let mut update_version = None;
let mut ignore_waves = false;
let mut json = false;
let mut all = false;
let mut reboot = false;
let mut variant = None;
let mut iter = args.skip(1);
while let Some(arg) = iter.next() {
match arg.as_ref() {
"--log-level" => {
let log_level_str = iter
.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --log-level"));
log_level =
Some(LevelFilter::from_str(&log_level_str).unwrap_or_else(|_| {
usage_msg(format!("Invalid log level '{log_level_str}'"))
}));
}
"-i" | "--image" => match iter.next() {
Some(v) => match Version::parse(&v) {
Ok(v) => update_version = Some(v),
_ => usage(),
},
_ => usage(),
},
"--variant" => {
variant = Some(
iter.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --variant")),
);
}
"-n" | "--now" | "--ignore-waves" => {
ignore_waves = true;
}
"-j" | "--json" => {
json = true;
}
"-r" | "--reboot" => {
reboot = true;
}
"-a" | "--all" => {
all = true;
}
// Assume any argument not prefixed with '-' is a subcommand
s if !s.starts_with('-') => {
if subcommand.is_some() {
usage();
}
subcommand = Some(s.to_string());
}
_ => usage(),
}
}
Arguments {
subcommand: subcommand.unwrap_or_else(|| usage()),
log_level: log_level.unwrap_or(LevelFilter::Info),
json,
ignore_waves,
force_version: update_version,
all,
reboot,
variant,
}
}
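// Illustrative invocations (hypothetical; the exact subcommand spellings depend on the
// serde_plain mapping of `Command`):
//   updog check-update --all --json
//   updog update --image 1.19.0 --reboot --log-level debug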
fn fmt_full_version(update: &Update) -> String {
format!("{} {}", update.variant, update.version)
}
fn output<T: Serialize>(json: bool, object: T, string: &str) -> Result<()> {
if json {
println!(
"{}",
serde_json::to_string_pretty(&object).context(error::UpdateSerializeSnafu)?
);
} else {
println!("{string}");
}
Ok(())
}
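/// Reboot the host by invoking `shutdown -r`, ignoring SIGTERM in the meantime so that
/// updog is not terminated before the reboot it requested takes effect.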
fn initiate_reboot() -> Result<()> {
// Set up signal handler for termination signals
let mut signals = Signals::new([SIGTERM]).context(error::SignalSnafu)?;
let signals_handle = signals.handle();
thread::spawn(move || {
for _sig in signals.forever() {
// Ignore termination signals in case updog gets terminated
// before getting to exit normally by itself after invoking
// `shutdown -r` to complete the update.
}
});
if let Err(err) = process::Command::new("shutdown")
.arg("-r")
.status()
.context(error::RebootFailureSnafu)
{
// Kill the signal handling thread
signals_handle.close();
return Err(err);
}
Ok(())
}
/// Our underlying HTTP client, reqwest, supports proxies by reading the `HTTPS_PROXY` and `NO_PROXY`
/// environment variables. Bottlerocket services can source proxy.env before running, but updog is
/// not a service, so we read these values from the config file and add them to the environment
/// here.
fn set_https_proxy_environment_variables(
https_proxy: &Option<String>,
no_proxy: &Option<Vec<String>>,
) {
let proxy = match https_proxy {
Some(s) if !s.is_empty() => s.clone(),
// without https_proxy, no_proxy does nothing, so we are done
_ => return,
};
std::env::set_var("HTTPS_PROXY", proxy);
if let Some(no_proxy) = no_proxy {
if !no_proxy.is_empty() {
let no_proxy_string = no_proxy.join(",");
debug!("setting NO_PROXY={}", no_proxy_string);
std::env::set_var("NO_PROXY", &no_proxy_string);
}
}
}
#[allow(clippy::too_many_lines)]
fn main_inner() -> Result<()> {
// Parse and store the arguments passed to the program
let arguments = parse_args(std::env::args());
// SimpleLogger will send errors to stderr and anything less to stdout.
SimpleLogger::init(arguments.log_level, LogConfig::default()).context(error::LoggerSnafu)?;
let command =
serde_plain::from_str::<Command>(&arguments.subcommand).unwrap_or_else(|_| usage());
let config = load_config()?;
set_https_proxy_environment_variables(&config.https_proxy, &config.no_proxy);
let current_release = BottlerocketRelease::new().context(error::ReleaseVersionSnafu)?;
let variant = arguments.variant.unwrap_or(current_release.variant_id);
let transport = HttpQueryTransport::new();
// get a shared pointer to the transport's query_params so we can add metrics information to
// the transport's HTTP calls.
let mut query_params = transport.query_params();
set_common_query_params(&mut query_params, &current_release.version_id, &config);
let repository = load_repository(transport, &config)?;
let manifest = load_manifest(&repository)?;
let ignore_waves = arguments.ignore_waves || config.ignore_waves;
match command {
Command::CheckUpdate | Command::Whats => {
if arguments.all {
return list_updates(
&manifest,
&variant,
arguments.json,
ignore_waves,
config.seed,
);
}
let update = update_required(
&manifest,
&current_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)?
.context(error::UpdateNotAvailableSnafu)?;
output(arguments.json, update, &fmt_full_version(update))?;
}
Command::Update | Command::UpdateImage => {
if let Some(u) = update_required(
&manifest,
&current_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)? {
eprintln!("Starting update to {}", u.version);
query_params.add("target", u.version.to_string());
retrieve_migrations(
&repository,
&mut query_params,
&manifest,
u,
&current_release.version_id,
)?;
update_image(u, &repository)?;
if command == Command::Update {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
output(
arguments.json,
u,
&format!("Update applied: {}", fmt_full_version(u)),
)?;
} else {
eprintln!("No update required");
}
}
Command::UpdateApply => {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
Command::UpdateRevert => {
revert_update_flags()?;
}
Command::Prepare => {
// TODO unimplemented
}
}
Ok(())
}
fn load_manifest(repository: &tough::Repository) -> Result<Manifest> {
let target = "manifest.json";
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
Manifest::from_json(
repository
.read_target(&target)
.context(error::ManifestLoadSnafu)?
.context(error::ManifestNotFoundSnafu)?,
)
.context(error::ManifestParseSnafu)
}
fn main() -> ! {
std::process::exit(match main_inner() {
Ok(()) => 0,
Err(err) => {
eprintln!("{err}");
if let Some(var) = std::env::var_os("RUST_BACKTRACE") {
if var != "0" {
if let Some(backtrace) = err.backtrace() {
eprintln!("\n{backtrace:?}");
}
}
}
1
}
})
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration as TestDuration;
use std::collections::BTreeMap;
use update_metadata::Images;
#[test]
fn test_manifest_json() {
// Loads a general example of a manifest that includes an update with waves,
// a set of migrations, and some datastore mappings.
// This test checks that it parses and that the following properties hold:
// - the update list and migration list are non-empty
// - the (1.11.0, 1.12.0) migration entry exists and starts with "migrate_1.12.0_foo"
let path = "tests/data/example.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(
!manifest.updates.is_empty(),
"Failed to parse update manifest"
);
assert!(
!manifest.migrations.is_empty(),
"Failed to parse migrations"
);
let from = Version::parse("1.11.0").unwrap();
let to = Version::parse("1.12.0").unwrap();
assert!(manifest
.migrations
.contains_key(&(from.clone(), to.clone())));
let migration = manifest.migrations.get(&(from, to)).unwrap();
assert!(migration[0] == "migrate_1.12.0_foo");
}
#[test]
fn test_serde_reader() {
// A basic manifest with a single update, no migrations, and two
// image:datastore mappings
let path = "tests/data/example_2.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(!manifest.updates.is_empty());
}
#[test]
fn test_versions() {
// A manifest with a single update whose version exceeds the max version.
// update in manifest has
// - version: 1.25.0
// - max_version: 1.20.0
let path = "tests/data/regret.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.18.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
assert!(
update_required(
&manifest,
url: &config.targets_base_url,
})?,
)
.transport(transport)
.load()
.context(error::MetadataSnafu)
}
fn applicable_updates<'a>(
manifest: &'a Manifest,
variant: &str,
ignore_waves: bool,
seed: u32,
) -> Vec<&'a Update> {
let mut updates: Vec<&Update> = manifest
.updates
.iter()
.filter(|u| {
u.variant == *variant
&& u.arch == TARGET_ARCH
&& u.version <= u.max_version
&& (ignore_waves || u.update_ready(seed, Utc::now()))
})
.collect();
// sort descending
updates.sort_unstable_by(|a, b| b.version.cmp(&a.version));
updates
}
// TODO use config if there is api-sourced configuration that could affect this
// TODO updog.toml may include settings that cause us to ignore/delay
// certain/any updates;
// Ignore Specific Target Version
// Ignore Any Target
// ...
fn update_required<'a>(
manifest: &'a Manifest,
version: &Version,
variant: &str,
ignore_waves: bool,
seed: u32,
version_lock: &str,
force_version: Option<Version>,
) -> Result<Option<&'a Update>> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if let Some(forced_version) = force_version {
return Ok(updates.into_iter().find(|u| u.version == forced_version));
}
if version_lock!= "latest" {
// Make sure the version string from the config is a valid version string that might be prefixed with 'v'
let friendly_version_lock =
FriendlyVersion::try_from(version_lock).context(error::BadVersionConfigSnafu {
version_str: version_lock,
})?;
// Convert back to semver::Version
let semver_version_lock =
friendly_version_lock
.try_into()
.context(error::BadVersionSnafu {
version_str: version_lock,
})?;
// If the configured version-lock matches our current version, we won't update to the same version
return if semver_version_lock == *version {
Ok(None)
} else {
Ok(updates
.into_iter()
.find(|u| u.version == semver_version_lock))
};
}
for update in updates {
// If the current running version is greater than the max version ever published,
// or moves us to a valid version <= the maximum version, update.
if *version < update.version || *version > update.max_version {
return Ok(Some(update));
}
}
Ok(None)
}
fn write_target_to_disk<P: AsRef<Path>>(
repository: &Repository,
target: &str,
disk_path: P,
) -> Result<()> {
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
let reader = repository
.read_target(&target)
.context(error::MetadataSnafu)?
.context(error::TargetNotFoundSnafu {
target: target.raw(),
})?;
// Note: the file extension for the compression type we're using should be removed in
// retrieve_migrations below.
let mut reader = lz4::Decoder::new(reader).context(error::Lz4DecodeSnafu {
target: target.raw(),
})?;
let mut f = OpenOptions::new()
.write(true)
.create(true)
.open(disk_path.as_ref())
.context(error::OpenPartitionSnafu {
path: disk_path.as_ref(),
})?;
io::copy(&mut reader, &mut f).context(error::WriteUpdateSnafu)?;
Ok(())
}
/// Store required migrations for an update in persistent storage. All intermediate migrations
/// between the current version and the target version must be retrieved.
fn retrieve_migrations(
repository: &Repository,
query_params: &mut QueryParams,
manifest: &Manifest,
update: &Update,
current_version: &Version,
) -> Result<()> {
// the migrations required for foo to bar and bar to foo are
// the same; we can pretend we're always upgrading from foo to
// bar and use the same logic to obtain the migrations
let target = std::cmp::max(&update.version, current_version);
let start = std::cmp::min(&update.version, current_version);
let dir = Path::new(MIGRATION_PATH);
if!dir.exists() {
fs::create_dir(dir).context(error::DirCreateSnafu { path: &dir })?;
}
// find the list of migrations in the manifest based on our from and to versions.
let mut targets = find_migrations(start, target, manifest)?;
// we need to store the manifest so that migrator can independently and securely determine the
// migration list. this is true even if there are no migrations.
targets.push("manifest.json".to_owned());
repository
.cache(METADATA_PATH, MIGRATION_PATH, Some(&targets), true)
.context(error::RepoCacheMigrationsSnafu)?;
// Set a query parameter listing the required migrations
query_params.add("migrations", targets.join(","));
Ok(())
}
fn update_image(update: &Update, repository: &Repository) -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.clear_inactive();
// Write out the clearing of the inactive partition immediately, because we're about to
// overwrite the partition set with update data and don't want it to be used until we
// know we're done with all components.
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
let inactive = gpt_state.inactive_set();
// TODO Do we want to recover the inactive side on an error?
write_target_to_disk(repository, &update.images.root, &inactive.root)?;
write_target_to_disk(repository, &update.images.boot, &inactive.boot)?;
write_target_to_disk(repository, &update.images.hash, &inactive.hash)?;
gpt_state.mark_inactive_valid();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state
.upgrade_to_inactive()
.context(error::InactivePartitionUpgradeSnafu)?;
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn revert_update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.cancel_upgrade();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn set_common_query_params(
query_params: &mut QueryParams,
current_version: &Version,
config: &Config,
) {
query_params.add("version", current_version.to_string());
query_params.add("seed", config.seed.to_string());
}
/// List any available update that matches the current variant
fn list_updates(
manifest: &Manifest,
variant: &str,
json: bool,
ignore_waves: bool,
seed: u32,
) -> Result<()> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if json {
println!(
"{}",
serde_json::to_string_pretty(&updates).context(error::UpdateSerializeSnafu)?
);
} else {
for u in updates {
eprintln!("{}", &fmt_full_version(u));
}
}
Ok(())
}
/// Struct to hold the specified command line argument values
#[allow(clippy::struct_excessive_bools)]
struct Arguments {
subcommand: String,
log_level: LevelFilter,
json: bool,
ignore_waves: bool,
force_version: Option<Version>,
all: bool,
reboot: bool,
variant: Option<String>,
}
/// Parse the command line arguments to get the user-specified values
fn parse_args(args: std::env::Args) -> Arguments {
let mut subcommand = None;
let mut log_level = None;
let mut update_version = None;
let mut ignore_waves = false;
let mut json = false;
let mut all = false;
let mut reboot = false;
let mut variant = None;
let mut iter = args.skip(1);
while let Some(arg) = iter.next() {
match arg.as_ref() {
"--log-level" => {
let log_level_str = iter
.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --log-level"));
log_level =
Some(LevelFilter::from_str(&log_level_str).unwrap_or_else(|_| {
usage_msg(format!("Invalid log level '{log_level_str}'"))
}));
}
"-i" | "--image" => match iter.next() {
Some(v) => match Version::parse(&v) {
Ok(v) => update_version = Some(v),
_ => usage(),
},
_ => usage(),
},
"--variant" => {
variant = Some(
iter.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --variant")),
);
}
"-n" | "--now" | "--ignore-waves" => {
ignore_waves = true;
}
"-j" | "--json" => {
json = true;
}
"-r" | "--reboot" => {
reboot = true;
}
"-a" | "--all" => {
all = true;
}
// Treat any argument not prefixed with '-' as the subcommand
s if !s.starts_with('-') => {
if subcommand.is_some() {
usage();
}
subcommand = Some(s.to_string());
}
_ => usage(),
}
}
Arguments {
subcommand: subcommand.unwrap_or_else(|| usage()),
log_level: log_level.unwrap_or(LevelFilter::Info),
json,
ignore_waves,
force_version: update_version,
all,
reboot,
variant,
}
}
fn fmt_full_version(update: &Update) -> String {
format!("{} {}", update.variant, update.version)
}
fn output<T: Serialize>(json: bool, object: T, string: &str) -> Result<()> {
if json {
println!(
"{}",
serde_json::to_string_pretty(&object).context(error::UpdateSerializeSnafu)?
);
} else {
println!("{string}");
}
Ok(())
}
fn initiate_reboot() -> Result<()> {
// Set up signal handler for termination signals
let mut signals = Signals::new([SIGTERM]).context(error::SignalSnafu)?;
let signals_handle = signals.handle();
thread::spawn(move || {
for _sig in signals.forever() {
// Ignore termination signals in case updog gets terminated
// before getting to exit normally by itself after invoking
// `shutdown -r` to complete the update.
}
});
if let Err(err) = process::Command::new("shutdown")
.arg("-r")
.status()
.context(error::RebootFailureSnafu)
{
// Kill the signal handling thread
signals_handle.close();
return Err(err);
}
Ok(())
}
/// Our underlying HTTP client, reqwest, supports proxies by reading the `HTTPS_PROXY` and `NO_PROXY`
/// environment variables. Bottlerocket services can source proxy.env before running, but updog is
/// not a service, so we read these values from the config file and add them to the environment
/// here.
fn set_https_proxy_environment_variables(
https_proxy: &Option<String>,
no_proxy: &Option<Vec<String>>,
) {
let proxy = match https_proxy {
Some(s) if !s.is_empty() => s.clone(),
// without https_proxy, no_proxy does nothing, so we are done
_ => return,
};
std::env::set_var("HTTPS_PROXY", proxy);
if let Some(no_proxy) = no_proxy {
if !no_proxy.is_empty() {
let no_proxy_string = no_proxy.join(",");
debug!("setting NO_PROXY={}", no_proxy_string);
std::env::set_var("NO_PROXY", &no_proxy_string);
}
}
}
#[allow(clippy::too_many_lines)]
fn main_inner() -> Result<()> {
// Parse and store the arguments passed to the program
let arguments = parse_args(std::env::args());
// SimpleLogger will send errors to stderr and anything less to stdout.
SimpleLogger::init(arguments.log_level, LogConfig::default()).context(error::LoggerSnafu)?;
let command =
serde_plain::from_str::<Command>(&arguments.subcommand).unwrap_or_else(|_| usage());
let config = load_config()?;
set_https_proxy_environment_variables(&config.https_proxy, &config.no_proxy);
let current_release = BottlerocketRelease::new().context(error::ReleaseVersionSnafu)?;
let variant = arguments.variant.unwrap_or(current_release.variant_id);
let transport = HttpQueryTransport::new();
// get a shared pointer to the transport's query_params so we can add metrics information to
// the transport's HTTP calls.
let mut query_params = transport.query_params();
set_common_query_params(&mut query_params, &current_release.version_id, &config);
let repository = load_repository(transport, &config)?;
let manifest = load_manifest(&repository)?;
let ignore_waves = arguments.ignore_waves || config.ignore_waves;
match command {
Command::CheckUpdate | Command::Whats => {
if arguments.all {
return list_updates(
&manifest,
&variant,
arguments.json,
ignore_waves,
config.seed,
);
}
let update = update_required(
&manifest,
&current_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)?
.context(error::UpdateNotAvailableSnafu)?;
output(arguments.json, update, &fmt_full_version(update))?;
}
Command::Update | Command::UpdateImage => {
if let Some(u) = update_required(
&manifest,
&current_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)? {
eprintln!("Starting update to {}", u.version);
query_params.add("target", u.version.to_string());
retrieve_migrations(
&repository,
&mut query_params,
&manifest,
u,
&current_release.version_id,
)?;
update_image(u, &repository)?;
if command == Command::Update {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
output(
arguments.json,
u,
&format!("Update applied: {}", fmt_full_version(u)),
)?;
} else {
eprintln!("No update required");
}
}
Command::UpdateApply => {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
Command::UpdateRevert => {
revert_update_flags()?;
}
Command::Prepare => {
// TODO unimplemented
}
}
Ok(())
}
fn load_manifest(repository: &tough::Repository) -> Result<Manifest> {
let target = "manifest.json";
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
Manifest::from_json(
repository
.read_target(&target)
.context(error::ManifestLoadSnafu)?
.context(error::ManifestNotFoundSnafu)?,
)
.context(error::ManifestParseSnafu)
}
fn main() -> ! {
std::process::exit(match main_inner() {
Ok(()) => 0,
Err(err) => {
eprintln!("{err}");
if let Some(var) = std::env::var_os("RUST_BACKTRACE") {
if var != "0" {
if let Some(backtrace) = err.backtrace() {
eprintln!("\n{backtrace:?}");
}
}
}
1
}
})
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration as TestDuration;
use std::collections::BTreeMap;
use update_metadata::Images;
#[test]
fn test_manifest_json() {
// Loads a general example of a manifest that includes an update with waves,
// a set of migrations, and some datastore mappings.
// This test checks that the manifest parses and that:
// - the updates and migrations lists are non-empty
// - a migration list exists for the (1.11.0, 1.12.0) version pair
// - that list begins with the migration "migrate_1.12.0_foo"
let path = "tests/data/example.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(
!manifest.updates.is_empty(),
"Failed to parse update manifest"
);
assert!(
!manifest.migrations.is_empty(),
"Failed to parse migrations"
);
let from = Version::parse("1.11.0").unwrap();
let to = Version::parse("1.12.0").unwrap();
assert!(manifest
.migrations
.contains_key(&(from.clone(), to.clone())));
let migration = manifest.migrations.get(&(from, to)).unwrap();
assert!(migration[0] == "migrate_1.12.0_foo");
}
#[test]
fn test_serde_reader() {
// A basic manifest with a single update, no migrations, and two
// image:datastore mappings
let path = "tests/data/example_2.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(!manifest.updates.is_empty());
}
#[test]
fn test_versions() | update_required(
&manifest,
| {
// A manifest with a single update whose version exceeds the max version.
// update in manifest has
// - version: 1.25.0
// - max_version: 1.20.0
let path = "tests/data/regret.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.18.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
assert!( | identifier_body |
Python3_original.rs | #[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct Python3_original {
support_level: SupportLevel,
data: DataHolder,
code: String,
imports: String,
interpreter: String,
main_file_path: String,
plugin_root: String,
cache_dir: String,
venv: Option<String>,
}
impl Python3_original {
fn fetch_imports(&mut self) -> Result<(), SniprunError> {
if self.support_level < SupportLevel::Import {
return Ok(());
}
let mut v = vec![];
let mut errored = true;
if let Some(real_nvim_instance) = self.data.nvim_instance.clone() {
info!("got real nvim isntance");
let mut rvi = real_nvim_instance.lock().unwrap();
if let Ok(buffer) = rvi.get_current_buf() {
info!("got buffer");
if let Ok(buf_lines) = buffer.get_lines(&mut rvi, 0, -1, false) {
info!("got lines in buffer");
v = buf_lines;
errored = false;
}
}
}
if errored {
return Err(SniprunError::FetchCodeError);
}
info!("lines are : {:?}", v);
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
{
self.code = self.data.current_bloc.clone();
}
for line in v.iter() {
// info!("lines are : {}", line);
if (line.trim().starts_with("import ") || line.trim().starts_with("from ")) //basic selection
&& !line.trim().starts_with('#')
&& self.module_used(line, &self.code)
{
// the collected imports are later wrapped in a try/except block in case an unneeded module is unavailable
self.imports = self.imports.clone() + "\n" + line;
}
}
info!("import founds : {:?}", self.imports);
Ok(())
}
fn | (&self, line: &str, code: &str) -> bool {
info!(
"checking for python module usage: line {} in code {}",
line, code
);
if line.contains('*') {
return true;
}
if line.contains(" as ") {
if let Some(name) = line.split(' ').last() {
return code.contains(name);
}
}
for name in line
.replace(",", " ")
.replace("from", " ")
.replace("import ", " ")
.split(' ')
.filter(|&x| !x.is_empty())
{
if code.contains(name.trim()) {
return true;
}
}
false
}
fn fetch_config(&mut self) {
let default_compiler = String::from("python3");
// Apply the default first so that a custom interpreter, when configured, overrides it
self.interpreter = default_compiler;
if let Some(used_compiler) = Python3_original::get_interpreter_option(&self.get_data(), "interpreter") {
if let Some(compiler_string) = used_compiler.as_str() {
info!("Using custom compiler: {}", compiler_string);
self.interpreter = compiler_string.to_string();
}
}
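// Virtualenv support: look for a configured venv relative to the current
// directory and note its activate_this.py (shipped by virtualenv-style envs)
// so the generated script can expose that env's site-packages to the interpreter.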
if let Ok(path) = env::current_dir() {
if let Some(venv_array_config) = Python3_original::get_interpreter_option(&self.get_data(), "venv") {
if let Some(actual_vec_of_venv) = venv_array_config.as_array() {
for possible_venv in actual_vec_of_venv.iter() {
if let Some(possible_venv_str) = possible_venv.as_str() {
let venv_abs_path = path.to_str().unwrap().to_owned()
+ "/"
+ possible_venv_str
+ "/bin/activate_this.py";
if std::path::Path::new(&venv_abs_path).exists() {
self.venv = Some(venv_abs_path);
break;
}
}
}
}
}
}
}
}
impl Interpreter for Python3_original {
fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Python3_original> {
//create a subfolder in the cache folder
let rwd = data.work_dir.clone() + "/python3_original";
let mut builder = DirBuilder::new();
builder.recursive(true);
builder
.create(&rwd)
.expect("Could not create directory for python3-original");
//pre-create string pointing to main file's and binary's path
let mfp = rwd.clone() + "/main.py";
let pgr = data.sniprun_root_dir.clone();
Box::new(Python3_original {
data,
support_level: level,
code: String::from(""),
imports: String::from(""),
main_file_path: mfp,
plugin_root: pgr,
cache_dir: rwd,
interpreter: String::new(),
venv: None,
})
}
fn check_cli_args(&self) -> Result<(), SniprunError> {
// All cli arguments are sendable to python
// Though they will be ignored in REPL mode
Ok(())
}
fn get_name() -> String {
String::from("Python3_original")
}
fn behave_repl_like_default() -> bool {
false
}
fn has_repl_capability() -> bool {
true
}
fn default_for_filetype() -> bool {
true
}
fn get_supported_languages() -> Vec<String> {
vec![
String::from("Python 3"),
String::from("python"),
String::from("python3"),
String::from("py"),
]
}
fn get_current_level(&self) -> SupportLevel {
self.support_level
}
fn set_current_level(&mut self, level: SupportLevel) {
self.support_level = level;
}
fn get_data(&self) -> DataHolder {
self.data.clone()
}
fn get_max_support_level() -> SupportLevel {
SupportLevel::Import
}
fn fetch_code(&mut self) -> Result<(), SniprunError> {
self.fetch_config();
self.fetch_imports()?;
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
&& self.get_current_level() >= SupportLevel::Bloc
{
self.code = self.data.current_bloc.clone();
} else if !self.data.current_line.replace(" ", "").is_empty()
&& self.get_current_level() >= SupportLevel::Line
{
self.code = self.data.current_line.clone();
} else {
self.code = String::from("");
}
Ok(())
}
fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
if !self.imports.is_empty() {
let mut indented_imports = String::new();
for import in self.imports.lines() {
indented_imports = indented_imports + "\t" + import + "\n";
}
self.imports = String::from("\ntry:\n") + &indented_imports + "\nexcept:\n\tpass\n";
}
let mut source_venv = String::new();
if let Some(venv_path) = &self.venv {
info!("loading venv: {}", venv_path);
source_venv = source_venv + "\n" + "activate_this_file = \"" + venv_path + "\"";
source_venv += "\nexec(compile(open(activate_this_file, \"rb\").read(), activate_this_file, 'exec'), dict(__file__=activate_this_file))\n";
}
self.code = source_venv
+ &self.imports.clone()
+ &unindent(&format!("{}{}", "\n", self.code.as_str()));
Ok(())
}
fn build(&mut self) -> Result<(), SniprunError> {
// info!("python code:\n {}", self.code);
write(&self.main_file_path, &self.code)
.expect("Unable to write to file for python3_original");
Ok(())
}
fn execute(&mut self) -> Result<String, SniprunError> {
let output = Command::new(&self.interpreter)
.arg(&self.main_file_path)
.args(&self.get_data().cli_args)
.output()
.expect("Unable to start process");
if output.status.success() {
Ok(String::from_utf8(output.stdout).unwrap())
} else {
return Err(SniprunError::RuntimeError(
String::from_utf8(output.stderr.clone())
.unwrap()
.lines()
.last()
.unwrap_or(&String::from_utf8(output.stderr).unwrap())
.to_owned(),
));
}
}
}
impl ReplLikeInterpreter for Python3_original {
fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
self.fetch_code()
}
fn build_repl(&mut self) -> Result<(), SniprunError> {
self.build()
}
fn execute_repl(&mut self) -> Result<String, SniprunError> {
self.execute()
}
fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
info!("begins add boilerplate repl");
//load save & load functions
let mut path_to_python_functions = self.plugin_root.clone();
path_to_python_functions.push_str("/src/interpreters/Python3_original/saveload.py");
let python_functions = std::fs::read_to_string(&path_to_python_functions).unwrap();
let klepto_memo = String::from("'") + &self.cache_dir.clone() + "/" + "memo" + "'";
let mut final_code = self.imports.clone();
final_code.push('\n');
final_code.push_str(&python_functions);
final_code.push('\n');
if self.read_previous_code().is_empty() {
//first run
self.save_code("Not the first run anymore".to_string());
} else {
//not the first run, should load old variables
{
final_code.push_str("sniprun142859_load(");
final_code.push_str(&klepto_memo);
final_code.push(')');
}
final_code.push('\n');
}
final_code.push_str(&unindent(&format!("{}{}", "\n", self.code.as_str())));
final_code.push('\n');
{
final_code.push_str("sniprun142859_save("); // if the run has not failed, save new variables
final_code.push_str(&klepto_memo);
final_code.push(')');
}
self.code = final_code.clone();
// info!("---{}---", &final_code);
Ok(())
}
}
#[cfg(test)]
mod test_python3_original {
use super::*;
use crate::*;
use crate::test_main::*;
#[test]
fn simple_print() {
let mut data = DataHolder::new();
data.current_bloc = String::from("print(\"lol\",1);");
let mut interpreter = Python3_original::new(data);
let res = interpreter.run_at_level(SupportLevel::Bloc);
// should panic if not an Ok()
let string_result = res.unwrap();
assert_eq!(string_result, "lol 1\n");
}
fn test_repl() {
let mut event_handler = fake_event();
event_handler.fill_data(&fake_msgpack());
event_handler.data.filetype = String::from("python");
event_handler.data.current_bloc = String::from("a=1");
event_handler.data.repl_enabled = vec![String::from("Python3_original")];
event_handler.data.sniprun_root_dir = String::from(".");
// run the launcher (which selects, initializes, and runs an interpreter)
let launcher = launcher::Launcher::new(event_handler.data.clone());
let _result = launcher.select_and_run();
event_handler.data.current_bloc = String::from("print(a)");
let launcher = launcher::Launcher::new(event_handler.data.clone());
let result = launcher.select_and_run();
assert!(result.is_ok());
}
}
| module_used | identifier_name |
Python3_original.rs | #[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct Python3_original {
support_level: SupportLevel,
data: DataHolder,
code: String,
imports: String,
interpreter: String,
main_file_path: String,
plugin_root: String,
cache_dir: String,
venv: Option<String>,
}
impl Python3_original {
fn fetch_imports(&mut self) -> Result<(), SniprunError> {
if self.support_level < SupportLevel::Import {
return Ok(());
}
let mut v = vec![];
let mut errored = true;
if let Some(real_nvim_instance) = self.data.nvim_instance.clone() {
info!("got real nvim isntance");
let mut rvi = real_nvim_instance.lock().unwrap();
if let Ok(buffer) = rvi.get_current_buf() {
info!("got buffer");
if let Ok(buf_lines) = buffer.get_lines(&mut rvi, 0, -1, false) {
info!("got lines in buffer");
v = buf_lines;
errored = false;
}
}
}
if errored {
return Err(SniprunError::FetchCodeError);
}
info!("lines are : {:?}", v);
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
{
self.code = self.data.current_bloc.clone();
}
for line in v.iter() {
// info!("lines are : {}", line);
if (line.trim().starts_with("import ") || line.trim().starts_with("from ")) //basic selection
&& !line.trim().starts_with('#')
&& self.module_used(line, &self.code)
{
// the collected imports are later wrapped in a try/except block in case an unneeded module is unavailable
self.imports = self.imports.clone() + "\n" + line;
}
}
info!("import founds : {:?}", self.imports);
Ok(())
}
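// Heuristic: an import line is kept only when a name it introduces (the module
// itself, an `as` alias, or the names listed after `from ... import`) appears
// somewhere in the code to run; wildcard imports are always kept.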
fn module_used(&self, line: &str, code: &str) -> bool {
info!(
"checking for python module usage: line {} in code {}",
line, code
);
if line.contains('*') {
return true;
}
if line.contains(" as ") {
if let Some(name) = line.split(' ').last() {
return code.contains(name);
}
}
for name in line
.replace(",", " ")
.replace("from", " ")
.replace("import ", " ")
.split(' ')
.filter(|&x| !x.is_empty())
{
if code.contains(name.trim()) {
return true;
}
}
false
}
fn fetch_config(&mut self) {
let default_compiler = String::from("python3");
// Apply the default first so that a custom interpreter, when configured, overrides it
self.interpreter = default_compiler;
if let Some(used_compiler) = Python3_original::get_interpreter_option(&self.get_data(), "interpreter") {
if let Some(compiler_string) = used_compiler.as_str() {
info!("Using custom compiler: {}", compiler_string);
self.interpreter = compiler_string.to_string();
}
}
if let Ok(path) = env::current_dir() |
}
}
impl Interpreter for Python3_original {
fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Python3_original> {
//create a subfolder in the cache folder
let rwd = data.work_dir.clone() + "/python3_original";
let mut builder = DirBuilder::new();
builder.recursive(true);
builder
.create(&rwd)
.expect("Could not create directory for python3-original");
//pre-create string pointing to main file's and binary's path
let mfp = rwd.clone() + "/main.py";
let pgr = data.sniprun_root_dir.clone();
Box::new(Python3_original {
data,
support_level: level,
code: String::from(""),
imports: String::from(""),
main_file_path: mfp,
plugin_root: pgr,
cache_dir: rwd,
interpreter: String::new(),
venv: None,
})
}
fn check_cli_args(&self) -> Result<(), SniprunError> {
// All cli arguments are sendable to python
// Though they will be ignored in REPL mode
Ok(())
}
fn get_name() -> String {
String::from("Python3_original")
}
fn behave_repl_like_default() -> bool {
false
}
fn has_repl_capability() -> bool {
true
}
fn default_for_filetype() -> bool {
true
}
fn get_supported_languages() -> Vec<String> {
vec![
String::from("Python 3"),
String::from("python"),
String::from("python3"),
String::from("py"),
]
}
fn get_current_level(&self) -> SupportLevel {
self.support_level
}
fn set_current_level(&mut self, level: SupportLevel) {
self.support_level = level;
}
fn get_data(&self) -> DataHolder {
self.data.clone()
}
fn get_max_support_level() -> SupportLevel {
SupportLevel::Import
}
fn fetch_code(&mut self) -> Result<(), SniprunError> {
self.fetch_config();
self.fetch_imports()?;
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
&& self.get_current_level() >= SupportLevel::Bloc
{
self.code = self.data.current_bloc.clone();
} else if !self.data.current_line.replace(" ", "").is_empty()
&& self.get_current_level() >= SupportLevel::Line
{
self.code = self.data.current_line.clone();
} else {
self.code = String::from("");
}
Ok(())
}
fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
if !self.imports.is_empty() {
let mut indented_imports = String::new();
for import in self.imports.lines() {
indented_imports = indented_imports + "\t" + import + "\n";
}
self.imports = String::from("\ntry:\n") + &indented_imports + "\nexcept:\n\tpass\n";
}
let mut source_venv = String::new();
if let Some(venv_path) = &self.venv {
info!("loading venv: {}", venv_path);
source_venv = source_venv + "\n" + "activate_this_file = \"" + venv_path + "\"";
source_venv += "\nexec(compile(open(activate_this_file, \"rb\").read(), activate_this_file, 'exec'), dict(__file__=activate_this_file))\n";
}
self.code = source_venv
+ &self.imports.clone()
+ &unindent(&format!("{}{}", "\n", self.code.as_str()));
Ok(())
}
fn build(&mut self) -> Result<(), SniprunError> {
// info!("python code:\n {}", self.code);
write(&self.main_file_path, &self.code)
.expect("Unable to write to file for python3_original");
Ok(())
}
fn execute(&mut self) -> Result<String, SniprunError> {
let output = Command::new(&self.interpreter)
.arg(&self.main_file_path)
.args(&self.get_data().cli_args)
.output()
.expect("Unable to start process");
if output.status.success() {
Ok(String::from_utf8(output.stdout).unwrap())
} else {
return Err(SniprunError::RuntimeError(
String::from_utf8(output.stderr.clone())
.unwrap()
.lines()
.last()
.unwrap_or(&String::from_utf8(output.stderr).unwrap())
.to_owned(),
));
}
}
}
impl ReplLikeInterpreter for Python3_original {
fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
self.fetch_code()
}
fn build_repl(&mut self) -> Result<(), SniprunError> {
self.build()
}
fn execute_repl(&mut self) -> Result<String, SniprunError> {
self.execute()
}
fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
info!("begins add boilerplate repl");
//load save & load functions
let mut path_to_python_functions = self.plugin_root.clone();
path_to_python_functions.push_str("/src/interpreters/Python3_original/saveload.py");
let python_functions = std::fs::read_to_string(&path_to_python_functions).unwrap();
let klepto_memo = String::from("'") + &self.cache_dir.clone() + "/" + "memo" + "'";
let mut final_code = self.imports.clone();
final_code.push('\n');
final_code.push_str(&python_functions);
final_code.push('\n');
if self.read_previous_code().is_empty() {
//first run
self.save_code("Not the first run anymore".to_string());
} else {
//not the first run, should load old variables
{
final_code.push_str("sniprun142859_load(");
final_code.push_str(&klepto_memo);
final_code.push(')');
}
final_code.push('\n');
}
final_code.push_str(&unindent(&format!("{}{}", "\n", self.code.as_str())));
final_code.push('\n');
{
final_code.push_str("sniprun142859_save("); // if the run has not failed, save new variables
final_code.push_str(&klepto_memo);
final_code.push(')');
}
self.code = final_code.clone();
// info!("---{}---", &final_code);
Ok(())
}
}
#[cfg(test)]
mod test_python3_original {
use super::*;
use crate::*;
use crate::test_main::*;
#[test]
fn simple_print() {
let mut data = DataHolder::new();
data.current_bloc = String::from("print(\"lol\",1);");
let mut interpreter = Python3_original::new(data);
let res = interpreter.run_at_level(SupportLevel::Bloc);
// should panic if not an Ok()
let string_result = res.unwrap();
assert_eq!(string_result, "lol 1\n");
}
fn test_repl() {
let mut event_handler = fake_event();
event_handler.fill_data(&fake_msgpack());
event_handler.data.filetype = String::from("python");
event_handler.data.current_bloc = String::from("a=1");
event_handler.data.repl_enabled = vec![String::from("Python3_original")];
event_handler.data.sniprun_root_dir = String::from(".");
// run the launcher (which selects, initializes, and runs an interpreter)
let launcher = launcher::Launcher::new(event_handler.data.clone());
let _result = launcher.select_and_run();
event_handler.data.current_bloc = String::from("print(a)");
let launcher = launcher::Launcher::new(event_handler.data.clone());
let result = launcher.select_and_run();
assert!(result.is_ok());
}
}
| {
if let Some(venv_array_config) = Python3_original::get_interpreter_option(&self.get_data(), "venv") {
if let Some(actual_vec_of_venv) = venv_array_config.as_array() {
for possible_venv in actual_vec_of_venv.iter() {
if let Some(possible_venv_str) = possible_venv.as_str() {
let venv_abs_path = path.to_str().unwrap().to_owned()
+ "/"
+ possible_venv_str
+ "/bin/activate_this.py";
if std::path::Path::new(&venv_abs_path).exists() {
self.venv = Some(venv_abs_path);
break;
}
}
}
}
}
} | conditional_block |
Python3_original.rs | #[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct Python3_original {
support_level: SupportLevel,
data: DataHolder,
code: String,
imports: String,
interpreter: String,
main_file_path: String,
plugin_root: String,
cache_dir: String,
venv: Option<String>,
}
impl Python3_original {
fn fetch_imports(&mut self) -> Result<(), SniprunError> {
if self.support_level < SupportLevel::Import {
return Ok(());
}
let mut v = vec![];
let mut errored = true;
if let Some(real_nvim_instance) = self.data.nvim_instance.clone() {
info!("got real nvim isntance");
let mut rvi = real_nvim_instance.lock().unwrap();
if let Ok(buffer) = rvi.get_current_buf() {
info!("got buffer");
if let Ok(buf_lines) = buffer.get_lines(&mut rvi, 0, -1, false) {
info!("got lines in buffer");
v = buf_lines;
errored = false;
}
}
}
if errored {
return Err(SniprunError::FetchCodeError);
}
info!("lines are : {:?}", v);
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
{
self.code = self.data.current_bloc.clone();
}
for line in v.iter() {
// info!("lines are : {}", line);
if (line.trim().starts_with("import ") || line.trim().starts_with("from ")) //basic selection
&& !line.trim().starts_with('#')
&& self.module_used(line, &self.code)
{
// the collected imports are later wrapped in a try/except block in case an unneeded module is unavailable
self.imports = self.imports.clone() + "\n" + line;
}
}
info!("import founds : {:?}", self.imports);
Ok(())
}
fn module_used(&self, line: &str, code: &str) -> bool {
info!(
"checking for python module usage: line {} in code {}",
line, code
);
if line.contains('*') {
return true;
}
if line.contains(" as ") {
if let Some(name) = line.split(' ').last() {
return code.contains(name);
}
}
for name in line
.replace(",", " ")
.replace("from", " ")
.replace("import ", " ")
.split(' ')
.filter(|&x| !x.is_empty())
{
if code.contains(name.trim()) {
return true;
}
}
false
}
fn fetch_config(&mut self) {
let default_compiler = String::from("python3");
// Apply the default first so that a custom interpreter, when configured, overrides it
self.interpreter = default_compiler;
if let Some(used_compiler) = Python3_original::get_interpreter_option(&self.get_data(), "interpreter") {
if let Some(compiler_string) = used_compiler.as_str() {
info!("Using custom compiler: {}", compiler_string);
self.interpreter = compiler_string.to_string();
}
}
if let Ok(path) = env::current_dir() {
if let Some(venv_array_config) = Python3_original::get_interpreter_option(&self.get_data(), "venv") {
if let Some(actual_vec_of_venv) = venv_array_config.as_array() {
for possible_venv in actual_vec_of_venv.iter() {
if let Some(possible_venv_str) = possible_venv.as_str() {
let venv_abs_path = path.to_str().unwrap().to_owned()
+ "/"
+ possible_venv_str
+ "/bin/activate_this.py";
if std::path::Path::new(&venv_abs_path).exists() {
self.venv = Some(venv_abs_path);
break;
}
}
}
}
}
}
}
}
impl Interpreter for Python3_original {
fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Python3_original> {
//create a subfolder in the cache folder
let rwd = data.work_dir.clone() + "/python3_original";
let mut builder = DirBuilder::new();
builder.recursive(true);
builder
.create(&rwd)
.expect("Could not create directory for python3-original");
//pre-create string pointing to main file's and binary's path
let mfp = rwd.clone() + "/main.py";
let pgr = data.sniprun_root_dir.clone();
Box::new(Python3_original {
data,
support_level: level,
code: String::from(""),
imports: String::from(""),
main_file_path: mfp,
plugin_root: pgr,
cache_dir: rwd,
interpreter: String::new(),
venv: None,
})
}
fn check_cli_args(&self) -> Result<(), SniprunError> {
// All cli arguments are sendable to python
// Though they will be ignored in REPL mode
Ok(())
}
fn get_name() -> String {
String::from("Python3_original")
}
fn behave_repl_like_default() -> bool {
false
}
fn has_repl_capability() -> bool {
true
}
fn default_for_filetype() -> bool {
true
}
fn get_supported_languages() -> Vec<String> {
vec![
String::from("Python 3"),
String::from("python"),
String::from("python3"),
String::from("py"),
]
}
fn get_current_level(&self) -> SupportLevel {
self.support_level
}
fn set_current_level(&mut self, level: SupportLevel) {
self.support_level = level;
}
fn get_data(&self) -> DataHolder {
self.data.clone()
}
fn get_max_support_level() -> SupportLevel {
SupportLevel::Import
}
fn fetch_code(&mut self) -> Result<(), SniprunError> {
self.fetch_config();
self.fetch_imports()?;
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
&& self.get_current_level() >= SupportLevel::Bloc
{
self.code = self.data.current_bloc.clone(); | self.code = self.data.current_line.clone();
} else {
self.code = String::from("");
}
Ok(())
}
fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
if !self.imports.is_empty() {
let mut indented_imports = String::new();
for import in self.imports.lines() {
indented_imports = indented_imports + "\t" + import + "\n";
}
self.imports = String::from("\ntry:\n") + &indented_imports + "\nexcept:\n\tpass\n";
}
let mut source_venv = String::new();
if let Some(venv_path) = &self.venv {
info!("loading venv: {}", venv_path);
source_venv = source_venv + "\n" + "activate_this_file = \"" + venv_path + "\"";
source_venv += "\nexec(compile(open(activate_this_file, \"rb\").read(), activate_this_file, 'exec'), dict(__file__=activate_this_file))\n";
}
self.code = source_venv
+ &self.imports.clone()
+ &unindent(&format!("{}{}", "\n", self.code.as_str()));
Ok(())
}
fn build(&mut self) -> Result<(), SniprunError> {
// info!("python code:\n {}", self.code);
write(&self.main_file_path, &self.code)
.expect("Unable to write to file for python3_original");
Ok(())
}
fn execute(&mut self) -> Result<String, SniprunError> {
let output = Command::new(&self.interpreter)
.arg(&self.main_file_path)
.args(&self.get_data().cli_args)
.output()
.expect("Unable to start process");
if output.status.success() {
Ok(String::from_utf8(output.stdout).unwrap())
} else {
return Err(SniprunError::RuntimeError(
String::from_utf8(output.stderr.clone())
.unwrap()
.lines()
.last()
.unwrap_or(&String::from_utf8(output.stderr).unwrap())
.to_owned(),
));
}
}
}
impl ReplLikeInterpreter for Python3_original {
fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
self.fetch_code()
}
fn build_repl(&mut self) -> Result<(), SniprunError> {
self.build()
}
fn execute_repl(&mut self) -> Result<String, SniprunError> {
self.execute()
}
fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
info!("begins add boilerplate repl");
//load save & load functions
let mut path_to_python_functions = self.plugin_root.clone();
path_to_python_functions.push_str("/src/interpreters/Python3_original/saveload.py");
let python_functions = std::fs::read_to_string(&path_to_python_functions).unwrap();
let klepto_memo = String::from("'") + &self.cache_dir.clone() + "/" + "memo" + "'";
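// REPL emulation: prepend the helper functions from saveload.py, restore any
// previously saved globals from the memo archive, run the user's bloc, then
// dump the globals back so state appears to persist between runs.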
let mut final_code = self.imports.clone();
final_code.push('\n');
final_code.push_str(&python_functions);
final_code.push('\n');
if self.read_previous_code().is_empty() {
//first run
self.save_code("Not the first run anymore".to_string());
} else {
//not the first run, should load old variables
{
final_code.push_str("sniprun142859_load(");
final_code.push_str(&klepto_memo);
final_code.push(')');
}
final_code.push('\n');
}
final_code.push_str(&unindent(&format!("{}{}", "\n", self.code.as_str())));
final_code.push('\n');
{
final_code.push_str("sniprun142859_save("); // if the run has not failed, save new variables
final_code.push_str(&klepto_memo);
final_code.push(')');
}
self.code = final_code.clone();
// info!("---{}---", &final_code);
Ok(())
}
}
#[cfg(test)]
mod test_python3_original {
use super::*;
use crate::*;
use crate::test_main::*;
#[test]
fn simple_print() {
let mut data = DataHolder::new();
data.current_bloc = String::from("print(\"lol\",1);");
let mut interpreter = Python3_original::new(data);
let res = interpreter.run_at_level(SupportLevel::Bloc);
// should panic if not an Ok()
let string_result = res.unwrap();
assert_eq!(string_result, "lol 1\n");
}
fn test_repl() {
let mut event_handler = fake_event();
event_handler.fill_data(&fake_msgpack());
event_handler.data.filetype = String::from("python");
event_handler.data.current_bloc = String::from("a=1");
event_handler.data.repl_enabled = vec![String::from("Python3_original")];
event_handler.data.sniprun_root_dir = String::from(".");
// run the launcher (which selects, initializes, and runs an interpreter)
let launcher = launcher::Launcher::new(event_handler.data.clone());
let _result = launcher.select_and_run();
event_handler.data.current_bloc = String::from("print(a)");
let launcher = launcher::Launcher::new(event_handler.data.clone());
let result = launcher.select_and_run();
assert!(result.is_ok());
}
} | } else if !self.data.current_line.replace(" ", "").is_empty()
&& self.get_current_level() >= SupportLevel::Line
{ | random_line_split |
Python3_original.rs | #[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct Python3_original {
support_level: SupportLevel,
data: DataHolder,
code: String,
imports: String,
interpreter: String,
main_file_path: String,
plugin_root: String,
cache_dir: String,
venv: Option<String>,
}
impl Python3_original {
fn fetch_imports(&mut self) -> Result<(), SniprunError> {
if self.support_level < SupportLevel::Import {
return Ok(());
}
let mut v = vec![];
let mut errored = true;
if let Some(real_nvim_instance) = self.data.nvim_instance.clone() {
info!("got real nvim isntance");
let mut rvi = real_nvim_instance.lock().unwrap();
if let Ok(buffer) = rvi.get_current_buf() {
info!("got buffer");
if let Ok(buf_lines) = buffer.get_lines(&mut rvi, 0, -1, false) {
info!("got lines in buffer");
v = buf_lines;
errored = false;
}
}
}
if errored {
return Err(SniprunError::FetchCodeError);
}
info!("lines are : {:?}", v);
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
{
self.code = self.data.current_bloc.clone();
}
for line in v.iter() {
// info!("lines are : {}", line);
if (line.trim().starts_with("import ") || line.trim().starts_with("from ")) //basic selection
&& !line.trim().starts_with('#')
&& self.module_used(line, &self.code)
{
// the collected imports are later wrapped in a try/except block in case an unneeded module is unavailable
self.imports = self.imports.clone() + "\n" + line;
}
}
info!("import founds : {:?}", self.imports);
Ok(())
}
fn module_used(&self, line: &str, code: &str) -> bool {
info!(
"checking for python module usage: line {} in code {}",
line, code
);
if line.contains('*') {
return true;
}
if line.contains(" as ") {
if let Some(name) = line.split(' ').last() {
return code.contains(name);
}
}
for name in line
.replace(",", " ")
.replace("from", " ")
.replace("import ", " ")
.split(' ')
.filter(|&x| !x.is_empty())
{
if code.contains(name.trim()) {
return true;
}
}
false
}
fn fetch_config(&mut self) {
let default_compiler = String::from("python3");
// Apply the default first so that a custom interpreter, when configured, overrides it
self.interpreter = default_compiler;
if let Some(used_compiler) = Python3_original::get_interpreter_option(&self.get_data(), "interpreter") {
if let Some(compiler_string) = used_compiler.as_str() {
info!("Using custom compiler: {}", compiler_string);
self.interpreter = compiler_string.to_string();
}
}
if let Ok(path) = env::current_dir() {
if let Some(venv_array_config) = Python3_original::get_interpreter_option(&self.get_data(), "venv") {
if let Some(actual_vec_of_venv) = venv_array_config.as_array() {
for possible_venv in actual_vec_of_venv.iter() {
if let Some(possible_venv_str) = possible_venv.as_str() {
let venv_abs_path = path.to_str().unwrap().to_owned()
+ "/"
+ possible_venv_str
+ "/bin/activate_this.py";
if std::path::Path::new(&venv_abs_path).exists() {
self.venv = Some(venv_abs_path);
break;
}
}
}
}
}
}
}
}
impl Interpreter for Python3_original {
fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Python3_original> {
//create a subfolder in the cache folder
let rwd = data.work_dir.clone() + "/python3_original";
let mut builder = DirBuilder::new();
builder.recursive(true);
builder
.create(&rwd)
.expect("Could not create directory for python3-original");
//pre-create string pointing to main file's and binary's path
let mfp = rwd.clone() + "/main.py";
let pgr = data.sniprun_root_dir.clone();
Box::new(Python3_original {
data,
support_level: level,
code: String::from(""),
imports: String::from(""),
main_file_path: mfp,
plugin_root: pgr,
cache_dir: rwd,
interpreter: String::new(),
venv: None,
})
}
fn check_cli_args(&self) -> Result<(), SniprunError> |
fn get_name() -> String {
String::from("Python3_original")
}
fn behave_repl_like_default() -> bool {
false
}
fn has_repl_capability() -> bool {
true
}
fn default_for_filetype() -> bool {
true
}
fn get_supported_languages() -> Vec<String> {
vec![
String::from("Python 3"),
String::from("python"),
String::from("python3"),
String::from("py"),
]
}
fn get_current_level(&self) -> SupportLevel {
self.support_level
}
fn set_current_level(&mut self, level: SupportLevel) {
self.support_level = level;
}
fn get_data(&self) -> DataHolder {
self.data.clone()
}
fn get_max_support_level() -> SupportLevel {
SupportLevel::Import
}
fn fetch_code(&mut self) -> Result<(), SniprunError> {
self.fetch_config();
self.fetch_imports()?;
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
&& self.get_current_level() >= SupportLevel::Bloc
{
self.code = self.data.current_bloc.clone();
} else if !self.data.current_line.replace(" ", "").is_empty()
&& self.get_current_level() >= SupportLevel::Line
{
self.code = self.data.current_line.clone();
} else {
self.code = String::from("");
}
Ok(())
}
fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
if !self.imports.is_empty() {
let mut indented_imports = String::new();
for import in self.imports.lines() {
indented_imports = indented_imports + "\t" + import + "\n";
}
self.imports = String::from("\ntry:\n") + &indented_imports + "\nexcept:\n\tpass\n";
}
let mut source_venv = String::new();
if let Some(venv_path) = &self.venv {
info!("loading venv: {}", venv_path);
source_venv = source_venv + "\n" + "activate_this_file = \"" + venv_path + "\"";
source_venv += "\nexec(compile(open(activate_this_file, \"rb\").read(), activate_this_file, 'exec'), dict(__file__=activate_this_file))\n";
}
self.code = source_venv
+ &self.imports.clone()
+ &unindent(&format!("{}{}", "\n", self.code.as_str()));
Ok(())
}
fn build(&mut self) -> Result<(), SniprunError> {
// info!("python code:\n {}", self.code);
write(&self.main_file_path, &self.code)
.expect("Unable to write to file for python3_original");
Ok(())
}
fn execute(&mut self) -> Result<String, SniprunError> {
let output = Command::new(&self.interpreter)
.arg(&self.main_file_path)
.args(&self.get_data().cli_args)
.output()
.expect("Unable to start process");
if output.status.success() {
Ok(String::from_utf8(output.stdout).unwrap())
} else {
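// Surface only the last line of stderr (usually the actual Python error),
// falling back to the full stderr output when it has no final line.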
return Err(SniprunError::RuntimeError(
String::from_utf8(output.stderr.clone())
.unwrap()
.lines()
.last()
.unwrap_or(&String::from_utf8(output.stderr).unwrap())
.to_owned(),
));
}
}
}
impl ReplLikeInterpreter for Python3_original {
fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
self.fetch_code()
}
fn build_repl(&mut self) -> Result<(), SniprunError> {
self.build()
}
fn execute_repl(&mut self) -> Result<String, SniprunError> {
self.execute()
}
fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
info!("begins add boilerplate repl");
//load save & load functions
let mut path_to_python_functions = self.plugin_root.clone();
path_to_python_functions.push_str("/src/interpreters/Python3_original/saveload.py");
let python_functions = std::fs::read_to_string(&path_to_python_functions).unwrap();
let klepto_memo = String::from("'") + &self.cache_dir.clone() + "/" + "memo" + "'";
let mut final_code = self.imports.clone();
final_code.push('\n');
final_code.push_str(&python_functions);
final_code.push('\n');
if self.read_previous_code().is_empty() {
//first run
self.save_code("Not the first run anymore".to_string());
} else {
//not the first run, should load old variables
{
final_code.push_str("sniprun142859_load(");
final_code.push_str(&klepto_memo);
final_code.push(')');
}
final_code.push('\n');
}
final_code.push_str(&unindent(&format!("{}{}", "\n", self.code.as_str())));
final_code.push('\n');
{
final_code.push_str("sniprun142859_save("); // if the run has not failed, save new variables
final_code.push_str(&klepto_memo);
final_code.push(')');
}
self.code = final_code.clone();
// info!("---{}---", &final_code);
Ok(())
}
}
#[cfg(test)]
mod test_python3_original {
use super::*;
use crate::*;
use crate::test_main::*;
#[test]
fn simple_print() {
let mut data = DataHolder::new();
data.current_bloc = String::from("print(\"lol\",1);");
let mut interpreter = Python3_original::new(data);
let res = interpreter.run_at_level(SupportLevel::Bloc);
// should panic if not an Ok()
let string_result = res.unwrap();
assert_eq!(string_result, "lol 1\n");
}
fn test_repl() {
let mut event_handler = fake_event();
event_handler.fill_data(&fake_msgpack());
event_handler.data.filetype = String::from("python");
event_handler.data.current_bloc = String::from("a=1");
event_handler.data.repl_enabled = vec![String::from("Python3_original")];
event_handler.data.sniprun_root_dir = String::from(".");
// run the launcher (which selects, initializes, and runs an interpreter)
let launcher = launcher::Launcher::new(event_handler.data.clone());
let _result = launcher.select_and_run();
event_handler.data.current_bloc = String::from("print(a)");
let launcher = launcher::Launcher::new(event_handler.data.clone());
let result = launcher.select_and_run();
assert!(result.is_ok());
}
}
| {
// All cli arguments are sendable to python
// Though they will be ignored in REPL mode
Ok(())
} | identifier_body |
frameworks.rs | // stripped Mac Core Foundation + Metal layer; only what's needed
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
pub use {
std::{
ffi::c_void,
os::raw::c_ulong,
ptr::NonNull,
},
crate::{
makepad_platform::{
os::apple::frameworks::*,
makepad_objc_sys::{
runtime::{Class, Object, Protocol, Sel, BOOL, YES, NO},
declare::ClassDecl,
msg_send,
sel,
class,
sel_impl,
Encode,
Encoding
},
},
}
};
// CORE AUDIO
pub const kAudioUnitManufacturer_Apple: u32 = 1634758764;
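// Core Audio constants are four-character codes packed big-endian into a u32;
// 1634758764 is u32::from_be_bytes(*b"appl").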
#[repr(C)] pub struct OpaqueAudioComponent([u8; 0]);
pub type CAudioComponent = *mut OpaqueAudioComponent;
#[repr(C)] pub struct ComponentInstanceRecord([u8; 0]);
pub type CAudioComponentInstance = *mut ComponentInstanceRecord;
pub type CAudioUnit = CAudioComponentInstance;
pub type OSStatus = i32;
#[repr(C)]
pub struct CAudioStreamBasicDescription {
pub mSampleRate: f64,
pub mFormatID: AudioFormatId,
pub mFormatFlags: u32,
pub mBytesPerPacket: u32,
pub mFramesPerPacket: u32,
pub mBytesPerFrame: u32,
pub mChannelsPerFrame: u32,
pub mBitsPerChannel: u32,
pub mReserved: u32,
}
#[repr(u32)]
pub enum AudioFormatId {
LinearPCM = 1819304813,
AC3 = 1633889587,
F60958AC3 = 1667326771,
AppleIMA4 = 1768775988,
MPEG4AAC = 1633772320,
MPEG4CELP = 1667591280,
MPEG4HVXC = 1752594531,
MPEG4TwinVQ = 1953986161,
MACE3 = 1296122675,
MACE6 = 1296122678,
ULaw = 1970037111,
ALaw = 1634492791,
QDesign = 1363430723,
QDesign2 = 1363430706,
QUALCOMM = 1365470320,
MPEGLayer1 = 778924081,
MPEGLayer2 = 778924082,
MPEGLayer3 = 778924083,
TimeCode = 1953066341,
MIDIStream = 1835623529,
ParameterValueStream = 1634760307,
AppleLossless = 1634492771,
MPEG4AAC_HE = 1633772392,
MPEG4AAC_LD = 1633772396,
MPEG4AAC_ELD = 1633772389,
MPEG4AAC_ELD_SBR = 1633772390,
MPEG4AAC_ELD_V2 = 1633772391,
MPEG4AAC_HE_V2 = 1633772400,
MPEG4AAC_Spatial = 1633772403,
AMR = 1935764850,
AMR_WB = 1935767394,
Audible = 1096107074,
iLBC = 1768710755,
DVIIntelIMA = 1836253201,
MicrosoftGSM = 1836253233,
AES3 = 1634038579,
}
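// The discriminants above are likewise packed four-character codes, e.g.
// LinearPCM == u32::from_be_bytes(*b"lpcm") and AC3 == u32::from_be_bytes(*b"ac-3").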
/*
struct F60958AC3Flags;
impl F60958AC3Flags {
const IS_FLOAT: u32 = 1;
const IS_BIG_ENDIAN: u32 = 2;
const IS_SIGNED_INTEGER: u32 = 4;
const IS_PACKED: u32 = 8;
const IS_ALIGNED_HIGH: u32 = 16;
const IS_NON_INTERLEAVED: u32 = 32;
const IS_NON_MIXABLE: u32 = 64;
}
*/
/*
pub struct LinearPcmFlags;
impl LinearPcmFlags {
const IS_FLOAT: u32 = 1;
const IS_BIG_ENDIAN: u32 = 2;
const IS_SIGNED_INTEGER: u32 = 4;
const IS_PACKED: u32 = 8;
const IS_ALIGNED_HIGH: u32 = 16;
const IS_NON_INTERLEAVED: u32 = 32;
const IS_NON_MIXABLE: u32 = 64;
const FLAGS_SAMPLE_FRACTION_SHIFT: u32 = 7;
const FLAGS_SAMPLE_FRACTION_MASK: u32 = 8064;
}
pub struct AppleLosslessFlags;
impl AppleLosslessFlags {
const BIT_16_SOURCE_DATA: u32 = 1;
const BIT_20_SOURCE_DATA: u32 = 2;
const BIT_24_SOURCE_DATA: u32 = 3;
const BIT_32_SOURCE_DATA: u32 = 4;
}
*/
#[repr(u32)]
pub enum Mpeg4ObjectId {
AAC_Main = 1,
AAC_LC = 2,
AAC_SSR = 3,
AAC_LTP = 4,
AAC_SBR = 5,
AAC_Scalable = 6,
TwinVQ = 7,
CELP = 8,
HVXC = 9,
}
/*
pub struct AudioTimeStampFlags;
impl AudioTimeStampFlags {
const SAMPLE_TIME_VALID: u32 = 1;
const HOST_TIME_VALID: u32 = 2;
const RATE_SCALAR_VALID: u32 = 4;
const WORLD_CLOCK_TIME_VALID: u32 = 8;
const SMPTE_TIME_VALID: u32 = 16;
}
*/
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(C)]
pub struct CAudioComponentDescription {
pub componentType: CAudioUnitType,
pub componentSubType: CAudioUnitSubType,
pub componentManufacturer: u32,
pub componentFlags: u32,
pub componentFlagsMask: u32,
}
impl CAudioComponentDescription {
pub fn new_apple(ty: CAudioUnitType, sub: CAudioUnitSubType) -> Self {
Self {
componentType: ty,
componentSubType: sub,
componentManufacturer: kAudioUnitManufacturer_Apple,
componentFlags: 0,
componentFlagsMask: 0,
}
}
pub fn new_all_manufacturers(ty: CAudioUnitType, sub: CAudioUnitSubType) -> Self {
Self {
componentType: ty,
componentSubType: sub,
componentManufacturer: 0,
componentFlags: 0,
componentFlagsMask: 0,
}
}
}
#[derive(Debug, Default)]
#[repr(C)]
pub struct SMPTETime {
pub mSubframes: i16,
pub mSubframeDivisor: i16,
pub mCounter: u32,
pub mType: u32,
pub mFlags: u32,
pub mHours: i16,
pub mMinutes: i16,
pub mSeconds: i16,
pub mFrames: i16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct _AudioBuffer {
pub mNumberChannels: u32,
pub mDataByteSize: u32,
pub mData: *mut ::std::os::raw::c_void,
}
pub const MAX_AUDIO_BUFFERS: usize = 8;
#[repr(C)]
pub struct CAudioBufferList {
pub mNumberBuffers: u32,
pub mBuffers: [_AudioBuffer; MAX_AUDIO_BUFFERS],
}
#[derive(Debug)]
#[repr(C)]
pub struct CAudioTimeStamp {
pub mSampleTime: f64,
pub mHostTime: u64,
pub mRateScalar: f64,
pub mWordClockTime: u64,
pub mSMPTETime: SMPTETime,
pub mFlags: u32,
pub mReserved: u32,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum CAudioUnitType {
IO = 1635086197,
MusicDevice = 1635085685,
MusicEffect = 1635085670,
FormatConverter = 1635083875,
Effect = 1635083896,
Mixer = 1635085688,
Panner = 1635086446,
Generator = 1635084142,
OfflineEffect = 1635086188,
}
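// Also four-character codes: IO == u32::from_be_bytes(*b"auou") and
// MusicDevice == u32::from_be_bytes(*b"aumu").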
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum CAudioUnitSubType {
Undefined = 0,
PeakLimiter = 1819112562,
DynamicsProcessor = 1684237680,
LowPassFilter = 1819304307,
HighPassFilter = 1752195443,
BandPassFilter = 1651532147,
HighShelfFilter = 1752393830,
LowShelfFilter = 1819502694,
ParametricEQ = 1886217585,
Distortion = 1684632436,
Delay = 1684368505,
SampleDelay = 1935961209,
GraphicEQ = 1735550321,
MultiBandCompressor = 1835232624,
MatrixReverb = 1836213622,
Pitch = 1953329268,
AUFilter = 1718185076,
NetSend = 1853058660,
RogerBeep = 1919903602,
NBandEQ = 1851942257,
//pub enum FormatConverterType
AUConverter = 1668247158,
NewTimePitch = 1853191280,
//TimePitch = 1953329268,
DeferredRenderer = 1684366962,
Splitter = 1936747636,
Merger = 1835364967,
Varispeed = 1986097769,
AUiPodTimeOther = 1768977519,
//pub enum MixerType
MultiChannelMixer = 1835232632,
StereoMixer = 1936554098,
Mixer3D = 862219640,
MatrixMixer = 1836608888,
//pub enum GeneratorType {
ScheduledSoundPlayer = 1936945260,
AudioFilePlayer = 1634103404,
//pub enum MusicDeviceType {
DLSSynth = 1684828960,
Sampler = 1935764848,
//pub enum IOType {
GenericOutput = 1734700658,
HalOutput = 1634230636,
DefaultOutput = 1684366880,
SystemOutput = 1937339168,
VoiceProcessingIO = 1987078511,
RemoteIO = 1919512419,
}
#[derive(Debug)]
#[repr(i32)]
pub enum OSError {
Unimplemented = -4,
FileNotFound = -43,
FilePermission = -54,
TooManyFilesOpen = -42,
Unspecified = -1500,
SystemSoundClientMessageTimeout = -1501,
BadFilePath = 561017960,
Param = -50,
MemFull = -108,
FormatUnspecified = 2003329396,
UnknownProperty = 2003332927,
BadPropertySize = 561211770,
IllegalOperation = 1852797029,
UnsupportedFormat = 560226676,
State = 561214580,
NotEnoughBufferSpace = 560100710,
UnsupportedDataFormat = 1718449215,
InvalidProperty = -10879,
InvalidParameter = -10878,
InvalidElement = -10877,
NoConnection = -10876,
FailedInitialization = -10875,
TooManyFramesToProcess = -10874,
InvalidFile = -10871,
FormatNotSupported = -10868,
Uninitialized = -10867,
InvalidScope = -10866,
PropertyNotWritable = -10865,
CannotDoInCurrentContext = -10863,
InvalidPropertyValue = -10851,
PropertyNotInUse = -10850,
Initialized = -10849,
InvalidOfflineRender = -10848,
Unauthorized = -10847,
NoMatchingDefaultAudioUnitFound,
Unknown,
}
pub const kAudioComponentInstantiation_LoadInProcess: u32 = 2;
pub const kAudioComponentInstantiation_LoadOutOfProcess: u32 = 1;
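// Maps a raw OSStatus to a typed error; 0 (no error) becomes Ok(()).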
impl OSError {
pub fn from(result: i32) -> Result<(), Self> {
Err(match result {
0 => return Ok(()),
x if x == Self::Unimplemented as i32 => Self::Unimplemented,
x if x == Self::FileNotFound as i32 => Self::FileNotFound,
x if x == Self::FilePermission as i32 => Self::FilePermission,
x if x == Self::TooManyFilesOpen as i32 => Self::TooManyFilesOpen,
x if x == Self::Unspecified as i32 => Self::Unspecified,
x if x == Self::SystemSoundClientMessageTimeout as i32 => Self::SystemSoundClientMessageTimeout,
x if x == Self::BadFilePath as i32 => Self::BadFilePath,
x if x == Self::Param as i32 => Self::Param,
x if x == Self::MemFull as i32 => Self::MemFull,
x if x == Self::FormatUnspecified as i32 => Self::FormatUnspecified,
x if x == Self::UnknownProperty as i32 => Self::UnknownProperty,
x if x == Self::BadPropertySize as i32 => Self::BadPropertySize,
x if x == Self::IllegalOperation as i32 => Self::IllegalOperation,
x if x == Self::UnsupportedFormat as i32 => Self::UnsupportedFormat,
x if x == Self::State as i32 => Self::State,
x if x == Self::NotEnoughBufferSpace as i32 => Self::NotEnoughBufferSpace,
x if x == Self::UnsupportedDataFormat as i32 => Self::UnsupportedDataFormat,
x if x == Self::InvalidProperty as i32 => Self::InvalidProperty,
x if x == Self::InvalidParameter as i32 => Self::InvalidParameter,
x if x == Self::InvalidElement as i32 => Self::InvalidElement,
x if x == Self::NoConnection as i32 => Self::NoConnection,
x if x == Self::FailedInitialization as i32 => Self::FailedInitialization,
x if x == Self::TooManyFramesToProcess as i32 => Self::TooManyFramesToProcess,
x if x == Self::InvalidFile as i32 => Self::InvalidFile,
x if x == Self::FormatNotSupported as i32 => Self::FormatNotSupported,
x if x == Self::Uninitialized as i32 => Self::Uninitialized,
x if x == Self::InvalidScope as i32 => Self::InvalidScope,
x if x == Self::PropertyNotWritable as i32 => Self::PropertyNotWritable,
x if x == Self::CannotDoInCurrentContext as i32 => Self::CannotDoInCurrentContext,
x if x == Self::InvalidPropertyValue as i32 => Self::InvalidPropertyValue,
x if x == Self::PropertyNotInUse as i32 => Self::PropertyNotInUse,
x if x == Self::Initialized as i32 => Self::Initialized,
x if x == Self::InvalidOfflineRender as i32 => Self::InvalidOfflineRender,
x if x == Self::Unauthorized as i32 => Self::Unauthorized,
_ => Self::Unknown
})
}
pub fn from_nserror(ns_error: ObjcId) -> Result<(), Self> {
if ns_error != nil {
let code: i32 = unsafe {msg_send![ns_error, code]};
Self::from(code)
}
else {
Ok(())
}
}
}
pub type ItemCount = u64;
pub type MIDIObjectRef = u32;
pub type MIDIClientRef = MIDIObjectRef;
pub type MIDIPortRef = MIDIObjectRef;
pub type MIDIEndpointRef = MIDIObjectRef;
pub type MIDIProtocolID = i32;
pub type MIDITimeStamp = u64;
pub const kMIDIProtocol_1_0: i32 = 1;
pub const kMIDIProtocol_2_0: i32 = 2;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct MIDINotification {
pub messageID: i32,
pub messageSize: u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct | {
pub protocol: MIDIProtocolID,
pub numPackets: u32,
pub packet: [MIDIEventPacket; 1usize],
}
#[repr(C, packed(4))]
#[derive(Copy, Clone)]
pub struct MIDIEventPacket {
pub timeStamp: MIDITimeStamp,
pub wordCount: u32,
pub words: [u32; 64usize],
}
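// repr(C, packed(4)) mirrors the 4-byte packing CoreMIDI's C headers apply to
// this struct, so the 64-bit timeStamp field lines up without extra padding.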
#[link(name = "CoreMidi", kind = "framework")]
extern "C" {
pub static kMIDIPropertyManufacturer: CFStringRef;
pub static kMIDIPropertyDisplayName: CFStringRef;
pub static kMIDIPropertyUniqueID: CFStringRef;
pub fn MIDIGetNumberOfSources() -> ItemCount;
pub fn MIDIGetSource(sourceIndex0: ItemCount) -> MIDIEndpointRef;
pub fn MIDIGetNumberOfDestinations() -> ItemCount;
pub fn MIDIGetDestination(sourceIndex0: ItemCount) -> MIDIEndpointRef;
pub fn MIDISendEventList(
port: MIDIPortRef,
dest: MIDIEndpointRef,
evtlist: *const MIDIEventList,
) -> OSStatus;
pub fn MIDIClientCreateWithBlock(
name: CFStringRef,
outClient: *mut MIDIClientRef,
notifyBlock: ObjcId,
) -> OSStatus;
pub fn MIDIInputPortCreateWithProtocol(
client: MIDIClientRef,
portName: CFStringRef,
protocol: MIDIProtocolID,
outPort: *mut MIDIPortRef,
receiveBlock: ObjcId,
) -> OSStatus;
pub fn MIDIOutputPortCreate(
client: MIDIClientRef,
portName: CFStringRef,
outPort: *mut MIDIPortRef,
) -> OSStatus;
pub fn MIDIObjectGetStringProperty(
obj: MIDIObjectRef,
propertyID: CFStringRef,
str_: *mut CFStringRef,
) -> OSStatus;
pub fn MIDIObjectGetIntegerProperty(
obj: MIDIObjectRef,
propertyID: CFStringRef,
outValue: *mut i32,
) -> OSStatus;
pub fn MIDIPortConnectSource(
port: MIDIPortRef,
source: MIDIEndpointRef,
connRefCon: *mut ::std::os::raw::c_void,
) -> OSStatus;
}
| MIDIEventList | identifier_name |
frameworks.rs | // stripped Mac Core Foundation + Metal layer; only what's needed
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
pub use {
std::{
ffi::c_void,
os::raw::c_ulong,
ptr::NonNull,
},
crate::{
makepad_platform::{
os::apple::frameworks::*,
makepad_objc_sys::{
runtime::{Class, Object, Protocol, Sel, BOOL, YES, NO},
declare::ClassDecl,
msg_send,
sel,
class,
sel_impl,
Encode,
Encoding
},
}, |
pub const kAudioUnitManufacturer_Apple: u32 = 1634758764;
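// Four-character code: 1634758764 == u32::from_be_bytes(*b"appl").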
#[repr(C)] pub struct OpaqueAudioComponent([u8; 0]);
pub type CAudioComponent = *mut OpaqueAudioComponent;
#[repr(C)] pub struct ComponentInstanceRecord([u8; 0]);
pub type CAudioComponentInstance = *mut ComponentInstanceRecord;
pub type CAudioUnit = CAudioComponentInstance;
pub type OSStatus = i32;
#[repr(C)]
pub struct CAudioStreamBasicDescription {
pub mSampleRate: f64,
pub mFormatID: AudioFormatId,
pub mFormatFlags: u32,
pub mBytesPerPacket: u32,
pub mFramesPerPacket: u32,
pub mBytesPerFrame: u32,
pub mChannelsPerFrame: u32,
pub mBitsPerChannel: u32,
pub mReserved: u32,
}
#[repr(u32)]
pub enum AudioFormatId {
LinearPCM = 1819304813,
AC3 = 1633889587,
F60958AC3 = 1667326771,
AppleIMA4 = 1768775988,
MPEG4AAC = 1633772320,
MPEG4CELP = 1667591280,
MPEG4HVXC = 1752594531,
MPEG4TwinVQ = 1953986161,
MACE3 = 1296122675,
MACE6 = 1296122678,
ULaw = 1970037111,
ALaw = 1634492791,
QDesign = 1363430723,
QDesign2 = 1363430706,
QUALCOMM = 1365470320,
MPEGLayer1 = 778924081,
MPEGLayer2 = 778924082,
MPEGLayer3 = 778924083,
TimeCode = 1953066341,
MIDIStream = 1835623529,
ParameterValueStream = 1634760307,
AppleLossless = 1634492771,
MPEG4AAC_HE = 1633772392,
MPEG4AAC_LD = 1633772396,
MPEG4AAC_ELD = 1633772389,
MPEG4AAC_ELD_SBR = 1633772390,
MPEG4AAC_ELD_V2 = 1633772391,
MPEG4AAC_HE_V2 = 1633772400,
MPEG4AAC_Spatial = 1633772403,
AMR = 1935764850,
AMR_WB = 1935767394,
Audible = 1096107074,
iLBC = 1768710755,
DVIIntelIMA = 1836253201,
MicrosoftGSM = 1836253233,
AES3 = 1634038579,
}
/*
struct F60958AC3Flags;
impl F60958AC3Flags {
const IS_FLOAT: u32 = 1;
const IS_BIG_ENDIAN: u32 = 2;
const IS_SIGNED_INTEGER: u32 = 4;
const IS_PACKED: u32 = 8;
const IS_ALIGNED_HIGH: u32 = 16;
const IS_NON_INTERLEAVED: u32 = 32;
const IS_NON_MIXABLE: u32 = 64;
}
*/
/*
pub struct LinearPcmFlags;
impl LinearPcmFlags {
const IS_FLOAT: u32 = 1;
const IS_BIG_ENDIAN: u32 = 2;
const IS_SIGNED_INTEGER: u32 = 4;
const IS_PACKED: u32 = 8;
const IS_ALIGNED_HIGH: u32 = 16;
const IS_NON_INTERLEAVED: u32 = 32;
const IS_NON_MIXABLE: u32 = 64;
const FLAGS_SAMPLE_FRACTION_SHIFT: u32 = 7;
const FLAGS_SAMPLE_FRACTION_MASK: u32 = 8064;
}
pub struct AppleLosslessFlags;
impl AppleLosslessFlags {
const BIT_16_SOURCE_DATA: u32 = 1;
const BIT_20_SOURCE_DATA: u32 = 2;
const BIT_24_SOURCE_DATA: u32 = 3;
const BIT_32_SOURCE_DATA: u32 = 4;
}
*/
#[repr(u32)]
pub enum Mpeg4ObjectId {
AAC_Main = 1,
AAC_LC = 2,
AAC_SSR = 3,
AAC_LTP = 4,
AAC_SBR = 5,
AAC_Scalable = 6,
TwinVQ = 7,
CELP = 8,
HVXC = 9,
}
/*
pub struct AudioTimeStampFlags;
impl AudioTimeStampFlags {
const SAMPLE_TIME_VALID: u32 = 1;
const HOST_TIME_VALID: u32 = 2;
const RATE_SCALAR_VALID: u32 = 4;
const WORLD_CLOCK_TIME_VALID: u32 = 8;
const SMPTE_TIME_VALID: u32 = 16;
}
*/
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(C)]
pub struct CAudioComponentDescription {
pub componentType: CAudioUnitType,
pub componentSubType: CAudioUnitSubType,
pub componentManufacturer: u32,
pub componentFlags: u32,
pub componentFlagsMask: u32,
}
impl CAudioComponentDescription {
pub fn new_apple(ty: CAudioUnitType, sub: CAudioUnitSubType) -> Self {
Self {
componentType: ty,
componentSubType: sub,
componentManufacturer: kAudioUnitManufacturer_Apple,
componentFlags: 0,
componentFlagsMask: 0,
}
}
pub fn new_all_manufacturers(ty: CAudioUnitType, sub: CAudioUnitSubType) -> Self {
Self {
componentType: ty,
componentSubType: sub,
componentManufacturer: 0,
componentFlags: 0,
componentFlagsMask: 0,
}
}
}
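// An illustrative sketch, not part of the original file: describing Apple's
// DLS synth. Such a descriptor would typically be passed to
// AudioComponentFindNext, which is not shown in this excerpt.
#[allow(dead_code)]
fn example_dls_synth_description() -> CAudioComponentDescription {
CAudioComponentDescription::new_apple(
CAudioUnitType::MusicDevice,
CAudioUnitSubType::DLSSynth,
)
}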
#[derive(Debug, Default)]
#[repr(C)]
pub struct SMPTETime {
pub mSubframes: i16,
pub mSubframeDivisor: i16,
pub mCounter: u32,
pub mType: u32,
pub mFlags: u32,
pub mHours: i16,
pub mMinutes: i16,
pub mSeconds: i16,
pub mFrames: i16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct _AudioBuffer {
pub mNumberChannels: u32,
pub mDataByteSize: u32,
pub mData: *mut ::std::os::raw::c_void,
}
pub const MAX_AUDIO_BUFFERS: usize = 8;
#[repr(C)]
pub struct CAudioBufferList {
pub mNumberBuffers: u32,
pub mBuffers: [_AudioBuffer; MAX_AUDIO_BUFFERS],
}
#[derive(Debug)]
#[repr(C)]
pub struct CAudioTimeStamp {
pub mSampleTime: f64,
pub mHostTime: u64,
pub mRateScalar: f64,
pub mWordClockTime: u64,
pub mSMPTETime: SMPTETime,
pub mFlags: u32,
pub mReserved: u32,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum CAudioUnitType {
IO = 1635086197,
MusicDevice = 1635085685,
MusicEffect = 1635085670,
FormatConverter = 1635083875,
Effect = 1635083896,
Mixer = 1635085688,
Panner = 1635086446,
Generator = 1635084142,
OfflineEffect = 1635086188,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum CAudioUnitSubType {
Undefined = 0,
PeakLimiter = 1819112562,
DynamicsProcessor = 1684237680,
LowPassFilter = 1819304307,
HighPassFilter = 1752195443,
BandPassFilter = 1651532147,
HighShelfFilter = 1752393830,
LowShelfFilter = 1819502694,
ParametricEQ = 1886217585,
Distortion = 1684632436,
Delay = 1684368505,
SampleDelay = 1935961209,
GraphicEQ = 1735550321,
MultiBandCompressor = 1835232624,
MatrixReverb = 1836213622,
Pitch = 1953329268,
AUFilter = 1718185076,
NetSend = 1853058660,
RogerBeep = 1919903602,
NBandEQ = 1851942257,
//pub enum FormatConverterType
AUConverter = 1668247158,
NewTimePitch = 1853191280,
//TimePitch = 1953329268,
DeferredRenderer = 1684366962,
Splitter = 1936747636,
Merger = 1835364967,
Varispeed = 1986097769,
AUiPodTimeOther = 1768977519,
//pub enum MixerType
MultiChannelMixer = 1835232632,
StereoMixer = 1936554098,
Mixer3D = 862219640,
MatrixMixer = 1836608888,
//pub enum GeneratorType {
ScheduledSoundPlayer = 1936945260,
AudioFilePlayer = 1634103404,
//pub enum MusicDeviceType {
DLSSynth = 1684828960,
Sampler = 1935764848,
//pub enum IOType {
GenericOutput = 1734700658,
HalOutput = 1634230636,
DefaultOutput = 1684366880,
SystemOutput = 1937339168,
VoiceProcessingIO = 1987078511,
RemoteIO = 1919512419,
}
#[derive(Debug)]
#[repr(i32)]
pub enum OSError {
Unimplemented = -4,
FileNotFound = -43,
FilePermission = -54,
TooManyFilesOpen = -42,
Unspecified = -1500,
SystemSoundClientMessageTimeout = -1501,
BadFilePath = 561017960,
Param = -50,
MemFull = -108,
FormatUnspecified = 2003329396,
UnknownProperty = 2003332927,
BadPropertySize = 561211770,
IllegalOperation = 1852797029,
UnsupportedFormat = 560226676,
State = 561214580,
NotEnoughBufferSpace = 560100710,
UnsupportedDataFormat = 1718449215,
InvalidProperty = -10879,
InvalidParameter = -10878,
InvalidElement = -10877,
NoConnection = -10876,
FailedInitialization = -10875,
TooManyFramesToProcess = -10874,
InvalidFile = -10871,
FormatNotSupported = -10868,
Uninitialized = -10867,
InvalidScope = -10866,
PropertyNotWritable = -10865,
CannotDoInCurrentContext = -10863,
InvalidPropertyValue = -10851,
PropertyNotInUse = -10850,
Initialized = -10849,
InvalidOfflineRender = -10848,
Unauthorized = -10847,
NoMatchingDefaultAudioUnitFound,
Unknown,
}
pub const kAudioComponentInstantiation_LoadInProcess: u32 = 2;
pub const kAudioComponentInstantiation_LoadOutOfProcess: u32 = 1;
impl OSError {
pub fn from(result: i32) -> Result<(), Self> {
Err(match result {
0 => return Ok(()),
x if x == Self::Unimplemented as i32 => Self::Unimplemented,
x if x == Self::FileNotFound as i32 => Self::FileNotFound,
x if x == Self::FilePermission as i32 => Self::FilePermission,
x if x == Self::TooManyFilesOpen as i32 => Self::TooManyFilesOpen,
x if x == Self::Unspecified as i32 => Self::Unspecified,
x if x == Self::SystemSoundClientMessageTimeout as i32 => Self::SystemSoundClientMessageTimeout,
x if x == Self::BadFilePath as i32 => Self::BadFilePath,
x if x == Self::Param as i32 => Self::Param,
x if x == Self::MemFull as i32 => Self::MemFull,
x if x == Self::FormatUnspecified as i32 => Self::FormatUnspecified,
x if x == Self::UnknownProperty as i32 => Self::UnknownProperty,
x if x == Self::BadPropertySize as i32 => Self::BadPropertySize,
x if x == Self::IllegalOperation as i32 => Self::IllegalOperation,
x if x == Self::UnsupportedFormat as i32 => Self::UnsupportedFormat,
x if x == Self::State as i32 => Self::State,
x if x == Self::NotEnoughBufferSpace as i32 => Self::NotEnoughBufferSpace,
x if x == Self::UnsupportedDataFormat as i32 => Self::UnsupportedDataFormat,
x if x == Self::InvalidProperty as i32 => Self::InvalidProperty,
x if x == Self::InvalidParameter as i32 => Self::InvalidParameter,
x if x == Self::InvalidElement as i32 => Self::InvalidElement,
x if x == Self::NoConnection as i32 => Self::NoConnection,
x if x == Self::FailedInitialization as i32 => Self::FailedInitialization,
x if x == Self::TooManyFramesToProcess as i32 => Self::TooManyFramesToProcess,
x if x == Self::InvalidFile as i32 => Self::InvalidFile,
x if x == Self::FormatNotSupported as i32 => Self::FormatNotSupported,
x if x == Self::Uninitialized as i32 => Self::Uninitialized,
x if x == Self::InvalidScope as i32 => Self::InvalidScope,
x if x == Self::PropertyNotWritable as i32 => Self::PropertyNotWritable,
x if x == Self::CannotDoInCurrentContext as i32 => Self::CannotDoInCurrentContext,
x if x == Self::InvalidPropertyValue as i32 => Self::InvalidPropertyValue,
x if x == Self::PropertyNotInUse as i32 => Self::PropertyNotInUse,
x if x == Self::Initialized as i32 => Self::Initialized,
x if x == Self::InvalidOfflineRender as i32 => Self::InvalidOfflineRender,
x if x == Self::Unauthorized as i32 => Self::Unauthorized,
_ => Self::Unknown
})
}
pub fn from_nserror(ns_error: ObjcId) -> Result<(), Self> {
if ns_error != nil {
let code: i32 = unsafe {msg_send![ns_error, code]};
Self::from(code)
}
else {
Ok(())
}
}
}
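// An illustrative sketch: any FFI call returning an OSStatus can be checked
// through OSError::from, turning the raw code into a Result. The wrapper name
// is hypothetical.
#[allow(dead_code)]
fn example_check_status(status: OSStatus) -> Result<(), OSError> {
// 0 means success; known error codes map to their variant, anything else to Unknown.
OSError::from(status)
}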
pub type ItemCount = u64;
pub type MIDIObjectRef = u32;
pub type MIDIClientRef = MIDIObjectRef;
pub type MIDIPortRef = MIDIObjectRef;
pub type MIDIEndpointRef = MIDIObjectRef;
pub type MIDIProtocolID = i32;
pub type MIDITimeStamp = u64;
pub const kMIDIProtocol_1_0: i32 = 1;
pub const kMIDIProtocol_2_0: i32 = 2;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct MIDINotification {
pub messageID: i32,
pub messageSize: u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct MIDIEventList {
pub protocol: MIDIProtocolID,
pub numPackets: u32,
pub packet: [MIDIEventPacket; 1usize],
}
#[repr(C, packed(4))]
#[derive(Copy, Clone)]
pub struct MIDIEventPacket {
pub timeStamp: MIDITimeStamp,
pub wordCount: u32,
pub words: [u32; 64usize],
}
#[link(name = "CoreMidi", kind = "framework")]
extern "C" {
pub static kMIDIPropertyManufacturer: CFStringRef;
pub static kMIDIPropertyDisplayName: CFStringRef;
pub static kMIDIPropertyUniqueID: CFStringRef;
pub fn MIDIGetNumberOfSources() -> ItemCount;
pub fn MIDIGetSource(sourceIndex0: ItemCount) -> MIDIEndpointRef;
pub fn MIDIGetNumberOfDestinations() -> ItemCount;
pub fn MIDIGetDestination(sourceIndex0: ItemCount) -> MIDIEndpointRef;
pub fn MIDISendEventList(
port: MIDIPortRef,
dest: MIDIEndpointRef,
evtlist: *const MIDIEventList,
) -> OSStatus;
pub fn MIDIClientCreateWithBlock(
name: CFStringRef,
outClient: *mut MIDIClientRef,
notifyBlock: ObjcId,
) -> OSStatus;
pub fn MIDIInputPortCreateWithProtocol(
client: MIDIClientRef,
portName: CFStringRef,
protocol: MIDIProtocolID,
outPort: *mut MIDIPortRef,
receiveBlock: ObjcId,
) -> OSStatus;
pub fn MIDIOutputPortCreate(
client: MIDIClientRef,
portName: CFStringRef,
outPort: *mut MIDIPortRef,
) -> OSStatus;
pub fn MIDIObjectGetStringProperty(
obj: MIDIObjectRef,
propertyID: CFStringRef,
str_: *mut CFStringRef,
) -> OSStatus;
pub fn MIDIObjectGetIntegerProperty(
obj: MIDIObjectRef,
propertyID: CFStringRef,
outValue: *mut i32,
) -> OSStatus;
pub fn MIDIPortConnectSource(
port: MIDIPortRef,
source: MIDIEndpointRef,
connRefCon: *mut ::std::os::raw::c_void,
) -> OSStatus;
} | }
};
// CORE AUDIO | random_line_split |
lib.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Shareable Substrate types.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
/// Initialize a key-value collection from array.
///
/// Creates a vector of given pairs and calls `collect` on the iterator from it.
/// Can be used to create a `HashMap`.
#[macro_export]
macro_rules! map {
($( $name:expr => $value:expr ),* $(,)? ) => (
vec![ $( ( $name, $value ) ),* ].into_iter().collect()
);
}
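// An illustrative test: `map!` collects the pairs into any `FromIterator`
// target; a `HashMap` is the typical use. The module name is hypothetical.
#[cfg(test)]
mod map_macro_example {
#[test]
fn builds_a_hashmap() {
let m: std::collections::HashMap<&str, u32> = map!["a" => 1, "b" => 2];
assert_eq!(m["a"], 1);
assert_eq!(m.len(), 2);
}
}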
#[doc(hidden)]
pub use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
pub use serde;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use sp_runtime_interface::pass_by::{PassByEnum, PassByInner};
use sp_std::{ops::Deref, prelude::*};
pub use sp_debug_derive::RuntimeDebug;
#[cfg(feature = "serde")]
pub use impl_serde::serialize as bytes;
#[cfg(feature = "full_crypto")]
pub mod hashing;
#[cfg(feature = "full_crypto")]
pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64};
pub mod crypto;
pub mod hexdisplay;
pub use paste;
#[cfg(feature = "bandersnatch-experimental")]
pub mod bandersnatch;
#[cfg(feature = "bls-experimental")]
pub mod bls;
pub mod defer;
pub mod ecdsa;
pub mod ed25519;
pub mod hash;
#[cfg(feature = "std")]
mod hasher;
pub mod offchain;
pub mod sr25519;
pub mod testing;
#[cfg(feature = "std")]
pub mod traits;
pub mod uint;
#[cfg(feature = "bls-experimental")]
pub use bls::{bls377, bls381};
pub use self::{
hash::{convert_hash, H160, H256, H512},
uint::{U256, U512},
};
#[cfg(feature = "full_crypto")]
pub use crypto::{ByteArray, DeriveJunction, Pair, Public};
#[cfg(feature = "std")]
pub use self::hasher::blake2::Blake2Hasher;
#[cfg(feature = "std")]
pub use self::hasher::keccak::KeccakHasher;
pub use hash_db::Hasher;
pub use bounded_collections as bounded;
#[cfg(feature = "std")]
pub use bounded_collections::{bounded_btree_map, bounded_vec};
pub use bounded_collections::{
parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128,
ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet,
};
pub use sp_storage as storage;
#[doc(hidden)]
pub use sp_std;
/// Hex-serialized shim for `Vec<u8>`.
#[derive(PartialEq, Eq, Clone, RuntimeDebug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))]
pub struct Bytes(#[cfg_attr(feature = "serde", serde(with = "bytes"))] pub Vec<u8>);
impl From<Vec<u8>> for Bytes {
fn from(s: Vec<u8>) -> Self {
Bytes(s)
}
}
impl From<OpaqueMetadata> for Bytes {
fn from(s: OpaqueMetadata) -> Self {
Bytes(s.0)
}
}
impl Deref for Bytes {
type Target = [u8];
fn deref(&self) -> &[u8] {
&self.0[..]
}
}
impl codec::WrapperTypeEncode for Bytes {}
impl codec::WrapperTypeDecode for Bytes {
type Wrapped = Vec<u8>;
}
#[cfg(feature = "std")]
impl sp_std::str::FromStr for Bytes {
type Err = bytes::FromHexError;
fn | (s: &str) -> Result<Self, Self::Err> {
bytes::from_hex(s).map(Bytes)
}
}
/// Stores the encoded `RuntimeMetadata` for the native side as opaque type.
#[derive(Encode, Decode, PartialEq, TypeInfo)]
pub struct OpaqueMetadata(Vec<u8>);
impl OpaqueMetadata {
/// Creates a new instance with the given metadata blob.
pub fn new(metadata: Vec<u8>) -> Self {
OpaqueMetadata(metadata)
}
}
impl sp_std::ops::Deref for OpaqueMetadata {
type Target = Vec<u8>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Simple blob to hold a `PeerId` without committing to its format.
#[derive(
Default,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
PassByInner,
TypeInfo,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct OpaquePeerId(pub Vec<u8>);
impl OpaquePeerId {
/// Create new `OpaquePeerId`
pub fn new(vec: Vec<u8>) -> Self {
OpaquePeerId(vec)
}
}
/// Provide a simple 4 byte identifier for a type.
pub trait TypeId {
/// Simple 4 byte identifier.
const TYPE_ID: [u8; 4];
}
/// A log level matching the one from `log` crate.
///
/// Used internally by `sp_io::logging::log` method.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevel {
/// `Error` log level.
Error = 1_isize,
/// `Warn` log level.
Warn = 2_isize,
/// `Info` log level.
Info = 3_isize,
/// `Debug` log level.
Debug = 4_isize,
/// `Trace` log level.
Trace = 5_isize,
}
impl From<u32> for LogLevel {
fn from(val: u32) -> Self {
match val {
x if x == LogLevel::Warn as u32 => LogLevel::Warn,
x if x == LogLevel::Info as u32 => LogLevel::Info,
x if x == LogLevel::Debug as u32 => LogLevel::Debug,
x if x == LogLevel::Trace as u32 => LogLevel::Trace,
_ => LogLevel::Error,
}
}
}
impl From<log::Level> for LogLevel {
fn from(l: log::Level) -> Self {
use log::Level::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<LogLevel> for log::Level {
fn from(l: LogLevel) -> Self {
use self::LogLevel::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Log level filter that expresses which log levels should be filtered.
///
/// This enum matches the [`log::LevelFilter`] enum.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevelFilter {
/// `Off` log level filter.
Off = 0_isize,
/// `Error` log level filter.
Error = 1_isize,
/// `Warn` log level filter.
Warn = 2_isize,
/// `Info` log level filter.
Info = 3_isize,
/// `Debug` log level filter.
Debug = 4_isize,
/// `Trace` log level filter.
Trace = 5_isize,
}
impl From<LogLevelFilter> for log::LevelFilter {
fn from(l: LogLevelFilter) -> Self {
use self::LogLevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<log::LevelFilter> for LogLevelFilter {
fn from(l: log::LevelFilter) -> Self {
use log::LevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`.
///
/// When Substrate calls into Wasm it expects a fixed signature for functions exported
/// from the Wasm blob. The return value of this signature is always a `u64`.
/// This `u64` stores the pointer to the encoded return value and the length of this encoded value.
/// The low `32bits` are reserved for the pointer, followed by `32bit` for the length.
#[cfg(not(feature = "std"))]
pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 {
let encoded = value.encode();
let ptr = encoded.as_ptr() as u64;
let length = encoded.len() as u64;
let res = ptr | (length << 32);
// Leak the output vector to avoid it being freed.
// This is fine in a WASM context since the heap
// will be discarded after the call.
sp_std::mem::forget(encoded);
res
}
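// An illustrative sketch, not part of the original file: the inverse of the
// packing above, as the host/controller side would perform it. The low 32 bits
// are the pointer, the high 32 bits the length.
#[allow(dead_code)]
fn split_wasm_fn_return_value(packed: u64) -> (u32, u32) {
let ptr = (packed & 0xffff_ffff) as u32;
let len = (packed >> 32) as u32;
(ptr, len)
}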
/// The void type - it cannot exist.
// Oh rust, you crack me up...
#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
pub enum Void {}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when `feature = std` and doesn't require
/// the bound on `no_std`. This is useful for situations where you require that a type implements
/// a certain trait with `feature = std`, but not on `no_std`.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker! {
/// /// A marker for a type that implements `Debug` when `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(feature = "std")]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(feature = "std")]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(feature = "std"))]
pub trait $trait_name {}
#[cfg(not(feature = "std"))]
impl<T> $trait_name for T {}
)+
}
}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when either `feature = std` or `feature =
/// serde` is activated.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker_std_or_serde! {
/// /// A marker for a type that implements `Debug` when `feature = serde` or `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = serde` or `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker_std_or_serde {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(any(feature = "serde", feature = "std"))]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(any(feature = "serde", feature = "std"))]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(any(feature = "serde", feature = "std")))]
pub trait $trait_name {}
#[cfg(not(any(feature = "serde", feature = "std")))]
impl<T> $trait_name for T {}
)+
}
}
/// The maximum number of bytes that can be allocated at one time.
// The maximum possible allocation size was chosen rather arbitrarily; 32 MiB should be enough for
// everybody.
pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB
/// Generates a macro for checking if a certain feature is enabled.
///
/// These feature checking macros can be used to conditionally enable/disable code in a dependent
/// crate based on a feature in the crate where the macro is called.
///
/// # Example
///```
/// sp_core::generate_feature_enabled_macro!(check_std_is_enabled, feature = "std", $);
/// sp_core::generate_feature_enabled_macro!(check_std_or_serde_is_enabled, any(feature = "std", feature = "serde"), $);
///
/// // All the code passed to the macro will then conditionally compiled based on the features
/// // activated for the crate where the macro was generated.
/// check_std_is_enabled! {
/// struct StdEnabled;
/// }
///```
#[macro_export]
// We need to skip formatting this macro because of this bug:
// https://github.com/rust-lang/rustfmt/issues/5283
#[rustfmt::skip]
macro_rules! generate_feature_enabled_macro {
( $macro_name:ident, $feature_name:meta, $d:tt ) => {
$crate::paste::paste!{
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg($feature_name)]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {
$d ( $d input )*
}
}
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg(not($feature_name))]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {};
}
// Work around for: <https://github.com/rust-lang/rust/pull/52234>
#[doc(hidden)]
pub use [<_ $macro_name>] as $macro_name;
}
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn generate_feature_enabled_macro_panics() {
generate_feature_enabled_macro!(if_test, test, $);
if_test!(panic!("This should panic"));
}
#[test]
fn generate_feature_enabled_macro_works() {
generate_feature_enabled_macro!(if_not_test, not(test), $);
if_not_test!(panic!("This should not panic"));
}
}
| from_str | identifier_name |
lib.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Shareable Substrate types.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
/// Initialize a key-value collection from array.
///
/// Creates a vector of given pairs and calls `collect` on the iterator from it.
/// Can be used to create a `HashMap`.
#[macro_export]
macro_rules! map {
($( $name:expr => $value:expr ),* $(,)? ) => (
vec![ $( ( $name, $value ) ),* ].into_iter().collect()
);
}
#[doc(hidden)]
pub use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo; | #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use sp_runtime_interface::pass_by::{PassByEnum, PassByInner};
use sp_std::{ops::Deref, prelude::*};
pub use sp_debug_derive::RuntimeDebug;
#[cfg(feature = "serde")]
pub use impl_serde::serialize as bytes;
#[cfg(feature = "full_crypto")]
pub mod hashing;
#[cfg(feature = "full_crypto")]
pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64};
pub mod crypto;
pub mod hexdisplay;
pub use paste;
#[cfg(feature = "bandersnatch-experimental")]
pub mod bandersnatch;
#[cfg(feature = "bls-experimental")]
pub mod bls;
pub mod defer;
pub mod ecdsa;
pub mod ed25519;
pub mod hash;
#[cfg(feature = "std")]
mod hasher;
pub mod offchain;
pub mod sr25519;
pub mod testing;
#[cfg(feature = "std")]
pub mod traits;
pub mod uint;
#[cfg(feature = "bls-experimental")]
pub use bls::{bls377, bls381};
pub use self::{
hash::{convert_hash, H160, H256, H512},
uint::{U256, U512},
};
#[cfg(feature = "full_crypto")]
pub use crypto::{ByteArray, DeriveJunction, Pair, Public};
#[cfg(feature = "std")]
pub use self::hasher::blake2::Blake2Hasher;
#[cfg(feature = "std")]
pub use self::hasher::keccak::KeccakHasher;
pub use hash_db::Hasher;
pub use bounded_collections as bounded;
#[cfg(feature = "std")]
pub use bounded_collections::{bounded_btree_map, bounded_vec};
pub use bounded_collections::{
parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128,
ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet,
};
pub use sp_storage as storage;
#[doc(hidden)]
pub use sp_std;
/// Hex-serialized shim for `Vec<u8>`.
#[derive(PartialEq, Eq, Clone, RuntimeDebug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))]
pub struct Bytes(#[cfg_attr(feature = "serde", serde(with = "bytes"))] pub Vec<u8>);
impl From<Vec<u8>> for Bytes {
fn from(s: Vec<u8>) -> Self {
Bytes(s)
}
}
impl From<OpaqueMetadata> for Bytes {
fn from(s: OpaqueMetadata) -> Self {
Bytes(s.0)
}
}
impl Deref for Bytes {
type Target = [u8];
fn deref(&self) -> &[u8] {
&self.0[..]
}
}
impl codec::WrapperTypeEncode for Bytes {}
impl codec::WrapperTypeDecode for Bytes {
type Wrapped = Vec<u8>;
}
#[cfg(feature = "std")]
impl sp_std::str::FromStr for Bytes {
type Err = bytes::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
bytes::from_hex(s).map(Bytes)
}
}
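// An illustrative sketch: `Bytes` wraps a `Vec<u8>` and exposes the slice
// through `Deref`, so it can be passed wherever `&[u8]` is expected. The
// function name is hypothetical.
#[allow(dead_code)]
fn bytes_deref_example() {
let b = Bytes::from(vec![0xde, 0xad]);
assert_eq!(&*b, &[0xde, 0xad][..]);
}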
/// Stores the encoded `RuntimeMetadata` for the native side as opaque type.
#[derive(Encode, Decode, PartialEq, TypeInfo)]
pub struct OpaqueMetadata(Vec<u8>);
impl OpaqueMetadata {
/// Creates a new instance with the given metadata blob.
pub fn new(metadata: Vec<u8>) -> Self {
OpaqueMetadata(metadata)
}
}
impl sp_std::ops::Deref for OpaqueMetadata {
type Target = Vec<u8>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Simple blob to hold a `PeerId` without committing to its format.
#[derive(
Default,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
PassByInner,
TypeInfo,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct OpaquePeerId(pub Vec<u8>);
impl OpaquePeerId {
/// Create new `OpaquePeerId`
pub fn new(vec: Vec<u8>) -> Self {
OpaquePeerId(vec)
}
}
/// Provide a simple 4 byte identifier for a type.
pub trait TypeId {
/// Simple 4 byte identifier.
const TYPE_ID: [u8; 4];
}
/// A log level matching the one from `log` crate.
///
/// Used internally by `sp_io::logging::log` method.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevel {
/// `Error` log level.
Error = 1_isize,
/// `Warn` log level.
Warn = 2_isize,
/// `Info` log level.
Info = 3_isize,
/// `Debug` log level.
Debug = 4_isize,
/// `Trace` log level.
Trace = 5_isize,
}
impl From<u32> for LogLevel {
fn from(val: u32) -> Self {
match val {
x if x == LogLevel::Warn as u32 => LogLevel::Warn,
x if x == LogLevel::Info as u32 => LogLevel::Info,
x if x == LogLevel::Debug as u32 => LogLevel::Debug,
x if x == LogLevel::Trace as u32 => LogLevel::Trace,
_ => LogLevel::Error,
}
}
}
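// An illustrative note: the conversion above deliberately treats any unknown
// discriminant as the most severe level, `Error`.
#[allow(dead_code)]
fn unknown_log_level_falls_back_to_error() {
let level = LogLevel::from(99);
assert!(matches!(level, LogLevel::Error));
}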
impl From<log::Level> for LogLevel {
fn from(l: log::Level) -> Self {
use log::Level::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<LogLevel> for log::Level {
fn from(l: LogLevel) -> Self {
use self::LogLevel::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Log level filter that expresses which log levels should be filtered.
///
/// This enum matches the [`log::LevelFilter`] enum.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevelFilter {
/// `Off` log level filter.
Off = 0_isize,
/// `Error` log level filter.
Error = 1_isize,
/// `Warn` log level filter.
Warn = 2_isize,
/// `Info` log level filter.
Info = 3_isize,
/// `Debug` log level filter.
Debug = 4_isize,
/// `Trace` log level filter.
Trace = 5_isize,
}
impl From<LogLevelFilter> for log::LevelFilter {
fn from(l: LogLevelFilter) -> Self {
use self::LogLevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<log::LevelFilter> for LogLevelFilter {
fn from(l: log::LevelFilter) -> Self {
use log::LevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`.
///
/// When Substrate calls into Wasm it expects a fixed signature for functions exported
/// from the Wasm blob. The return value of this signature is always a `u64`.
/// This `u64` stores the pointer to the encoded return value and the length of this encoded value.
/// The low `32bits` are reserved for the pointer, followed by `32bit` for the length.
#[cfg(not(feature = "std"))]
pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 {
let encoded = value.encode();
let ptr = encoded.as_ptr() as u64;
let length = encoded.len() as u64;
let res = ptr | (length << 32);
// Leak the output vector to avoid it being freed.
// This is fine in a WASM context since the heap
// will be discarded after the call.
sp_std::mem::forget(encoded);
res
}
/// The void type - it cannot exist.
// Oh rust, you crack me up...
#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
pub enum Void {}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when `feature = std` and doesn't require
/// the bound on `no_std`. This is useful for situations where you require that a type implements
/// a certain trait with `feature = std`, but not on `no_std`.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker! {
/// /// A marker for a type that implements `Debug` when `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(feature = "std")]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(feature = "std")]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(feature = "std"))]
pub trait $trait_name {}
#[cfg(not(feature = "std"))]
impl<T> $trait_name for T {}
)+
}
}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when either `feature = std` or `feature =
/// serde` is activated.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker_std_or_serde! {
/// /// A marker for a type that implements `Debug` when `feature = serde` or `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = serde` or `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker_std_or_serde {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(any(feature = "serde", feature = "std"))]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(any(feature = "serde", feature = "std"))]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(any(feature = "serde", feature = "std")))]
pub trait $trait_name {}
#[cfg(not(any(feature = "serde", feature = "std")))]
impl<T> $trait_name for T {}
)+
}
}
/// The maximum number of bytes that can be allocated at one time.
// The maximum possible allocation size was chosen rather arbitrarily; 32 MiB should be enough for
// everybody.
pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB
/// Generates a macro for checking if a certain feature is enabled.
///
/// These feature checking macros can be used to conditionally enable/disable code in a dependent
/// crate based on a feature in the crate where the macro is called.
///
/// # Example
///```
/// sp_core::generate_feature_enabled_macro!(check_std_is_enabled, feature = "std", $);
/// sp_core::generate_feature_enabled_macro!(check_std_or_serde_is_enabled, any(feature = "std", feature = "serde"), $);
///
/// // All the code passed to the macro will then conditionally compiled based on the features
/// // activated for the crate where the macro was generated.
/// check_std_is_enabled! {
/// struct StdEnabled;
/// }
///```
#[macro_export]
// We need to skip formatting this macro because of this bug:
// https://github.com/rust-lang/rustfmt/issues/5283
#[rustfmt::skip]
macro_rules! generate_feature_enabled_macro {
( $macro_name:ident, $feature_name:meta, $d:tt ) => {
$crate::paste::paste!{
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg($feature_name)]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {
$d ( $d input )*
}
}
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg(not($feature_name))]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {};
}
// Work around for: <https://github.com/rust-lang/rust/pull/52234>
#[doc(hidden)]
pub use [<_ $macro_name>] as $macro_name;
}
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn generate_feature_enabled_macro_panics() {
generate_feature_enabled_macro!(if_test, test, $);
if_test!(panic!("This should panic"));
}
#[test]
fn generate_feature_enabled_macro_works() {
generate_feature_enabled_macro!(if_not_test, not(test), $);
if_not_test!(panic!("This should not panic"));
}
} | #[cfg(feature = "serde")]
pub use serde; | random_line_split |
lib.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Shareable Substrate types.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
/// Initialize a key-value collection from array.
///
/// Creates a vector of given pairs and calls `collect` on the iterator from it.
/// Can be used to create a `HashMap`.
#[macro_export]
macro_rules! map {
($( $name:expr => $value:expr ),* $(,)? ) => (
vec![ $( ( $name, $value ) ),* ].into_iter().collect()
);
}
#[doc(hidden)]
pub use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
pub use serde;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use sp_runtime_interface::pass_by::{PassByEnum, PassByInner};
use sp_std::{ops::Deref, prelude::*};
pub use sp_debug_derive::RuntimeDebug;
#[cfg(feature = "serde")]
pub use impl_serde::serialize as bytes;
#[cfg(feature = "full_crypto")]
pub mod hashing;
#[cfg(feature = "full_crypto")]
pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64};
pub mod crypto;
pub mod hexdisplay;
pub use paste;
#[cfg(feature = "bandersnatch-experimental")]
pub mod bandersnatch;
#[cfg(feature = "bls-experimental")]
pub mod bls;
pub mod defer;
pub mod ecdsa;
pub mod ed25519;
pub mod hash;
#[cfg(feature = "std")]
mod hasher;
pub mod offchain;
pub mod sr25519;
pub mod testing;
#[cfg(feature = "std")]
pub mod traits;
pub mod uint;
#[cfg(feature = "bls-experimental")]
pub use bls::{bls377, bls381};
pub use self::{
hash::{convert_hash, H160, H256, H512},
uint::{U256, U512},
};
#[cfg(feature = "full_crypto")]
pub use crypto::{ByteArray, DeriveJunction, Pair, Public};
#[cfg(feature = "std")]
pub use self::hasher::blake2::Blake2Hasher;
#[cfg(feature = "std")]
pub use self::hasher::keccak::KeccakHasher;
pub use hash_db::Hasher;
pub use bounded_collections as bounded;
#[cfg(feature = "std")]
pub use bounded_collections::{bounded_btree_map, bounded_vec};
pub use bounded_collections::{
parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128,
ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet,
};
pub use sp_storage as storage;
#[doc(hidden)]
pub use sp_std;
/// Hex-serialized shim for `Vec<u8>`.
#[derive(PartialEq, Eq, Clone, RuntimeDebug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))]
pub struct Bytes(#[cfg_attr(feature = "serde", serde(with = "bytes"))] pub Vec<u8>);
impl From<Vec<u8>> for Bytes {
fn from(s: Vec<u8>) -> Self {
Bytes(s)
}
}
impl From<OpaqueMetadata> for Bytes {
fn from(s: OpaqueMetadata) -> Self {
Bytes(s.0)
}
}
impl Deref for Bytes {
type Target = [u8];
fn deref(&self) -> &[u8] |
}
impl codec::WrapperTypeEncode for Bytes {}
impl codec::WrapperTypeDecode for Bytes {
type Wrapped = Vec<u8>;
}
#[cfg(feature = "std")]
impl sp_std::str::FromStr for Bytes {
type Err = bytes::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
bytes::from_hex(s).map(Bytes)
}
}
/// Stores the encoded `RuntimeMetadata` for the native side as opaque type.
#[derive(Encode, Decode, PartialEq, TypeInfo)]
pub struct OpaqueMetadata(Vec<u8>);
impl OpaqueMetadata {
/// Creates a new instance with the given metadata blob.
pub fn new(metadata: Vec<u8>) -> Self {
OpaqueMetadata(metadata)
}
}
impl sp_std::ops::Deref for OpaqueMetadata {
type Target = Vec<u8>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Simple blob to hold a `PeerId` without committing to its format.
#[derive(
Default,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
PassByInner,
TypeInfo,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct OpaquePeerId(pub Vec<u8>);
impl OpaquePeerId {
/// Create new `OpaquePeerId`
pub fn new(vec: Vec<u8>) -> Self {
OpaquePeerId(vec)
}
}
/// Provide a simple 4 byte identifier for a type.
pub trait TypeId {
/// Simple 4 byte identifier.
const TYPE_ID: [u8; 4];
}
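// An illustrative sketch: a type advertises its identifier by implementing
// `TypeId`. The type name and the 4-byte value below are made up for the
// example.
#[allow(dead_code)]
struct ExampleTagged;
impl TypeId for ExampleTagged {
const TYPE_ID: [u8; 4] = *b"exmp";
}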
/// A log level matching the one from `log` crate.
///
/// Used internally by `sp_io::logging::log` method.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevel {
/// `Error` log level.
Error = 1_isize,
/// `Warn` log level.
Warn = 2_isize,
/// `Info` log level.
Info = 3_isize,
/// `Debug` log level.
Debug = 4_isize,
/// `Trace` log level.
Trace = 5_isize,
}
impl From<u32> for LogLevel {
fn from(val: u32) -> Self {
match val {
x if x == LogLevel::Warn as u32 => LogLevel::Warn,
x if x == LogLevel::Info as u32 => LogLevel::Info,
x if x == LogLevel::Debug as u32 => LogLevel::Debug,
x if x == LogLevel::Trace as u32 => LogLevel::Trace,
_ => LogLevel::Error,
}
}
}
impl From<log::Level> for LogLevel {
fn from(l: log::Level) -> Self {
use log::Level::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<LogLevel> for log::Level {
fn from(l: LogLevel) -> Self {
use self::LogLevel::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Log level filter that expresses which log levels should be filtered.
///
/// This enum matches the [`log::LevelFilter`] enum.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevelFilter {
/// `Off` log level filter.
Off = 0_isize,
/// `Error` log level filter.
Error = 1_isize,
/// `Warn` log level filter.
Warn = 2_isize,
/// `Info` log level filter.
Info = 3_isize,
/// `Debug` log level filter.
Debug = 4_isize,
/// `Trace` log level filter.
Trace = 5_isize,
}
impl From<LogLevelFilter> for log::LevelFilter {
fn from(l: LogLevelFilter) -> Self {
use self::LogLevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<log::LevelFilter> for LogLevelFilter {
fn from(l: log::LevelFilter) -> Self {
use log::LevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`.
///
/// When Substrate calls into Wasm it expects a fixed signature for functions exported
/// from the Wasm blob. The return value of this signature is always a `u64`.
/// This `u64` stores the pointer to the encoded return value and the length of this encoded value.
/// The low `32bits` are reserved for the pointer, followed by `32bit` for the length.
#[cfg(not(feature = "std"))]
pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 {
let encoded = value.encode();
let ptr = encoded.as_ptr() as u64;
let length = encoded.len() as u64;
let res = ptr | (length << 32);
// Leak the output vector to avoid it being freed.
// This is fine in a WASM context since the heap
// will be discarded after the call.
sp_std::mem::forget(encoded);
res
}
/// The void type - it cannot exist.
// Oh rust, you crack me up...
#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
pub enum Void {}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when `feature = std` and doesn't require
/// the bound on `no_std`. This is useful for situations where you require that a type implements
/// a certain trait with `feature = std`, but not on `no_std`.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker! {
/// /// A marker for a type that implements `Debug` when `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(feature = "std")]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(feature = "std")]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(feature = "std"))]
pub trait $trait_name {}
#[cfg(not(feature = "std"))]
impl<T> $trait_name for T {}
)+
}
}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when either `feature = std` or `feature =
/// serde` is activated.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker_std_or_serde! {
/// /// A marker for a type that implements `Debug` when `feature = serde` or `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = serde` or `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker_std_or_serde {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(any(feature = "serde", feature = "std"))]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(any(feature = "serde", feature = "std"))]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(any(feature = "serde", feature = "std")))]
pub trait $trait_name {}
#[cfg(not(any(feature = "serde", feature = "std")))]
impl<T> $trait_name for T {}
)+
}
}
/// The maximum number of bytes that can be allocated at one time.
// The maximum possible allocation size was chosen rather arbitrarily; 32 MiB should be enough for
// everybody.
pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB
/// Generates a macro for checking if a certain feature is enabled.
///
/// These feature checking macros can be used to conditionally enable/disable code in a dependent
/// crate based on a feature in the crate where the macro is called.
///
/// # Example
///```
/// sp_core::generate_feature_enabled_macro!(check_std_is_enabled, feature = "std", $);
/// sp_core::generate_feature_enabled_macro!(check_std_or_serde_is_enabled, any(feature = "std", feature = "serde"), $);
///
/// // All the code passed to the macro will then conditionally compiled based on the features
/// // activated for the crate where the macro was generated.
/// check_std_is_enabled! {
/// struct StdEnabled;
/// }
///```
#[macro_export]
// We need to skip formatting this macro because of this bug:
// https://github.com/rust-lang/rustfmt/issues/5283
#[rustfmt::skip]
macro_rules! generate_feature_enabled_macro {
( $macro_name:ident, $feature_name:meta, $d:tt ) => {
$crate::paste::paste!{
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg($feature_name)]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {
$d ( $d input )*
}
}
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg(not($feature_name))]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {};
}
// Work around for: <https://github.com/rust-lang/rust/pull/52234>
#[doc(hidden)]
pub use [<_ $macro_name>] as $macro_name;
}
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn generate_feature_enabled_macro_panics() {
generate_feature_enabled_macro!(if_test, test, $);
if_test!(panic!("This should panic"));
}
#[test]
fn generate_feature_enabled_macro_works() {
generate_feature_enabled_macro!(if_not_test, not(test), $);
if_not_test!(panic!("This should not panic"));
}
}
| {
&self.0[..]
} | identifier_body |
api.rs | //! This API contains all you will need to interface
//! your bot algorithm with the GTPv2 protocol.
//! Your main task will be to implement the `Gtp` trait.
use std::str::FromStr;
use std::vec::Vec;
/// Contains all the possible errors your bot
/// may return to the library.
/// Be careful: any callback returning an error it is not
/// supposed to return will cause the lib to `panic!()`.
pub enum GTPError {
NotImplemented,
InvalidBoardSize,
InvalidMove,
BadVertexList,
BoardNotEmpty,
CannotUndo,
CannotScore,
}
/// Represents a player, Black or White.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Colour {
Black,
White
}
/// Represents a vertex of the board.
/// Note that board size is at most 25x25.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct Vertex {
x: u8, // letter
y: u8 // number
}
/// Represents a move, either placing a stone, passing or resigning.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Move {
Stone(Vertex),
Pass,
Resign
}
/// Represents a move associated with a player.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct ColouredMove {
pub player: Colour,
pub mov: Move
}
/// The status of a stone : alive, dead or seki.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum StoneStatus {
Alive,
Seki,
Dead
}
/// This is the trait used by the library to call back into your bot.
/// You must implement some functions; the provided ones correspond
/// to the optional commands of the protocol. If you want to
/// implement them, simply override them. If you do not, the library
/// will not report them as available.
pub trait Gtp {
/// The name of your bot (e.g. "My super Bot").
fn name(&self) -> String;
/// The version of your bot (e.g. "v2.3-r5").
fn version(&self) -> String;
// Any function returning a GTPError that it is not supposed
// to return will be fatal to the framework.
// Basic functions, must be implemented
| fn komi(&mut self, komi: f32) -> ();
/// Sets the board size.
/// Returns `Err(InvalidBoardSize)` if the size is not supported.
/// The protocol cannot handle board sizes > 25x25.
fn boardsize(&mut self, size: usize) -> Result<(), GTPError>;
/// Plays the provided move on the board.
/// Returns `Err(InvalidMove)` if the move is invalid.
/// The protocol does not forbid the same player playing twice in a row.
fn play(&mut self, mov: ColouredMove) -> Result<(), GTPError>;
/// Asks the bot for a move for the chosen player.
/// Cannot fail, the bot must provide a move even if the last
/// played move is of the same colour.
/// Plays the move in the internal representation of the game of the bot.
fn genmove(&mut self, player: Colour) -> Move;
// Optional functions; if not implemented, the corresponding
// commands will not be activated
// All these functions will be called once by the framework
// at startup, then clear_board will be called
/// Asks the bot for a move for the chosen player.
/// Must be deterministic, and must not actually play the move.
/// Should always return `Ok(Move)`, never raise any error.
#[allow(unused_variables)]
fn reg_genmove(&self, player: Colour) -> Result<Move, GTPError> {
Err(GTPError::NotImplemented)
}
/// Undo last move if possible.
/// If not, return `Err(CannotUndo)`.
/// If undo is never possible, should not be implemented.
fn undo(&mut self) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places handicap stones for black
/// according to pre-defined patterns, see specification of GTPv2.
/// Returns a vector of the chosen stones.
/// Can fail with `Err(BoardNotEmpty)`.
/// The library guarantees `number` will always be between 2 and 9 inclusive.
#[allow(unused_variables)]
fn fixed_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places its handicap stones
/// and returns a vector of Vertexes.
/// It can place fewer stones if the requested number is too high.
/// Can fail with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
#[allow(unused_variables)]
fn place_free_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Uses the provided list as handicap stones for black.
/// Fails with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
/// Fails with `Err(BadVertexList)` if the vertex list is unusable
/// (two stones at the same place, or stones outside the board).
#[allow(unused_variables)]
fn set_free_handicap(&mut self, stones: &[Vertex]) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Sets the time settings for the game.
/// It is only informative: the bot should count its own time,
/// but the controller is supposed to enforce it.
/// Times are given in minutes; should never fail.
#[allow(unused_variables)]
fn time_settings(&mut self, main_time: usize, byoyomi_time: usize, byoyomi_stones: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Returns a vector of stones of both colours in the given status,
/// in the opinion of the bot.
/// Should never fail.
#[allow(unused_variables)]
fn final_status_list(&self, status: StoneStatus) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Computes the bot's estimate of the final score.
/// If it is a draw, the float value must be 0 and the colour is not important.
/// Can fail with `Err(CannotScore)`.
fn final_score(&self) -> Result<(f32, Colour), GTPError> {
Err(GTPError::NotImplemented)
}
/// Returns a description of the board as seen by the bot:
/// (boardsize, black_stones, white_stones, black_captured_count, white_captured_count).
/// Should never fail.
fn showboard(&self) -> Result<(usize, Vec<Vertex>, Vec<Vertex>, usize, usize), GTPError> {
Err(GTPError::NotImplemented)
}
/// Allows you to handle custom commands. Returns (success, output).
#[allow(unused_variables)]
fn custom_command(&mut self, command: &str, args: &str) -> (bool, String) {
(false, "invalid command".to_string())
}
/// Returns true if the given custom command is known.
#[allow(unused_variables)]
fn known_custom_command(&self, command: &str) -> bool {
false
}
/// Returns the list of you custom commands.
fn list_custom_commands(&self) -> Vec<String> {
Vec::new()
}
/// Loads the given SGF file and replays it up to move `n` (GTP `loadsgf`).
#[allow(unused_variables)]
fn loadsgf(&mut self, filename: &str, n: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
}
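// A minimal illustrative sketch, not part of the original API: the smallest
// possible bot, implementing only the mandatory callbacks. It ignores the
// board state entirely and always passes, which is legal but hopeless Go.
#[allow(dead_code)]
struct PassBot;
impl Gtp for PassBot {
fn name(&self) -> String { "PassBot".to_string() }
fn version(&self) -> String { "0.1".to_string() }
fn clear_board(&mut self) {}
fn komi(&mut self, _komi: f32) {}
fn boardsize(&mut self, size: usize) -> Result<(), GTPError> {
// The protocol cannot express boards larger than 25x25.
if (1..=25).contains(&size) { Ok(()) } else { Err(GTPError::InvalidBoardSize) }
}
fn play(&mut self, _mov: ColouredMove) -> Result<(), GTPError> { Ok(()) }
fn genmove(&mut self, _player: Colour) -> Move { Move::Pass }
}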
// Vertex implementation for messing with strings
impl Vertex {
/// Creates a vertex from 2 numerical coords.
/// Both must be between 1 and 25.
pub fn from_coords(x: u8, y:u8) -> Option<Vertex> {
if x == 0 || x > 25 || y == 0 || y > 25 {
None
} else {
Some(Vertex{x: x, y: y})
}
}
/// Creates a vertex from board coordinates (from A1 to Z25).
/// Remember that letter I is banned.
pub fn from_str(text: &str) -> Option<Vertex> {
if text.len() < 2 || text.len() > 3 {
return None;
}
let mut x: u8 = text.as_bytes()[0];
if x < ('A' as u8) || x > ('Z' as u8) || (x as char) == 'I' {
return None;
}
x -= ('A' as u8) - 1;
if x > 9 {
x -= 1;
} // eliminate 'I'
let y: u8 = u8::from_str(&text[1..]).unwrap_or(0);
if y == 0 || y > 25 {
return None;
}
Some(Vertex{x: x, y: y})
}
/// Returns a tuple of coordinates.
pub fn to_coords(&self) -> (u8, u8) {
(self.x, self.y)
}
/// Returns the string representation of this vertex (ex: G12).
pub fn to_string(&self) -> String {
let mut letter: u8 = 'A' as u8;
if self.x >= 9 {
// eliminate 'I'
letter += self.x;
} else {
letter += self.x-1;
}
format!("{}{}", letter as char, self.y)
}
}
impl Move {
/// Returns a string representation of the move compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Move::Stone(vrtx) => vrtx.to_string(),
Move::Pass => "pass".to_string(),
Move::Resign => "resign".to_string(),
}
}
}
impl Colour {
/// Returns a string representation of the color compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Colour::White => "white".to_string(),
Colour::Black => "black".to_string(),
}
}
}
impl ColouredMove {
/// Returns a string representation of the colored move compatible
/// with GTPv2.
pub fn to_string(&self) -> String {
// GTP separates the colour and the move with a space (e.g. "black C3").
self.player.to_string() + " " + &self.mov.to_string()
}
}
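// An illustrative usage sketch: formatting a move the way a GTP controller
// expects to read it; this yields "black C3". The function name is
// hypothetical.
#[allow(dead_code)]
fn coloured_move_to_gtp_text() -> String {
ColouredMove {
player: Colour::Black,
mov: Move::Stone(Vertex::from_coords(3, 3).unwrap()),
}
.to_string()
}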
#[cfg(test)]
mod tests {
#[test]
fn vertex_to_string() {
let vrtx1 = super::Vertex::from_coords(8u8, 7u8).unwrap();
assert_eq!(&vrtx1.to_string(), "H7");
let vrtx2 = super::Vertex::from_coords(9u8, 13u8).unwrap();
assert_eq!(&vrtx2.to_string(), "J13");
let vrtx3 = super::Vertex::from_coords(19u8, 1u8).unwrap();
assert_eq!(&vrtx3.to_string(), "T1");
}
#[test]
fn string_to_vertex() {
let vrtx1 = super::Vertex::from_str("C7").unwrap();
assert_eq!(vrtx1.to_coords(), (3u8, 7u8));
let vrtx2 = super::Vertex::from_str("J11").unwrap();
assert_eq!(vrtx2.to_coords(), (9u8, 11u8));
let vrtx3 = super::Vertex::from_str("Z25").unwrap();
assert_eq!(vrtx3.to_coords(), (25u8, 25u8));
}
#[test]
#[should_panic]
fn too_big_coordinates() {
let vrtx = super::Vertex::from_coords(26u8, 13u8).unwrap();
assert_eq!(vrtx.to_coords(), (26u8, 13u8));
}
#[test]
#[should_panic]
fn invalid_string() {
let vrtx = super::Vertex::from_str("I13").unwrap();
assert_eq!(vrtx.to_coords(), (9u8, 13u8));
}
} | /// Clears the board, can never fail.
fn clear_board(&mut self) -> ();
/// Sets the komi, can never fail, must accept absurd values. | random_line_split |
api.rs | //! This API contains all you will need to interface
//! your bot algorithm with the GTPv2 protocol.
//! Your main task will be to implement the `Gtp` trait.
use std::str::FromStr;
use std::vec::Vec;
/// Contains all the possible errors your bot
/// may return to the library.
/// Be careful: any callback returning an error it is not
/// supposed to return will cause the lib to `panic!()`.
pub enum GTPError {
NotImplemented,
InvalidBoardSize,
InvalidMove,
BadVertexList,
BoardNotEmpty,
CannotUndo,
CannotScore,
}
/// Represents a player, Black or White.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Colour {
Black,
White
}
/// Represents a vertex of the board.
/// Note that board size is at most 25x25.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct Vertex {
x: u8, // letter
y: u8 // number
}
/// Represents a move, either placing a stone, passing or resigning.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Move {
Stone(Vertex),
Pass,
Resign
}
/// Represents a move associated with a player.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct ColouredMove {
pub player: Colour,
pub mov: Move
}
/// The status of a stone: alive, dead, or seki.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum StoneStatus {
Alive,
Seki,
Dead
}
/// This is the trait used by the library to call back into your bot.
/// You must implement some functions; the provided ones correspond
/// to the optional commands of the protocol. If you want to
/// implement them, simply override them. If you do not, the library
/// will not report them as available.
pub trait Gtp {
    /// The name of your bot (ex: "My super Bot")
fn name(&self) -> String;
    /// The version of your bot (ex: "v2.3-r5")
fn version(&self) -> String;
// Any function returning a GTPError that it is not supposed
// to return will be fatal to the framework.
// Basic functions, must be implemented
/// Clears the board, can never fail.
fn clear_board(&mut self) -> ();
/// Sets the komi, can never fail, must accept absurd values.
fn komi(&mut self, komi: f32) -> ();
/// Sets the board size.
/// Returns `Err(InvalidBoardSize)` if the size is not supported.
/// The protocol cannot handle board sizes > 25x25.
fn boardsize(&mut self, size: usize) -> Result<(), GTPError>;
/// Plays the provided move on the board.
    /// Returns `Err(InvalidMove)` if the move is invalid.
    /// The protocol does not forbid the same player playing twice in a row.
fn play(&mut self, mov: ColouredMove) -> Result<(), GTPError>;
    /// Asks the bot for a move for the chosen player.
    /// Cannot fail: the bot must provide a move even if the last
    /// played move was of the same colour.
/// Plays the move in the internal representation of the game of the bot.
fn genmove(&mut self, player: Colour) -> Move;
    // Optional functions, if not implemented, the corresponding
// commands will not be activated
// All these functions will be called once by the framework
// at startup, then clear_board will be called
/// Asks the bot for a move for the chosen player.
/// Must be deterministic, and must not actually play the move.
/// Should always return `Ok(Move)`, never raise any error.
#[allow(unused_variables)]
fn reg_genmove(&self, player: Colour) -> Result<Move, GTPError> {
Err(GTPError::NotImplemented)
}
    /// Undoes the last move if possible.
/// If not, return `Err(CannotUndo)`.
/// If undo is never possible, should not be implemented.
fn undo(&mut self) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
    /// The bot places handicap stones for black
    /// according to pre-defined patterns; see the GTPv2 specification.
    /// Returns a vector of the chosen stones.
    /// Can fail with `Err(BoardNotEmpty)`.
    /// The library guarantees `number` will always be between 2 and 9 inclusive.
#[allow(unused_variables)]
fn fixed_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places its handicap stones
/// and returns a vector of Vertexes.
    /// It can place fewer stones if the asked number is too high.
    /// Can fail with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
#[allow(unused_variables)]
fn place_free_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Uses the provided list as handicap stones for black.
    /// Fails with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
/// Fails with `Err(BadVertexList)` if the vertex list is unusable
/// (two stones at the same place, or stones outside the board).
#[allow(unused_variables)]
fn set_free_handicap(&mut self, stones: &[Vertex]) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Sets the time settings for the game.
    /// It is only informative: the bot should count its own time,
    /// but the controller is supposed to enforce it.
    /// Times are given in minutes; this should never fail.
#[allow(unused_variables)]
fn time_settings(&mut self, main_time: usize, byoyomi_time: usize, byoyomi_stones: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
    /// Returns a vector of stones of both colours in the given status,
/// in the opinion of the bot.
/// Should never fail.
#[allow(unused_variables)]
fn final_status_list(&self, status: StoneStatus) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Computes the bot's calculation of the final score.
    /// If it is a draw, the float value must be 0 and the colour is not important.
    /// Can fail with `Err(CannotScore)`.
fn final_score(&self) -> Result<(f32, Colour), GTPError> {
Err(GTPError::NotImplemented)
}
    /// Returns a description of the board as seen by the bot:
/// (boardsize, black_stones, white_stones, black_captured_count, white_captured_count).
/// Should never fail.
fn showboard(&self) -> Result<(usize, Vec<Vertex>, Vec<Vertex>, usize, usize), GTPError> {
Err(GTPError::NotImplemented)
}
    /// Allows you to handle custom commands. Returns (success, output).
#[allow(unused_variables)]
fn custom_command(&mut self, command: &str, args: &str) -> (bool, String) {
(false, "invalid command".to_string())
}
/// Returns true if the given custom command is known.
#[allow(unused_variables)]
fn known_custom_command(&self, command: &str) -> bool {
false
}
    /// Returns the list of your custom commands.
fn list_custom_commands(&self) -> Vec<String> {
Vec::new()
}
    /// Loads the given SGF file and replays its first `n` moves.
    #[allow(unused_variables)]
    fn loadsgf(&mut self, filename: &str, n: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
}
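// Illustrative sketch (not part of the original API): a minimal `Gtp`
// implementation providing only the required callbacks. `DummyBot` is a
// hypothetical bot that accepts any state and always passes; the supported
// board sizes below are an assumption of this example, not of the protocol.
#[allow(dead_code)]
pub struct DummyBot;

impl Gtp for DummyBot {
    fn name(&self) -> String { "DummyBot".to_string() }
    fn version(&self) -> String { "0.0.1".to_string() }
    fn clear_board(&mut self) -> () {}
    fn komi(&mut self, _komi: f32) -> () {}
    fn boardsize(&mut self, size: usize) -> Result<(), GTPError> {
        // Only the classic sizes are accepted in this sketch.
        if size == 9 || size == 13 || size == 19 {
            Ok(())
        } else {
            Err(GTPError::InvalidBoardSize)
        }
    }
    fn play(&mut self, _mov: ColouredMove) -> Result<(), GTPError> { Ok(()) }
    fn genmove(&mut self, _player: Colour) -> Move { Move::Pass }
}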
// Vertex implementation for messing with strings
impl Vertex {
/// Creates a vertex from 2 numerical coords.
/// Both must be between 1 and 25.
pub fn from_coords(x: u8, y:u8) -> Option<Vertex> {
if x == 0 || x > 25 || y == 0 || y > 25 {
None
} else {
Some(Vertex{x: x, y: y})
}
}
/// Creates a vertex from board coordinates (from A1 to Z25).
/// Remember that letter I is banned.
pub fn from_str(text: &str) -> Option<Vertex> {
if text.len() < 2 || text.len() > 3 {
return None;
}
let mut x: u8 = text.as_bytes()[0];
if x < ('A' as u8) || x > ('Z' as u8) || (x as char) == 'I' {
return None;
}
x -= ('A' as u8) - 1;
if x > 9 { | // eliminate 'I'
        let y: u8 = u8::from_str(&text[1..]).unwrap_or(0);
if y == 0 || y > 25 {
return None;
}
Some(Vertex{x: x, y: y})
}
/// Returns a tuple of coordinates.
pub fn to_coords(&self) -> (u8, u8) {
(self.x, self.y)
}
/// Returns the string representation of this vertex (ex: G12).
pub fn to_string(&self) -> String {
let mut letter: u8 = 'A' as u8;
if self.x >= 9 {
// eliminate 'I'
letter += self.x;
} else {
letter += self.x-1;
}
format!("{}{}", letter as char, self.y)
}
}
impl Move {
/// Returns a string representation of the move compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Move::Stone(vrtx) => vrtx.to_string(),
Move::Pass => "pass".to_string(),
Move::Resign => "resign".to_string(),
}
}
}
impl Colour {
    /// Returns a string representation of the colour compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Colour::White => "white".to_string(),
Colour::Black => "black".to_string(),
}
}
}
impl ColouredMove {
    /// Returns a string representation of the coloured move compatible
/// with GTPv2.
pub fn to_string(&self) -> String {
self.player.to_string() + &self.mov.to_string()
}
}
#[cfg(test)]
mod tests {
#[test]
fn vertex_to_string() {
let vrtx1 = super::Vertex::from_coords(8u8, 7u8).unwrap();
assert_eq!(&vrtx1.to_string(), "H7");
let vrtx2 = super::Vertex::from_coords(9u8, 13u8).unwrap();
assert_eq!(&vrtx2.to_string(), "J13");
let vrtx3 = super::Vertex::from_coords(19u8, 1u8).unwrap();
assert_eq!(&vrtx3.to_string(), "T1");
}
#[test]
fn string_to_vertex() {
let vrtx1 = super::Vertex::from_str("C7").unwrap();
assert_eq!(vrtx1.to_coords(), (3u8, 7u8));
let vrtx2 = super::Vertex::from_str("J11").unwrap();
assert_eq!(vrtx2.to_coords(), (9u8, 11u8));
let vrtx3 = super::Vertex::from_str("Z25").unwrap();
assert_eq!(vrtx3.to_coords(), (25u8, 25u8));
}
#[test]
#[should_panic]
fn too_big_coordinates() {
let vrtx = super::Vertex::from_coords(26u8, 13u8).unwrap();
assert_eq!(vrtx.to_coords(), (26u8, 13u8));
}
#[test]
#[should_panic]
fn invalid_string() {
let vrtx = super::Vertex::from_str("I13").unwrap();
assert_eq!(vrtx.to_coords(), (9u8, 13u8));
}
}
|
x -= 1;
} | conditional_block |
api.rs | //! This API contains all you will need to interface
//! your bot algorithm with the GTPv2 protocol.
//! Your main task will be to implement the `Gtp` trait.
use std::str::FromStr;
use std::vec::Vec;
/// Contains all the possible errors your bot
/// may return to the library.
/// Be careful: any callback returning an error it is not
/// supposed to return will cause the lib to `panic!()`.
pub enum GTPError {
NotImplemented,
InvalidBoardSize,
InvalidMove,
BadVertexList,
BoardNotEmpty,
CannotUndo,
CannotScore,
}
/// Represents a player, Black or White.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Colour {
Black,
White
}
/// Represents a vertex of the board.
/// Note that board size is at most 25x25.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct Vertex {
x: u8, // letter
y: u8 // number
}
/// Represents a move, either placing a stone, passing or resigning.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Move {
Stone(Vertex),
Pass,
Resign
}
/// Represents a move associated with a player.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct ColouredMove {
pub player: Colour,
pub mov: Move
}
/// The status of a stone: alive, dead, or seki.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum StoneStatus {
Alive,
Seki,
Dead
}
/// This is the trait used by the library to call back into your bot.
/// You must implement some functions; the provided ones correspond
/// to the optional commands of the protocol. If you want to
/// implement them, simply override them. If you do not, the library
/// will not report them as available.
pub trait Gtp {
    /// The name of your bot (ex: "My super Bot")
fn name(&self) -> String;
    /// The version of your bot (ex: "v2.3-r5")
fn version(&self) -> String;
// Any function returning a GTPError that it is not supposed
// to return will be fatal to the framework.
// Basic functions, must be implemented
/// Clears the board, can never fail.
fn clear_board(&mut self) -> ();
/// Sets the komi, can never fail, must accept absurd values.
fn komi(&mut self, komi: f32) -> ();
/// Sets the board size.
/// Returns `Err(InvalidBoardSize)` if the size is not supported.
/// The protocol cannot handle board sizes > 25x25.
fn boardsize(&mut self, size: usize) -> Result<(), GTPError>;
/// Plays the provided move on the board.
    /// Returns `Err(InvalidMove)` if the move is invalid.
    /// The protocol does not forbid the same player playing twice in a row.
fn play(&mut self, mov: ColouredMove) -> Result<(), GTPError>;
    /// Asks the bot for a move for the chosen player.
    /// Cannot fail: the bot must provide a move even if the last
    /// played move was of the same colour.
/// Plays the move in the internal representation of the game of the bot.
fn genmove(&mut self, player: Colour) -> Move;
    // Optional functions, if not implemented, the corresponding
// commands will not be activated
// All these functions will be called once by the framework
// at startup, then clear_board will be called
/// Asks the bot for a move for the chosen player.
/// Must be deterministic, and must not actually play the move.
/// Should always return `Ok(Move)`, never raise any error.
#[allow(unused_variables)]
fn reg_genmove(&self, player: Colour) -> Result<Move, GTPError> {
Err(GTPError::NotImplemented)
}
    /// Undoes the last move if possible.
/// If not, return `Err(CannotUndo)`.
/// If undo is never possible, should not be implemented.
fn undo(&mut self) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
    /// The bot places handicap stones for black
    /// according to pre-defined patterns; see the GTPv2 specification.
    /// Returns a vector of the chosen stones.
    /// Can fail with `Err(BoardNotEmpty)`.
    /// The library guarantees `number` will always be between 2 and 9 inclusive.
#[allow(unused_variables)]
fn fixed_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places its handicap stones
/// and returns a vector of Vertexes.
    /// It can place fewer stones if the asked number is too high.
    /// Can fail with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
#[allow(unused_variables)]
fn place_free_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Uses the provided list as handicap stones for black.
    /// Fails with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
/// Fails with `Err(BadVertexList)` if the vertex list is unusable
/// (two stones at the same place, or stones outside the board).
#[allow(unused_variables)]
fn set_free_handicap(&mut self, stones: &[Vertex]) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Sets the time settings for the game.
    /// It is only informative: the bot should count its own time,
    /// but the controller is supposed to enforce it.
    /// Times are given in minutes; this should never fail.
#[allow(unused_variables)]
fn time_settings(&mut self, main_time: usize, byoyomi_time: usize, byoyomi_stones: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
    /// Returns a vector of stones of both colours in the given status,
/// in the opinion of the bot.
/// Should never fail.
#[allow(unused_variables)]
fn | (&self, status: StoneStatus) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Computes the bot's calculation of the final score.
    /// If it is a draw, the float value must be 0 and the colour is not important.
    /// Can fail with `Err(CannotScore)`.
fn final_score(&self) -> Result<(f32, Colour), GTPError> {
Err(GTPError::NotImplemented)
}
    /// Returns a description of the board as seen by the bot:
/// (boardsize, black_stones, white_stones, black_captured_count, white_captured_count).
/// Should never fail.
fn showboard(&self) -> Result<(usize, Vec<Vertex>, Vec<Vertex>, usize, usize), GTPError> {
Err(GTPError::NotImplemented)
}
    /// Allows you to handle custom commands. Returns (success, output).
#[allow(unused_variables)]
fn custom_command(&mut self, command: &str, args: &str) -> (bool, String) {
(false, "invalid command".to_string())
}
/// Returns true if the given custom command is known.
#[allow(unused_variables)]
fn known_custom_command(&self, command: &str) -> bool {
false
}
    /// Returns the list of your custom commands.
fn list_custom_commands(&self) -> Vec<String> {
Vec::new()
}
    /// Loads the given SGF file and replays its first `n` moves.
    #[allow(unused_variables)]
    fn loadsgf(&mut self, filename: &str, n: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
}
// Vertex implementation for messing with strings
impl Vertex {
/// Creates a vertex from 2 numerical coords.
/// Both must be between 1 and 25.
pub fn from_coords(x: u8, y:u8) -> Option<Vertex> {
if x == 0 || x > 25 || y == 0 || y > 25 {
None
} else {
Some(Vertex{x: x, y: y})
}
}
/// Creates a vertex from board coordinates (from A1 to Z25).
/// Remember that letter I is banned.
pub fn from_str(text: &str) -> Option<Vertex> {
if text.len() < 2 || text.len() > 3 {
return None;
}
let mut x: u8 = text.as_bytes()[0];
if x < ('A' as u8) || x > ('Z' as u8) || (x as char) == 'I' {
return None;
}
x -= ('A' as u8) - 1;
if x > 9 {
x -= 1;
} // eliminate 'I'
        let y: u8 = u8::from_str(&text[1..]).unwrap_or(0);
if y == 0 || y > 25 {
return None;
}
Some(Vertex{x: x, y: y})
}
/// Returns a tuple of coordinates.
pub fn to_coords(&self) -> (u8, u8) {
(self.x, self.y)
}
/// Returns the string representation of this vertex (ex: G12).
pub fn to_string(&self) -> String {
let mut letter: u8 = 'A' as u8;
if self.x >= 9 {
// eliminate 'I'
letter += self.x;
} else {
letter += self.x-1;
}
format!("{}{}", letter as char, self.y)
}
}
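// Usage sketch (illustrative): both constructors agree with `to_string`, and
// the column letter 'I' is skipped, so "T1" is column 19 rather than 20.
#[allow(dead_code)]
fn vertex_roundtrip_demo() {
    let v = Vertex::from_str("T1").unwrap();
    assert_eq!(v.to_coords(), (19u8, 1u8));
    assert_eq!(&v.to_string(), "T1");
}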
impl Move {
/// Returns a string representation of the move compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Move::Stone(vrtx) => vrtx.to_string(),
Move::Pass => "pass".to_string(),
Move::Resign => "resign".to_string(),
}
}
}
impl Colour {
    /// Returns a string representation of the colour compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Colour::White => "white".to_string(),
Colour::Black => "black".to_string(),
}
}
}
impl ColouredMove {
    /// Returns a string representation of the coloured move compatible
/// with GTPv2.
pub fn to_string(&self) -> String {
self.player.to_string() + &self.mov.to_string()
}
}
#[cfg(test)]
mod tests {
#[test]
fn vertex_to_string() {
let vrtx1 = super::Vertex::from_coords(8u8, 7u8).unwrap();
assert_eq!(&vrtx1.to_string(), "H7");
let vrtx2 = super::Vertex::from_coords(9u8, 13u8).unwrap();
assert_eq!(&vrtx2.to_string(), "J13");
let vrtx3 = super::Vertex::from_coords(19u8, 1u8).unwrap();
assert_eq!(&vrtx3.to_string(), "T1");
}
#[test]
fn string_to_vertex() {
let vrtx1 = super::Vertex::from_str("C7").unwrap();
assert_eq!(vrtx1.to_coords(), (3u8, 7u8));
let vrtx2 = super::Vertex::from_str("J11").unwrap();
assert_eq!(vrtx2.to_coords(), (9u8, 11u8));
let vrtx3 = super::Vertex::from_str("Z25").unwrap();
assert_eq!(vrtx3.to_coords(), (25u8, 25u8));
}
#[test]
#[should_panic]
fn too_big_coordinates() {
let vrtx = super::Vertex::from_coords(26u8, 13u8).unwrap();
assert_eq!(vrtx.to_coords(), (26u8, 13u8));
}
#[test]
#[should_panic]
fn invalid_string() {
let vrtx = super::Vertex::from_str("I13").unwrap();
assert_eq!(vrtx.to_coords(), (9u8, 13u8));
}
}
| final_status_list | identifier_name |
my.rs | use std::{io, iter};
use std::collections::{HashSet, HashMap};
use std::iter::FromIterator;
use std::ops::RangeFull;
use paths_builder::Path;
macro_rules! parse_input {
($x:expr, $t:ident) => ($x.trim().parse::<$t>().unwrap())
}
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct Pt {
x: i32,
y: i32,
}
mod paths_builder {
pub use crate::paths_builder_impl::build_paths;
use crate::*;
#[derive(Debug, Clone)]
pub struct | {
pub moves: Vec<(i8, i8)>, // (dx, dy)
pub nowater: Vec<Pt>, // (x, y) sorted
pub noobstacles: Vec<Pt>, // (x, y) sorted
pub xmin: i32,
pub xmax: i32,
pub ymin: i32,
pub ymax: i32,
}
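    /// Canonical paths indexed first by the distance of the first shot (the
    /// ball's shot budget, since each shot spends the full remaining count),
    /// then by the net (dx, dy) displacement the path achieves.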
#[derive(Debug)]
pub struct Paths {
pub paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>>,
}
}
struct BallPaths {
count: usize,
ball: usize,
    paths: Vec<(paths_builder::Path, usize)>
}
struct Main {
width : usize,
height: usize,
field : Vec<u8>,
holes : Vec<Pt>,
balls : Vec<(Pt, u8)>,
water : HashSet<Pt>,
obstacles : HashSet<Pt>,
ball_paths : Vec<BallPaths>,
}
impl Main {
fn new(width: i32, height: i32) -> Self {
eprintln!("field size: {} x {}", width, height);
Self {
width: width as usize,
height: height as usize,
field: vec![0u8; (width * height) as usize],
balls: Vec::new(),
holes: Vec::new(),
water: HashSet::new(),
obstacles: HashSet::new(),
ball_paths: Vec::new(),
}
}
fn set_row(&mut self, row : usize, data : &[u8]) {
assert!(row < self.height);
assert!(data.len() == self.width);
let base = self.width * row;
for (col, &c) in data.iter().enumerate() {
self.field[base + col] = match c {
b'.' => 1,
b'X' => {//water
let coords = Pt{ x: col as i32, y: row as i32};
self.water.insert(coords);
0
},
b'H' => {//hole
let coords = Pt{ x: col as i32, y: row as i32};
self.holes.push(coords);
self.obstacles.insert(coords);
0
}
b'0'..=b'9' => {//ball
let coords = Pt{ x: col as i32, y: row as i32};
self.balls.push((coords, c - b'0'));
0
}
                _ => panic!()
}
}
}
fn build_paths_for_brute_force(&mut self) -> u32 {
let max_ball = *self.balls.iter().map(|(_,sc)|sc).max().unwrap();
let paths = paths_builder::build_paths(max_ball as usize);
let mut ball_paths : Vec<BallPaths> = Vec::new();
for (ball_idx, ball) in self.balls.iter().enumerate() {
let mut v = BallPaths{ count: 0, ball: ball_idx, paths: Vec::new() };
let paths_by_shot_count = &paths.paths[&(ball.1 as usize)];
for (hole_id, hole) in self.holes.iter().enumerate() {
let dx = hole.x - ball.0.x;
let dy = hole.y - ball.0.y;
if let Some(paths_to_hole) = paths_by_shot_count.get(&(dx, dy)) {
for path in paths_to_hole.iter() {
if let Some(p) = path.from_point(ball.0.x, ball.0.y, self.width, self.height) {
if p.noobstacles.iter().any(|pt|self.obstacles.contains(&pt)) {
continue;
}
if p.nowater.iter().any(|pt|self.water.contains(&pt)) {
continue;
}
v.paths.push((p, hole_id));
v.count += 1;
}
}
}
}
ball_paths.push(v);
}
ball_paths.sort_unstable_by_key(|bp|bp.count);
let bc = ball_paths.iter().fold(1u32, |acc, bp|acc * (bp.count as u32));
self.ball_paths = ball_paths;
bc
}
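    // Recursive backtracking over the balls (pre-sorted by ascending path
    // count): `used_points` holds every cell already crossed by a committed
    // path and `used_holes` the holes already taken. The first complete
    // assignment found is returned bottom-up as (ball index, path) pairs.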
    fn r(&self, used_points: &HashSet<Pt>, used_holes: &HashSet<usize>, pos: usize) -> Option<Vec<(usize, Path)>> {
let is_leaf = pos + 1 == self.balls.len();
let paths = &self.ball_paths[pos];
'outer: for (path, hole) in paths.paths.iter() {
if used_holes.contains(&hole) {
continue;
}
for pt in path.noobstacles.iter().chain(path.nowater.iter()) {
if used_points.contains(pt) {
continue 'outer;
}
}
if is_leaf {
let mut s : Vec<(usize, Path)> = Vec::new();
s.push((paths.ball, path.clone()));
return Some(s);
}
let mut uh = used_holes.clone();
uh.insert(*hole);
let mut up = used_points.clone();
for &pt in path.noobstacles.iter().chain(path.nowater.iter()) {
up.insert(pt);
}
if let Some(mut s) = self.r(&up, &uh, pos + 1) {
s.push((paths.ball, path.clone()));
return Some(s);
}
}
None
}
fn solve(&self, brute_force_k: u32) -> Vec<(usize, Path)> {
if brute_force_k == 1
{
self.ball_paths.iter().map(|bp| (bp.ball, bp.paths[0].0.clone())).collect()
} else {
self.r(&HashSet::new(), &HashSet::new(), 0).unwrap()
}
}
fn run(&mut self) {
let bc = self.build_paths_for_brute_force();
eprintln!("balls: {}, combinations to brute force: {}", self.balls.len(), bc);
let solution = self.solve(bc);
// finally render paths:
let mut field = (0..self.height).map(|_|vec!['.'; self.width]).collect::<Vec<_>>();
for (ball, path) in solution.into_iter() {
let (mut p, mut d) = self.balls[ball];
for (mdx, mdy) in path.moves {
let c = if mdx < 0 {'<'} else if mdx > 0 {'>'} else if mdy < 0 {'^'} else {'v'};
for _ in 0..d {
field[p.y as usize][p.x as usize] = c;
p.x += mdx as i32;
p.y += mdy as i32;
}
d -= 1;
}
}
for row in field {
println!("{}", row.into_iter().collect::<String>());
}
}
}
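// Sketch of the search-space estimate used above (illustrative helper, not
// called by the solver): the brute-force combination count is the product of
// per-ball path counts, which is why balls are sorted by ascending count
// before the backtracking in `r`.
#[allow(dead_code)]
fn combinations_estimate(counts: &[usize]) -> u32 {
    counts.iter().fold(1u32, |acc, &c| acc * (c as u32))
}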
fn main() {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let inputs = input_line.split(" ").collect::<Vec<_>>();
let width = parse_input!(inputs[0], i32);
let height = parse_input!(inputs[1], i32);
let mut main = Main::new(width, height);
for i in 0..height as usize {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let row = input_line.trim();//.to_string();
main.set_row(i, row.as_bytes());
}
//eprintln!("field size {} x {}, {} balls, {} holes, {} shots max", main.width, main.height, main.balls.len(), main.holes.len(),
// main.balls.iter().map(|(_,_,n)| *n as u32).sum::<u32>());
main.run();
}
mod paths_builder_impl {
use crate::paths_builder::*;
use crate::*;
impl paths_builder::Path {
pub fn from_point(
&self,
x: i32,
y: i32,
field_width: usize,
field_height: usize,
) -> Option<Path> {
if x + self.xmin < 0
|| x + self.xmax >= (field_width as i32)
|| y + self.ymin < 0
|| y + self.ymax >= (field_height as i32)
{
return None;
}
Some(Path {
moves: self.moves.clone(),
nowater: self
.nowater
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
noobstacles: self
.noobstacles
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
xmin: self.xmin + x,
xmax: self.xmax + x,
ymin: self.ymin + y,
ymax: self.ymax + y,
})
}
}
pub fn build_paths(max_dist: usize) -> Paths {
let mut state = State {
paths: HashMap::new(),
};
for dist in 1..=max_dist {
ff(&mut state, &Context::new(), dist);
}
let mut paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>> = HashMap::new();
for (coords, contexts) in state.paths.into_iter() {
for ctx in contexts.into_iter() {
let k0 = ctx.moves[0].1;
paths
.entry(k0)
.or_insert(HashMap::new())
.entry(coords)
.or_insert(Vec::new())
.push(context_to_path(ctx));
}
}
Paths { paths }
}
fn context_to_path(ctx: Context) -> Path {
let moves = ctx
.moves
.into_iter()
.map(|(dir, _)| (DIRS[dir].0 as i8, DIRS[dir].1 as i8))
.collect();
let cx = ctx.x;
let cy = ctx.y;
let mut nowater: Vec<Pt> = ctx
.nowater
.into_iter()
.filter_map(|pt| {
if pt.x == cx && pt.y == cy {
None
} else {
Some(pt)
}
})
.collect();
nowater.sort_unstable();
let mut noobstacles: Vec<Pt> = ctx
.used
.difference(&HashSet::from_iter(nowater.iter().cloned()))
.map(|&x| x)
            .filter(|&pt| (pt.x != 0 || pt.y != 0) && (pt.x != cx || pt.y != cy))
.collect();
let mut xmin = ctx.x;
let mut xmax = ctx.x;
let mut ymin = ctx.y;
let mut ymax = ctx.y;
for &pt in nowater.iter().chain(noobstacles.iter()) {
xmin = xmin.min(pt.x);
xmax = xmax.max(pt.x);
ymin = ymin.min(pt.y);
ymax = ymax.max(pt.y);
}
noobstacles.sort_unstable();
Path {
moves,
nowater,
noobstacles,
xmin,
xmax,
ymin,
ymax,
}
}
fn ff(state: &mut State, ctx: &Context, dist: usize) {
let c = (ctx.x, ctx.y);
        if ctx.x != 0 || ctx.y != 0 {
state.paths.entry(c).or_insert(Vec::new()).push(ctx.clone());
}
if dist == 0 {
return;
}
for d in 0..4 {
if let Some(c) = ctx.apply_move(d, dist) {
ff(state, &c, dist - 1);
}
}
}
struct State {
paths: HashMap<(i32, i32), Vec<Context>>,
}
#[derive(Clone)]
struct Context {
x: i32,
y: i32,
used: HashSet<Pt>, // (x, y)
moves: Vec<(usize, usize)>, // (dir, dist)
nowater: Vec<Pt>, // (x, y)
}
impl Context {
fn new() -> Self {
let mut used = HashSet::new();
used.insert(Pt { x: 0, y: 0 });
Self {
x: 0,
y: 0,
used,
moves: Vec::new(),
nowater: Vec::new(),
}
}
fn apply_move(&self, dir: usize, dist: usize) -> Option<Self> {
let mut x = self.x;
let mut y = self.y;
let dx = DIRS[dir].0;
let dy = DIRS[dir].1;
let mut used = self.used.clone();
if dist == 0 {
return None;
}
for _ in 0..dist {
x += dx;
y += dy;
if used.contains(&Pt { x, y }) {
return None;
}
used.insert(Pt { x, y });
}
let mut moves = self.moves.clone();
moves.push((dir, dist));
let mut nowater = self.nowater.clone();
nowater.push(Pt { x, y });
Some(Self {
x,
y,
used,
moves,
nowater,
})
}
}
const DIRS: &[(i32, i32)] = &[(-1, 0), (1, 0), (0, 1), (0, -1)];
}
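// Usage sketch (illustrative): canonical paths for shot budgets up to 2. The
// outer key is the budget, so a 2-shot ball whose hole lies 3 cells to the
// right would be looked up as `paths.paths[&2].get(&(3, 0))`.
#[allow(dead_code)]
fn build_paths_demo() {
    let paths = paths_builder::build_paths(2);
    assert!(paths.paths.contains_key(&1) && paths.paths.contains_key(&2));
}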
| Path | identifier_name |
my.rs | use std::{io, iter};
use std::collections::{HashSet, HashMap};
use std::iter::FromIterator;
use std::ops::RangeFull;
use paths_builder::Path;
macro_rules! parse_input {
($x:expr, $t:ident) => ($x.trim().parse::<$t>().unwrap())
}
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct Pt {
x: i32,
y: i32,
}
mod paths_builder {
pub use crate::paths_builder_impl::build_paths;
use crate::*;
#[derive(Debug, Clone)]
pub struct Path {
pub moves: Vec<(i8, i8)>, // (dx, dy)
pub nowater: Vec<Pt>, // (x, y) sorted
pub noobstacles: Vec<Pt>, // (x, y) sorted
pub xmin: i32,
pub xmax: i32,
pub ymin: i32, | pub struct Paths {
pub paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>>,
}
}
struct BallPaths {
count: usize,
ball: usize,
    paths: Vec<(paths_builder::Path, usize)>
}
struct Main {
width : usize,
height: usize,
field : Vec<u8>,
holes : Vec<Pt>,
balls : Vec<(Pt, u8)>,
water : HashSet<Pt>,
obstacles : HashSet<Pt>,
ball_paths : Vec<BallPaths>,
}
impl Main {
fn new(width: i32, height: i32) -> Self {
eprintln!("field size: {} x {}", width, height);
Self {
width: width as usize,
height: height as usize,
field: vec![0u8; (width * height) as usize],
balls: Vec::new(),
holes: Vec::new(),
water: HashSet::new(),
obstacles: HashSet::new(),
ball_paths: Vec::new(),
}
}
fn set_row(&mut self, row : usize, data : &[u8]) {
assert!(row < self.height);
assert!(data.len() == self.width);
let base = self.width * row;
for (col, &c) in data.iter().enumerate() {
self.field[base + col] = match c {
b'.' => 1,
b'X' => {//water
let coords = Pt{ x: col as i32, y: row as i32};
self.water.insert(coords);
0
},
b'H' => {//hole
let coords = Pt{ x: col as i32, y: row as i32};
self.holes.push(coords);
self.obstacles.insert(coords);
0
}
b'0'..=b'9' => {//ball
let coords = Pt{ x: col as i32, y: row as i32};
self.balls.push((coords, c - b'0'));
0
}
                _ => panic!()
}
}
}
fn build_paths_for_brute_force(&mut self) -> u32 {
let max_ball = *self.balls.iter().map(|(_,sc)|sc).max().unwrap();
let paths = paths_builder::build_paths(max_ball as usize);
let mut ball_paths : Vec<BallPaths> = Vec::new();
for (ball_idx, ball) in self.balls.iter().enumerate() {
let mut v = BallPaths{ count: 0, ball: ball_idx, paths: Vec::new() };
let paths_by_shot_count = &paths.paths[&(ball.1 as usize)];
for (hole_id, hole) in self.holes.iter().enumerate() {
let dx = hole.x - ball.0.x;
let dy = hole.y - ball.0.y;
if let Some(paths_to_hole) = paths_by_shot_count.get(&(dx, dy)) {
for path in paths_to_hole.iter() {
if let Some(p) = path.from_point(ball.0.x, ball.0.y, self.width, self.height) {
if p.noobstacles.iter().any(|pt|self.obstacles.contains(&pt)) {
continue;
}
if p.nowater.iter().any(|pt|self.water.contains(&pt)) {
continue;
}
v.paths.push((p, hole_id));
v.count += 1;
}
}
}
}
ball_paths.push(v);
}
ball_paths.sort_unstable_by_key(|bp|bp.count);
let bc = ball_paths.iter().fold(1u32, |acc, bp|acc * (bp.count as u32));
self.ball_paths = ball_paths;
bc
}
    fn r(&self, used_points: &HashSet<Pt>, used_holes: &HashSet<usize>, pos: usize) -> Option<Vec<(usize, Path)>> {
let is_leaf = pos + 1 == self.balls.len();
let paths = &self.ball_paths[pos];
'outer: for (path, hole) in paths.paths.iter() {
if used_holes.contains(&hole) {
continue;
}
for pt in path.noobstacles.iter().chain(path.nowater.iter()) {
if used_points.contains(pt) {
continue 'outer;
}
}
if is_leaf {
let mut s : Vec<(usize, Path)> = Vec::new();
s.push((paths.ball, path.clone()));
return Some(s);
}
let mut uh = used_holes.clone();
uh.insert(*hole);
let mut up = used_points.clone();
for &pt in path.noobstacles.iter().chain(path.nowater.iter()) {
up.insert(pt);
}
if let Some(mut s) = self.r(&up, &uh, pos + 1) {
s.push((paths.ball, path.clone()));
return Some(s);
}
}
None
}
fn solve(&self, brute_force_k: u32) -> Vec<(usize, Path)> {
if brute_force_k == 1
{
self.ball_paths.iter().map(|bp| (bp.ball, bp.paths[0].0.clone())).collect()
} else {
self.r(&HashSet::new(), &HashSet::new(), 0).unwrap()
}
}
fn run(&mut self) {
let bc = self.build_paths_for_brute_force();
eprintln!("balls: {}, combinations to brute force: {}", self.balls.len(), bc);
let solution = self.solve(bc);
// finally render paths:
let mut field = (0..self.height).map(|_|vec!['.'; self.width]).collect::<Vec<_>>();
for (ball, path) in solution.into_iter() {
let (mut p, mut d) = self.balls[ball];
for (mdx, mdy) in path.moves {
let c = if mdx < 0 {'<'} else if mdx > 0 {'>'} else if mdy < 0 {'^'} else {'v'};
for _ in 0..d {
field[p.y as usize][p.x as usize] = c;
p.x += mdx as i32;
p.y += mdy as i32;
}
d -= 1;
}
}
for row in field {
println!("{}", row.into_iter().collect::<String>());
}
}
}
fn main() {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let inputs = input_line.split(" ").collect::<Vec<_>>();
let width = parse_input!(inputs[0], i32);
let height = parse_input!(inputs[1], i32);
let mut main = Main::new(width, height);
for i in 0..height as usize {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let row = input_line.trim();//.to_string();
main.set_row(i, row.as_bytes());
}
//eprintln!("field size {} x {}, {} balls, {} holes, {} shots max", main.width, main.height, main.balls.len(), main.holes.len(),
// main.balls.iter().map(|(_,_,n)| *n as u32).sum::<u32>());
main.run();
}
mod paths_builder_impl {
use crate::paths_builder::*;
use crate::*;
impl paths_builder::Path {
pub fn from_point(
&self,
x: i32,
y: i32,
field_width: usize,
field_height: usize,
) -> Option<Path> {
if x + self.xmin < 0
|| x + self.xmax >= (field_width as i32)
|| y + self.ymin < 0
|| y + self.ymax >= (field_height as i32)
{
return None;
}
Some(Path {
moves: self.moves.clone(),
nowater: self
.nowater
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
noobstacles: self
.noobstacles
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
xmin: self.xmin + x,
xmax: self.xmax + x,
ymin: self.ymin + y,
ymax: self.ymax + y,
})
}
}
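    // Illustrative check (assumed example values): translating this canonical
    // one-shot path by the ball position keeps it on a 2x1 field only when it
    // starts at the left cell; shifted right, the bounds test fails.
    #[allow(dead_code)]
    fn from_point_demo() {
        let canonical = Path {
            moves: vec![(1, 0)],
            nowater: vec![Pt { x: 1, y: 0 }],
            noobstacles: Vec::new(),
            xmin: 0,
            xmax: 1,
            ymin: 0,
            ymax: 0,
        };
        assert!(canonical.from_point(0, 0, 2, 1).is_some());
        assert!(canonical.from_point(1, 0, 2, 1).is_none());
    }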
pub fn build_paths(max_dist: usize) -> Paths {
let mut state = State {
paths: HashMap::new(),
};
for dist in 1..=max_dist {
ff(&mut state, &Context::new(), dist);
}
let mut paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>> = HashMap::new();
for (coords, contexts) in state.paths.into_iter() {
for ctx in contexts.into_iter() {
let k0 = ctx.moves[0].1;
paths
.entry(k0)
.or_insert(HashMap::new())
.entry(coords)
.or_insert(Vec::new())
.push(context_to_path(ctx));
}
}
Paths { paths }
}
fn context_to_path(ctx: Context) -> Path {
let moves = ctx
.moves
.into_iter()
.map(|(dir, _)| (DIRS[dir].0 as i8, DIRS[dir].1 as i8))
.collect();
let cx = ctx.x;
let cy = ctx.y;
let mut nowater: Vec<Pt> = ctx
.nowater
.into_iter()
.filter_map(|pt| {
if pt.x == cx && pt.y == cy {
None
} else {
Some(pt)
}
})
.collect();
nowater.sort_unstable();
let mut noobstacles: Vec<Pt> = ctx
.used
.difference(&HashSet::from_iter(nowater.iter().cloned()))
.map(|&x| x)
            .filter(|&pt| (pt.x != 0 || pt.y != 0) && (pt.x != cx || pt.y != cy))
.collect();
let mut xmin = ctx.x;
let mut xmax = ctx.x;
let mut ymin = ctx.y;
let mut ymax = ctx.y;
for &pt in nowater.iter().chain(noobstacles.iter()) {
xmin = xmin.min(pt.x);
xmax = xmax.max(pt.x);
ymin = ymin.min(pt.y);
ymax = ymax.max(pt.y);
}
noobstacles.sort_unstable();
Path {
moves,
nowater,
noobstacles,
xmin,
xmax,
ymin,
ymax,
}
}
fn ff(state: &mut State, ctx: &Context, dist: usize) {
let c = (ctx.x, ctx.y);
        if ctx.x != 0 || ctx.y != 0 {
state.paths.entry(c).or_insert(Vec::new()).push(ctx.clone());
}
if dist == 0 {
return;
}
for d in 0..4 {
if let Some(c) = ctx.apply_move(d, dist) {
ff(state, &c, dist - 1);
}
}
}
struct State {
paths: HashMap<(i32, i32), Vec<Context>>,
}
#[derive(Clone)]
struct Context {
x: i32,
y: i32,
used: HashSet<Pt>, // (x, y)
moves: Vec<(usize, usize)>, // (dir, dist)
nowater: Vec<Pt>, // (x, y)
}
impl Context {
fn new() -> Self {
let mut used = HashSet::new();
used.insert(Pt { x: 0, y: 0 });
Self {
x: 0,
y: 0,
used,
moves: Vec::new(),
nowater: Vec::new(),
}
}
fn apply_move(&self, dir: usize, dist: usize) -> Option<Self> {
let mut x = self.x;
let mut y = self.y;
let dx = DIRS[dir].0;
let dy = DIRS[dir].1;
let mut used = self.used.clone();
if dist == 0 {
return None;
}
for _ in 0..dist {
x += dx;
y += dy;
if used.contains(&Pt { x, y }) {
return None;
}
used.insert(Pt { x, y });
}
let mut moves = self.moves.clone();
moves.push((dir, dist));
let mut nowater = self.nowater.clone();
nowater.push(Pt { x, y });
Some(Self {
x,
y,
used,
moves,
nowater,
})
}
}
const DIRS: &[(i32, i32)] = &[(-1, 0), (1, 0), (0, 1), (0, -1)];
} | pub ymax: i32,
}
#[derive(Debug)] | random_line_split |
my.rs | use std::{io, iter};
use std::collections::{HashSet, HashMap};
use std::iter::FromIterator;
use std::ops::RangeFull;
use paths_builder::Path;
macro_rules! parse_input {
($x:expr, $t:ident) => ($x.trim().parse::<$t>().unwrap())
}
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct Pt {
x: i32,
y: i32,
}
mod paths_builder {
pub use crate::paths_builder_impl::build_paths;
use crate::*;
#[derive(Debug, Clone)]
pub struct Path {
pub moves: Vec<(i8, i8)>, // (dx, dy)
pub nowater: Vec<Pt>, // (x, y) sorted
pub noobstacles: Vec<Pt>, // (x, y) sorted
pub xmin: i32,
pub xmax: i32,
pub ymin: i32,
pub ymax: i32,
}
#[derive(Debug)]
pub struct Paths {
pub paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>>,
}
}
struct BallPaths {
count: usize,
ball: usize,
    paths: Vec<(paths_builder::Path, usize)>
}
struct Main {
width : usize,
height: usize,
field : Vec<u8>,
holes : Vec<Pt>,
balls : Vec<(Pt, u8)>,
water : HashSet<Pt>,
obstacles : HashSet<Pt>,
ball_paths : Vec<BallPaths>,
}
impl Main {
fn new(width: i32, height: i32) -> Self {
eprintln!("field size: {} x {}", width, height);
Self {
width: width as usize,
height: height as usize,
field: vec![0u8; (width * height) as usize],
balls: Vec::new(),
holes: Vec::new(),
water: HashSet::new(),
obstacles: HashSet::new(),
ball_paths: Vec::new(),
}
}
fn set_row(&mut self, row : usize, data : &[u8]) {
assert!(row < self.height);
assert!(data.len() == self.width);
let base = self.width * row;
for (col, &c) in data.iter().enumerate() {
self.field[base + col] = match c {
b'.' => 1,
b'X' => {//water
let coords = Pt{ x: col as i32, y: row as i32};
self.water.insert(coords);
0
},
b'H' => {//hole
let coords = Pt{ x: col as i32, y: row as i32};
self.holes.push(coords);
self.obstacles.insert(coords);
0
}
b'0'..=b'9' => {//ball
let coords = Pt{ x: col as i32, y: row as i32};
self.balls.push((coords, c - b'0'));
0
}
                _ => panic!()
}
}
}
fn build_paths_for_brute_force(&mut self) -> u32 {
let max_ball = *self.balls.iter().map(|(_,sc)|sc).max().unwrap();
let paths = paths_builder::build_paths(max_ball as usize);
let mut ball_paths : Vec<BallPaths> = Vec::new();
for (ball_idx, ball) in self.balls.iter().enumerate() {
let mut v = BallPaths{ count: 0, ball: ball_idx, paths: Vec::new() };
let paths_by_shot_count = &paths.paths[&(ball.1 as usize)];
for (hole_id, hole) in self.holes.iter().enumerate() {
let dx = hole.x - ball.0.x;
let dy = hole.y - ball.0.y;
if let Some(paths_to_hole) = paths_by_shot_count.get(&(dx, dy)) {
for path in paths_to_hole.iter() {
if let Some(p) = path.from_point(ball.0.x, ball.0.y, self.width, self.height) {
if p.noobstacles.iter().any(|pt|self.obstacles.contains(&pt)) {
continue;
}
if p.nowater.iter().any(|pt|self.water.contains(&pt)) {
continue;
}
v.paths.push((p, hole_id));
v.count += 1;
}
}
}
}
ball_paths.push(v);
}
ball_paths.sort_unstable_by_key(|bp|bp.count);
let bc = ball_paths.iter().fold(1u32, |acc, bp|acc * (bp.count as u32));
self.ball_paths = ball_paths;
bc
}
    fn r(&self, used_points: &HashSet<Pt>, used_holes: &HashSet<usize>, pos: usize) -> Option<Vec<(usize, Path)>> {
let is_leaf = pos + 1 == self.balls.len();
let paths = &self.ball_paths[pos];
'outer: for (path, hole) in paths.paths.iter() {
if used_holes.contains(&hole) |
for pt in path.noobstacles.iter().chain(path.nowater.iter()) {
if used_points.contains(pt) {
continue 'outer;
}
}
if is_leaf {
let mut s : Vec<(usize, Path)> = Vec::new();
s.push((paths.ball, path.clone()));
return Some(s);
}
let mut uh = used_holes.clone();
uh.insert(*hole);
let mut up = used_points.clone();
for &pt in path.noobstacles.iter().chain(path.nowater.iter()) {
up.insert(pt);
}
if let Some(mut s) = self.r(&up, &uh, pos + 1) {
s.push((paths.ball, path.clone()));
return Some(s);
}
}
None
}
fn solve(&self, brute_force_k: u32) -> Vec<(usize, Path)> {
if brute_force_k == 1
{
self.ball_paths.iter().map(|bp| (bp.ball, bp.paths[0].0.clone())).collect()
} else {
self.r(&HashSet::new(), &HashSet::new(), 0).unwrap()
}
}
fn run(&mut self) {
let bc = self.build_paths_for_brute_force();
eprintln!("balls: {}, combinations to brute force: {}", self.balls.len(), bc);
let solution = self.solve(bc);
// finally render paths:
let mut field = (0..self.height).map(|_|vec!['.'; self.width]).collect::<Vec<_>>();
for (ball, path) in solution.into_iter() {
let (mut p, mut d) = self.balls[ball];
for (mdx, mdy) in path.moves {
let c = if mdx < 0 {'<'} else if mdx > 0 {'>'} else if mdy < 0 {'^'} else {'v'};
for _ in 0..d {
field[p.y as usize][p.x as usize] = c;
p.x += mdx as i32;
p.y += mdy as i32;
}
d -= 1;
}
}
for row in field {
println!("{}", row.into_iter().collect::<String>());
}
}
}
fn main() {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let inputs = input_line.split(" ").collect::<Vec<_>>();
let width = parse_input!(inputs[0], i32);
let height = parse_input!(inputs[1], i32);
let mut main = Main::new(width, height);
for i in 0..height as usize {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let row = input_line.trim();//.to_string();
main.set_row(i, row.as_bytes());
}
//eprintln!("field size {} x {}, {} balls, {} holes, {} shots max", main.width, main.height, main.balls.len(), main.holes.len(),
// main.balls.iter().map(|(_,_,n)| *n as u32).sum::<u32>());
main.run();
}
mod paths_builder_impl {
use crate::paths_builder::*;
use crate::*;
impl paths_builder::Path {
pub fn from_point(
&self,
x: i32,
y: i32,
field_width: usize,
field_height: usize,
) -> Option<Path> {
if x + self.xmin < 0
|| x + self.xmax >= (field_width as i32)
|| y + self.ymin < 0
|| y + self.ymax >= (field_height as i32)
{
return None;
}
Some(Path {
moves: self.moves.clone(),
nowater: self
.nowater
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
noobstacles: self
.noobstacles
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
xmin: self.xmin + x,
xmax: self.xmax + x,
ymin: self.ymin + y,
ymax: self.ymax + y,
})
}
}
pub fn build_paths(max_dist: usize) -> Paths {
let mut state = State {
paths: HashMap::new(),
};
for dist in 1..=max_dist {
ff(&mut state, &Context::new(), dist);
}
let mut paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>> = HashMap::new();
for (coords, contexts) in state.paths.into_iter() {
for ctx in contexts.into_iter() {
let k0 = ctx.moves[0].1;
paths
.entry(k0)
.or_insert(HashMap::new())
.entry(coords)
.or_insert(Vec::new())
.push(context_to_path(ctx));
}
}
Paths { paths }
}
fn context_to_path(ctx: Context) -> Path {
let moves = ctx
.moves
.into_iter()
.map(|(dir, _)| (DIRS[dir].0 as i8, DIRS[dir].1 as i8))
.collect();
let cx = ctx.x;
let cy = ctx.y;
let mut nowater: Vec<Pt> = ctx
.nowater
.into_iter()
.filter_map(|pt| {
if pt.x == cx && pt.y == cy {
None
} else {
Some(pt)
}
})
.collect();
nowater.sort_unstable();
let mut noobstacles: Vec<Pt> = ctx
.used
.difference(&HashSet::from_iter(nowater.iter().cloned()))
.map(|&x| x)
            .filter(|&pt| (pt.x != 0 || pt.y != 0) && (pt.x != cx || pt.y != cy))
.collect();
let mut xmin = ctx.x;
let mut xmax = ctx.x;
let mut ymin = ctx.y;
let mut ymax = ctx.y;
for &pt in nowater.iter().chain(noobstacles.iter()) {
xmin = xmin.min(pt.x);
xmax = xmax.max(pt.x);
ymin = ymin.min(pt.y);
ymax = ymax.max(pt.y);
}
noobstacles.sort_unstable();
Path {
moves,
nowater,
noobstacles,
xmin,
xmax,
ymin,
ymax,
}
}
fn ff(state: &mut State, ctx: &Context, dist: usize) {
let c = (ctx.x, ctx.y);
        if ctx.x != 0 || ctx.y != 0 {
state.paths.entry(c).or_insert(Vec::new()).push(ctx.clone());
}
if dist == 0 {
return;
}
for d in 0..4 {
if let Some(c) = ctx.apply_move(d, dist) {
ff(state, &c, dist - 1);
}
}
}
struct State {
paths: HashMap<(i32, i32), Vec<Context>>,
}
#[derive(Clone)]
struct Context {
x: i32,
y: i32,
used: HashSet<Pt>, // (x, y)
moves: Vec<(usize, usize)>, // (dir, dist)
nowater: Vec<Pt>, // (x, y)
}
impl Context {
fn new() -> Self {
let mut used = HashSet::new();
used.insert(Pt { x: 0, y: 0 });
Self {
x: 0,
y: 0,
used,
moves: Vec::new(),
nowater: Vec::new(),
}
}
fn apply_move(&self, dir: usize, dist: usize) -> Option<Self> {
let mut x = self.x;
let mut y = self.y;
let dx = DIRS[dir].0;
let dy = DIRS[dir].1;
let mut used = self.used.clone();
if dist == 0 {
return None;
}
for _ in 0..dist {
x += dx;
y += dy;
if used.contains(&Pt { x, y }) {
return None;
}
used.insert(Pt { x, y });
}
let mut moves = self.moves.clone();
moves.push((dir, dist));
let mut nowater = self.nowater.clone();
nowater.push(Pt { x, y });
Some(Self {
x,
y,
used,
moves,
nowater,
})
}
}
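    // Sketch (illustrative): `apply_move` rejects a shot that would re-enter
    // an already-visited cell, which keeps every generated path self-avoiding.
    #[allow(dead_code)]
    fn self_avoiding_demo() {
        let start = Context::new();
        let after = start.apply_move(1, 2).unwrap(); // two cells to the right
        assert!(after.apply_move(0, 1).is_none()); // straight back: rejected
    }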
const DIRS: &[(i32, i32)] = &[(-1, 0), (1, 0), (0, 1), (0, -1)];
}
| {
continue;
} | conditional_block |
lib.rs | // Copyright 2013-2014 The gl-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # gl_generator
//!
//! `gl_generator` is an OpenGL bindings generator plugin. It defines a macro named
//! `generate_gl_bindings!` which can be used to generate all constants and functions of a
//! given OpenGL version.
//!
//! ## Example
//!
//! ```rust
//! #[phase(plugin)]
//! extern crate gl_generator;
//! extern crate libc;
//!
//! use std::mem;
//! use self::types::*;
//!
//! generate_gl_bindings!("gl", "core", "4.5", "static", [ "GL_EXT_texture_filter_anisotropic" ])
//! ```
//!
//! ## Parameters
//!
//! * API: Can be `gl`, `wgl`, `glx`, `egl`. Only `gl` is supported for the moment.
//! * Profile: Can be `core` or `compatibility`. `core` will only include the functions supported
//! by the requested version itself, while `compatibility` will include all the functions from
//! previous versions as well.
//! * Version: The requested OpenGL version in the format `x.x`.
//! * Generator: Can be `static` or `struct`.
//! * Extensions (optional): An array of extensions to include in the bindings.
//!
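//! Extensions are optional, so a 4-argument invocation using the `struct`
//! generator might look like this (illustrative):
//!
//! ```rust
//! generate_gl_bindings!("gl", "core", "3.3", "struct")
//! ```
//!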
#![crate_name = "gl_generator"]
#![comment = "OpenGL function loader generator."]
#![license = "ASL2"]
#![crate_type = "dylib"]
#![feature(phase)]
#![feature(globs)]
#![feature(macro_rules)]
#![feature(plugin_registrar)]
#![feature(quote)]
#[phase(plugin, link)]
extern crate log;
extern crate khronos_api;
extern crate rustc;
extern crate syntax;
use std::path::Path;
use std::io::{File, Reader};
use syntax::parse::token;
use syntax::ast::{ Item, TokenTree };
use syntax::ext::base::{expr_to_string, get_exprs_from_tts, DummyResult, ExtCtxt, MacResult};
use syntax::codemap::Span;
use registry::*;
use static_gen::StaticGenerator;
use struct_gen::StructGenerator;
mod common;
pub mod static_gen;
pub mod struct_gen;
pub mod registry;
pub mod ty;
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut ::rustc::plugin::Registry) {
reg.register_macro("generate_gl_bindings", macro_handler);
}
// this is the object that we will return from the "generate_gl_bindings" macro expansion
struct MacroResult {
content: Vec<::std::gc::Gc<Item>>
}
impl MacResult for MacroResult {
fn make_def(&self) -> Option<::syntax::ext::base::MacroDef> { None }
fn make_expr(&self) -> Option<::std::gc::Gc<::syntax::ast::Expr>> { None }
fn make_pat(&self) -> Option<::std::gc::Gc<::syntax::ast::Pat>> { None }
fn make_stmt(&self) -> Option<::std::gc::Gc<::syntax::ast::Stmt>> { None }
fn | (&self) -> Option<::syntax::util::small_vector::SmallVector<::std::gc::Gc<Item>>> {
Some(::syntax::util::small_vector::SmallVector::many(self.content.clone()))
}
}
// handler for generate_gl_bindings!
fn macro_handler(ecx: &mut ExtCtxt, span: Span, token_tree: &[TokenTree]) -> Box<MacResult+'static> {
// getting the arguments from the macro
let (api, profile, version, generator, extensions) = match parse_macro_arguments(ecx, span.clone(), token_tree) {
Some(t) => t,
None => return DummyResult::any(span)
};
let (ns, source) = match api.as_slice() {
"gl" => (Gl, khronos_api::GL_XML),
"glx" => {
ecx.span_err(span, "glx generation unimplemented");
return DummyResult::any(span)
},
"wgl" => {
ecx.span_err(span, "wgl generation unimplemented");
return DummyResult::any(span)
}
ns => {
ecx.span_err(span, format!("Unexpected opengl namespace '{}'", ns).as_slice());
return DummyResult::any(span)
}
};
let filter = Some(Filter {
extensions: extensions,
profile: profile,
version: version,
api: api,
});
// generating the registry of all bindings
let reg = {
use std::io::BufReader;
use std::task;
let result = task::try(proc() {
let reader = BufReader::new(source.as_bytes());
Registry::from_xml(reader, ns, filter)
});
match result {
Ok(reg) => reg,
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while parsing the registry");
}
}
return DummyResult::any(span);
}
}
};
// generating the Rust bindings as a source code into "buffer"
let buffer = {
use std::io::MemWriter;
use std::task;
// calling the generator
let result = match generator.as_slice() {
"static" => task::try(proc() {
let mut buffer = MemWriter::new();
StaticGenerator::write(&mut buffer, ®, ns);
buffer
}),
"struct" => task::try(proc() {
let mut buffer = MemWriter::new();
StructGenerator::write(&mut buffer, ®, ns);
buffer
}),
generator => {
ecx.span_err(span, format!("unknown generator type: {}", generator).as_slice());
return DummyResult::any(span);
},
};
// processing the result
match result {
Ok(buffer) => buffer.unwrap(),
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while generating the bindings");
}
}
return DummyResult::any(span);
}
}
};
// creating a new Rust parser from these bindings
let content = match String::from_utf8(buffer) {
Ok(s) => s,
Err(err) => {
ecx.span_err(span, format!("{}", err).as_slice());
return DummyResult::any(span)
}
};
let mut parser = ::syntax::parse::new_parser_from_source_str(ecx.parse_sess(), ecx.cfg(),
Path::new(ecx.codemap().span_to_filename(span)).display().to_string(), content);
// getting all the items defined by the bindings
let mut items = Vec::new();
loop {
match parser.parse_item_with_outer_attributes() {
None => break,
Some(i) => items.push(i)
}
}
    if !parser.eat(&token::EOF) {
ecx.span_err(span, "the rust parser failed to compile all the generated bindings (meaning there is a bug in this library!)");
return DummyResult::any(span)
}
box MacroResult { content: items } as Box<MacResult>
}
fn parse_macro_arguments(ecx: &mut ExtCtxt, span: Span, tts: &[syntax::ast::TokenTree])
-> Option<(String, String, String, String, Vec<String>)>
{
// getting parameters list
let values = match get_exprs_from_tts(ecx, span, tts) {
Some(v) => v,
None => return None
};
    if values.len() != 4 && values.len() != 5 {
ecx.span_err(span, format!("expected 4 or 5 arguments but got {}", values.len())
.as_slice());
return None;
}
// computing the extensions (last parameter)
let extensions: Vec<String> = match values.as_slice().get(4) {
None => Vec::new(),
Some(vector) => {
use syntax::ast::ExprVec;
match vector.node {
// only [... ] is accepted
ExprVec(ref list) => {
// turning each element into a string
let mut result = Vec::new();
for element in list.iter() {
match expr_to_string(ecx, element.clone(), "expected string literal") {
Some((s, _)) => result.push(s.get().to_string()),
None => return None
}
}
result
},
_ => {
ecx.span_err(span, format!("last argument must be a vector").as_slice());
return None;
}
}
}
};
// computing other parameters
match (
expr_to_string(ecx, values[0].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[1].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[2].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[3].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() })
) {
(Some(a), Some(b), Some(c), Some(d)) => Some((a, b, c, d, extensions)),
_ => None
}
}
| make_items | identifier_name |
lib.rs | // Copyright 2013-2014 The gl-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # gl_generator
//!
//! `gl_generator` is an OpenGL bindings generator plugin. It defines a macro named
//! `generate_gl_bindings!` which can be used to generate all constants and functions of a
//! given OpenGL version.
//!
//! ## Example
//!
//! ```rust
//! #[phase(plugin)]
//! extern crate gl_generator;
//! extern crate libc;
//!
//! use std::mem;
//! use self::types::*;
//!
//! generate_gl_bindings!("gl", "core", "4.5", "static", [ "GL_EXT_texture_filter_anisotropic" ])
//! ```
//!
//! ## Parameters
//!
//! * API: Can be `gl`, `wgl`, `glx`, `egl`. Only `gl` is supported for the moment.
//! * Profile: Can be `core` or `compatibility`. `core` will only include the functions supported
//! by the requested version itself, while `compatibility` will include all the functions from
//! previous versions as well.
//! * Version: The requested OpenGL version in the format `x.x`.
//! * Generator: Can be `static` or `struct`.
//! * Extensions (optional): An array of extensions to include in the bindings.
//!
#![crate_name = "gl_generator"]
#![comment = "OpenGL function loader generator."]
#![license = "ASL2"]
#![crate_type = "dylib"]
#![feature(phase)]
#![feature(globs)]
#![feature(macro_rules)]
#![feature(plugin_registrar)]
#![feature(quote)]
#[phase(plugin, link)]
extern crate log;
extern crate khronos_api;
extern crate rustc;
extern crate syntax;
use std::path::Path;
use std::io::{File, Reader};
use syntax::parse::token;
use syntax::ast::{ Item, TokenTree };
use syntax::ext::base::{expr_to_string, get_exprs_from_tts, DummyResult, ExtCtxt, MacResult};
use syntax::codemap::Span;
use registry::*;
use static_gen::StaticGenerator;
use struct_gen::StructGenerator;
mod common;
pub mod static_gen;
pub mod struct_gen;
pub mod registry;
pub mod ty;
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut ::rustc::plugin::Registry) {
reg.register_macro("generate_gl_bindings", macro_handler);
}
// this is the object that we will return from the "generate_gl_bindings" macro expansion
struct MacroResult {
content: Vec<::std::gc::Gc<Item>>
}
impl MacResult for MacroResult {
fn make_def(&self) -> Option<::syntax::ext::base::MacroDef> { None }
fn make_expr(&self) -> Option<::std::gc::Gc<::syntax::ast::Expr>> { None }
fn make_pat(&self) -> Option<::std::gc::Gc<::syntax::ast::Pat>> { None }
fn make_stmt(&self) -> Option<::std::gc::Gc<::syntax::ast::Stmt>> { None }
fn make_items(&self) -> Option<::syntax::util::small_vector::SmallVector<::std::gc::Gc<Item>>> {
Some(::syntax::util::small_vector::SmallVector::many(self.content.clone()))
}
}
// handler for generate_gl_bindings!
fn macro_handler(ecx: &mut ExtCtxt, span: Span, token_tree: &[TokenTree]) -> Box<MacResult+'static> {
// getting the arguments from the macro
let (api, profile, version, generator, extensions) = match parse_macro_arguments(ecx, span.clone(), token_tree) {
Some(t) => t,
None => return DummyResult::any(span)
};
let (ns, source) = match api.as_slice() {
"gl" => (Gl, khronos_api::GL_XML),
"glx" => {
ecx.span_err(span, "glx generation unimplemented");
return DummyResult::any(span)
},
"wgl" => {
ecx.span_err(span, "wgl generation unimplemented");
return DummyResult::any(span)
}
ns => {
ecx.span_err(span, format!("Unexpected opengl namespace '{}'", ns).as_slice());
return DummyResult::any(span)
}
};
let filter = Some(Filter {
extensions: extensions,
profile: profile,
version: version,
api: api,
});
// generating the registry of all bindings
let reg = {
use std::io::BufReader;
use std::task;
let result = task::try(proc() {
let reader = BufReader::new(source.as_bytes());
Registry::from_xml(reader, ns, filter)
});
match result {
Ok(reg) => reg,
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while parsing the registry");
}
}
return DummyResult::any(span);
}
}
};
// generating the Rust bindings as a source code into "buffer"
let buffer = {
use std::io::MemWriter;
use std::task;
// calling the generator
let result = match generator.as_slice() {
"static" => task::try(proc() {
let mut buffer = MemWriter::new();
StaticGenerator::write(&mut buffer, &reg, ns);
buffer
}),
"struct" => task::try(proc() {
let mut buffer = MemWriter::new();
StructGenerator::write(&mut buffer, &reg, ns);
buffer
}),
generator => {
ecx.span_err(span, format!("unknown generator type: {}", generator).as_slice());
return DummyResult::any(span);
},
};
// processing the result
match result {
Ok(buffer) => buffer.unwrap(),
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while generating the bindings");
}
}
return DummyResult::any(span);
}
}
};
// creating a new Rust parser from these bindings
let content = match String::from_utf8(buffer) {
Ok(s) => s,
Err(err) => {
ecx.span_err(span, format!("{}", err).as_slice());
return DummyResult::any(span)
}
};
let mut parser = ::syntax::parse::new_parser_from_source_str(ecx.parse_sess(), ecx.cfg(),
Path::new(ecx.codemap().span_to_filename(span)).display().to_string(), content);
// getting all the items defined by the bindings
let mut items = Vec::new();
loop {
match parser.parse_item_with_outer_attributes() {
None => break,
Some(i) => items.push(i)
}
}
if !parser.eat(&token::EOF) {
ecx.span_err(span, "the rust parser failed to compile all the generated bindings (meaning there is a bug in this library!)");
return DummyResult::any(span)
}
box MacroResult { content: items } as Box<MacResult>
}
fn parse_macro_arguments(ecx: &mut ExtCtxt, span: Span, tts: &[syntax::ast::TokenTree])
-> Option<(String, String, String, String, Vec<String>)>
{
// getting parameters list
let values = match get_exprs_from_tts(ecx, span, tts) {
Some(v) => v,
None => return None
};
if values.len() != 4 && values.len() != 5 {
ecx.span_err(span, format!("expected 4 or 5 arguments but got {}", values.len())
.as_slice());
return None;
}
// computing the extensions (last parameter)
let extensions: Vec<String> = match values.as_slice().get(4) {
None => Vec::new(),
Some(vector) => {
use syntax::ast::ExprVec;
match vector.node {
// only [... ] is accepted
ExprVec(ref list) => {
// turning each element into a string
let mut result = Vec::new();
for element in list.iter() {
match expr_to_string(ecx, element.clone(), "expected string literal") {
Some((s, _)) => result.push(s.get().to_string()),
None => return None
}
}
result
},
_ => {
ecx.span_err(span, format!("last argument must be a vector").as_slice());
return None;
}
}
}
};
// computing other parameters
match (
expr_to_string(ecx, values[0].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[1].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[2].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[3].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() })
) {
(Some(a), Some(b), Some(c), Some(d)) => Some((a, b, c, d, extensions)),
_ => None
}
}
// lib.rs
#![deny(warnings)]
pub mod corgi;
pub mod dict;
#[cfg(test)]
pub mod tests;
use crate::corgi::{decode, encode, Corgi, CorgiDTO, CorgiId, CorgiKey, Rarity};
use crate::dict::Dict;
use near_env::near_envlog;
use near_sdk::{
borsh::{self, BorshDeserialize, BorshSerialize},
collections::UnorderedMap,
env,
json_types::U64,
near_bindgen,
wee_alloc::WeeAlloc,
AccountId, Balance, Promise,
};
use std::{convert::TryInto, mem::size_of, usize};
#[global_allocator]
static ALLOC: WeeAlloc = WeeAlloc::INIT;
/// Fee to pay (in yocto Ⓝ) to allow the user to store Corgis on-chain.
/// This value can be set by modifying the `mint_fee` field in `config.json`.
const MINT_FEE: u128 = include!(concat!(env!("OUT_DIR"), "/mint_fee.val"));
/// Indicates how many Corgi are returned at most in the `get_global_corgis` method.
/// This value can be set by modifying the `page_limit` field in `config.json`.
const PAGE_LIMIT: u32 = include!(concat!(env!("OUT_DIR"), "/page_limit.val"));
/// Keys used to identify our structures within the NEAR blockchain.
const CORGIS: &[u8] = b"a";
const CORGIS_BY_OWNER: &[u8] = b"b";
const CORGIS_BY_OWNER_PREFIX: &str = "B";
const AUCTIONS: &[u8] = b"d";
const AUCTIONS_PREFIX: &str = "D";
/// Holds our data model.
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Model {
/// A mapping from `CorgiKey` to `Corgi` to have quick access to corgis.
/// `Dict` is used to keep corgis sorted by creation timestamp.
corgis: Dict<CorgiKey, Corgi>,
/// Represents which account holds which `Corgi`.
/// Each account can own several corgis.
/// The inner `Dict` acts as a set, since it is mapped to `()`.
corgis_by_owner: UnorderedMap<AccountId, Dict<CorgiKey, ()>>,
/// Internal structure to store auctions for a given corgi.
/// It is a mapping from `CorgiKey` to a tuple.
/// The first component of the tuple is a `Dict`, which represents the bids for that corgi.
/// Each entry in this `Dict` maps the bidder (`AccountId`) to the bid price and bidding timestamp.
/// The second component of the tuple represents the expiration of the auction,
/// as a timestamp in nanoseconds.
auctions: UnorderedMap<CorgiKey, (Dict<AccountId, (Balance, u64)>, u64)>,
}
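// Illustrative sketch only (not part of the contract): reading the auction entry for a
// corgi. `model` and `key` are placeholder names introduced for this example.
//
// if let Some((bids, expires)) = model.auctions.get(&key) {
//     // `bid_for_item` keeps the highest bid at the front of the `Dict`.
//     let top_bid = bids.into_iter().next(); // Option<(AccountId, (Balance, u64))>
//     let still_open = env::block_timestamp() <= expires;
// }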
impl Default for Model {
fn default() -> Self {
env::log(format!("init v{}", env!("CARGO_PKG_VERSION")).as_bytes());
Self {
corgis: Dict::new(CORGIS.to_vec()),
corgis_by_owner: UnorderedMap::new(CORGIS_BY_OWNER.to_vec()),
auctions: UnorderedMap::new(AUCTIONS.to_vec()),
}
}
}
#[near_bindgen]
#[near_envlog(skip_args, only_pub)]
impl Model {
/// Creates a `Corgi` under the `predecessor_account_id`.
/// Returns the newly generated `Corgi`.
/// The corgi `id` is encoded using base58.
/// This method is `payable` because the caller needs to cover the cost to mint the corgi.
/// The corresponding `attached_deposit` must be `MINT_FEE`.
#[payable]
pub fn create_corgi(
&mut self,
name: String,
quote: String,
color: String,
background_color: String,
) -> CorgiDTO {
let owner = env::predecessor_account_id();
let deposit = env::attached_deposit();
if deposit != MINT_FEE {
panic!("Deposit must be MINT_FEE but was {}", deposit)
}
macro_rules! check {
($value:ident, $max:expr, $message:expr) => {{
if $value.len() > $max {
env::panic($message.as_bytes());
}
}};
}
check!(name, 32, "Name too large");
check!(quote, 256, "Quote too large");
check!(color, 64, "Color too large");
check!(background_color, 64, "Backcolor too large");
let now = env::block_timestamp();
let key = env::random_seed()[..size_of::<CorgiKey>()]
.try_into()
.unwrap();
let corgi = Corgi {
id: encode(key),
name,
quote,
color,
background_color,
rate: Rarity::from_seed(env::random_seed()),
owner,
created: now,
modified: now,
sender: "".to_string(),
};
CorgiDTO::new(self.push_corgi(key, corgi))
}
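// Example invocation through near-cli (hypothetical contract and account names; the
// attached deposit must equal the configured `MINT_FEE`, shown as 0.1 NEAR only for
// illustration):
//
// near call corgis.example.testnet create_corgi \
//   '{"name":"Rex","quote":"woof","color":"brown","background_color":"white"}' \
//   --accountId alice.testnet --amount 0.1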
/// Gets `Corgi` by the given `id`.
/// Panics if `id` is not found.
pub fn get_corgi_by_id(&self, id: CorgiId) -> CorgiDTO {
let (key, corgi) = self.get_corgi(&id);
self.get_for_sale(key, corgi)
}
/// Gets all `Corgi`s owned by the `owner` account id.
/// Empty `vec` if `owner` does not hold any `Corgi`.
pub fn get_corgis_by_owner(&self, owner: AccountId) -> Vec<CorgiDTO> {
match self.corgis_by_owner.get(&owner) {
None => Vec::new(),
Some(list) => list
.into_iter()
.map(|(key, _)| {
let maybe_corgi = self.corgis.get(&key);
assert!(maybe_corgi.is_some());
let corgi = maybe_corgi.unwrap();
assert!(corgi.id == encode(key));
assert!(corgi.owner == owner);
self.get_for_sale(key, corgi)
})
.collect(),
}
}
/// Delete the `Corgi` by its `id`.
/// Only the `owner` of the `Corgi` can delete it.
pub fn delete_corgi(&mut self, id: CorgiId) {
let owner = env::predecessor_account_id();
self.delete_corgi_from(id, owner);
}
/// Internal method to delete the corgi with `id` owned by `owner`.
/// Panics if `owner` does not own the corgi with `id`.
fn delete_corgi_from(&mut self, id: CorgiId, owner: AccountId) {
match self.corgis_by_owner.get(&owner) {
None => env::panic("You do not have corgis to delete from".as_bytes()),
Some(mut list) => {
let key = decode(&id);
self.panic_if_corgi_is_locked(key);
if list.remove(&key).is_none() {
env::panic("Corgi id does not belong to account".as_bytes());
}
self.corgis_by_owner.insert(&owner, &list);
let was_removed = self.corgis.remove(&key);
assert!(was_removed.is_some());
}
}
}
/// Returns a list of all `Corgi`s that have been created.
/// Number of `Corgi`s returned is limited by `PAGE_LIMIT`.
pub fn get_global_corgis(&self) -> Vec<CorgiDTO> {
let mut result = Vec::new();
for (key, corgi) in &self.corgis {
if result.len() >= PAGE_LIMIT as usize {
break;
}
result.push(self.get_for_sale(key, corgi));
}
result
}
/// Transfer the Corgi with the given `id` to `receiver`.
/// Only the `owner` of the corgi can make such a transfer.
pub fn transfer_corgi(&mut self, receiver: AccountId, id: CorgiId) {
if !env::is_valid_account_id(receiver.as_bytes()) {
env::panic("Invalid receiver account id".as_bytes());
}
let sender = env::predecessor_account_id();
if sender == receiver {
env::panic("Self transfers are not allowed".as_bytes());
}
let (key, corgi) = self.get_corgi(&id);
assert_eq!(corgi.id, id);
if sender != corgi.owner {
env::panic("Sender must own Corgi".as_bytes());
}
self.panic_if_corgi_is_locked(key);
self.move_corgi(key, id, sender, receiver, corgi)
}
/// Returns all `Corgi`s currently for sale.
/// That is, all `Corgi`s which are in auction.
pub fn get_items_for_sale(&self) -> Vec<CorgiDTO> {
let mut result = Vec::new();
for (key, item) in self.auctions.iter() {
let corgi = self.corgis.get(&key);
assert!(corgi.is_some());
let corgi = corgi.unwrap();
result.push(CorgiDTO::for_sale(corgi, item));
}
result
}
/// Puts the given `Corgi` for sale.
/// The `duration` indicates for how long the auction should last, in seconds.
pub fn add_item_for_sale(&mut self, token_id: CorgiId, duration: u32) -> U64 {
let (key, corgi) = self.get_corgi(&token_id);
if corgi.owner != env::predecessor_account_id() {
env::panic("Only token owner can add item for sale".as_bytes())
}
if self.auctions.get(&key).is_none() {
let bids = Dict::new(get_collection_key(AUCTIONS_PREFIX, token_id));
let expires = env::block_timestamp() + duration as u64 * 1_000_000_000;
self.auctions.insert(&key, &(bids, expires));
U64(expires)
} else {
env::panic("Corgi already for sale".as_bytes());
}
}
/// Makes a bid for a `Corgi` already in auction.
/// This is a `payable` method, meaning the contract will escrow the `attached_deposit`
/// until the auction ends.
#[payable]
pub fn bid_for_item(&mut self, token_id: CorgiId) {
let (key, mut bids, auction_ends) = self.get_auction(&token_id);
let bidder = env::predecessor_account_id();
if bidder == self.corgis.get(&key).expect("Corgi not found").owner {
env::panic("You cannot bid for your own Corgi".as_bytes())
}
if env::block_timestamp() > auction_ends {
env::panic("Auction for corgi has expired".as_bytes())
}
let price = env::attached_deposit() + bids.get(&bidder).map(|(p, _)| p).unwrap_or_default();
let top_price = bids.into_iter().next().map(|(_, (p, _))| p).unwrap_or(0);
if price <= top_price {
panic!("Bid {} does not cover top bid {}", price, top_price)
}
bids.remove(&bidder);
bids.push_front(&bidder, (price, env::block_timestamp()));
self.auctions.insert(&key, &(bids, auction_ends));
}
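// Invariant sketch: the front of `bids` always holds the current top bid, since a new bid
// is only accepted when its price exceeds the previous top and is then pushed to the front.
// A repeated bid from the same account first folds the already escrowed amount into the
// new price before the comparison.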
/// Makes a clearance for the given `Corgi`.
/// Only the corgi `owner` or the highest bidder can end an auction after it expires.
/// All other bidders can get their money back when calling this method.
pub fn clearance_for_item(&mut self, token_id: CorgiId) {
let (key, mut bids, auction_ends) = self.get_auction(&token_id);
let corgi = {
let corgi = self.corgis.get(&key);
assert!(corgi.is_some());
corgi.unwrap()
};
let owner = corgi.owner.clone();
let end_auction = |it, bidder, price| {
if env::block_timestamp() <= auction_ends {
env::panic("Token still in auction".as_bytes())
}
self.auctions.remove(&key);
self.move_corgi(key, token_id, owner.clone(), bidder, corgi);
Promise::new(owner.clone()).transfer(price);
for (bidder, (price, _timestamp)) in it {
Promise::new(bidder).transfer(price);
}
};
let mut it = bids.into_iter();
let signer = env::predecessor_account_id();
if signer == owner.clone() {
if let Some((bidder, (price, _timestamp))) = it.next() {
end_auction(it, bidder, price);
} else {
self.auctions.remove(&key);
}
} else {
if let Some((bidder, (price, _timestamp))) = it.next() {
if bidder == signer {
end_auction(it, bidder, price);
return;
}
}
match bids.remove(&signer) {
None => env::panic("Cannot clear an item if not bidding for it".as_bytes()),
Some((price, _)) => Promise::new(signer).transfer(price),
};
}
}
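// Settlement sketch: once the auction has expired, the owner or the top bidder can settle
// it, which transfers the corgi to the top bidder, pays the owner the winning price, and
// refunds every losing bidder. A losing bidder may also call this method at any time to
// withdraw only their own escrowed bid.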
/// Internal method to transfer a corgi.
fn move_corgi(
&mut self,
key: CorgiKey,
id: CorgiId,
old_owner: AccountId,
new_owner: AccountId,
mut corgi: Corgi,
) {
self.delete_corgi_from(id, old_owner.clone());
corgi.owner = new_owner;
corgi.sender = old_owner;
corgi.modified = env::block_timestamp();
self.push_corgi(key, corgi);
}
/// Gets the `Corgi` with `id`.
fn get_corgi(&self, id: &CorgiId) -> (CorgiKey, Corgi) {
let key = decode(id);
match self.corgis.get(&key) {
None => env::panic("Given corgi id was not found".as_bytes()),
Some(corgi) => {
assert!(corgi.id == *id);
(key, corgi)
}
}
}
/// Gets auction information for the `Corgi` with `token_id` or panics.
fn get_auction(&self, token_id: &CorgiId) -> (CorgiKey, Dict<AccountId, (u128, u64)>, u64) {
let key = decode(&token_id);
match self.auctions.get(&key) {
None => env::panic("Corgi is not available for sale".as_bytes()),
Some((bids, expires)) => (key, bids, expires),
}
}
/// Gets sale information for a given `Corgi`.
fn get_for_sale(&self, key: CorgiKey, corgi: Corgi) -> CorgiDTO {
match self.auctions.get(&key) {
None => CorgiDTO::new(corgi),
Some(item) => CorgiDTO::for_sale(corgi, item),
}
}
/// Inserts a `Corgi` into the top the dictionary.
fn push_corgi(&mut self, key: CorgiKey, corgi: Corgi) -> Corgi {
env::log("push_corgi".as_bytes());
let corgi = self.corgis.push_front(&key, corgi);
let mut ids = self.corgis_by_owner.get(&corgi.owner).unwrap_or_else(|| {
Dict::new(get_collection_key(
CORGIS_BY_OWNER_PREFIX,
corgi.owner.clone(),
))
});
ids.push_front(&key, ());
self.corgis_by_owner.insert(&corgi.owner, &ids);
corgi
}
/// Ensures the given `Corgi` with `key` is not for sale.
fn panic_if_corgi_is_locked(&self, key: CorgiKey) {
if self.auctions.get(&key).is_some() {
env::panic("Corgi is currently locked".as_bytes());
}
}
}
fn get_collection_key(prefix: &str, mut key: String) -> Vec<u8> {
key.insert_str(0, prefix);
key.as_bytes().to_vec()
}
// main.rs
use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::collections::HashMap;
use std::fmt::Display;
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use clap::{App, AppSettings, Arg, ArgMatches};
use conllx::io::{Reader, ReadSentence};
use conllx::token::Token;
use failure::{Error};
use itertools::Itertools;
use stdinout::OrExit;
pub fn main() -> Result<(), Error> {
let matches = parse_args();
let val_path = matches
.value_of(VALIDATION)
.or_exit("Missing input path", 1);
let val_file = File::open(val_path).or_exit("Can't open validation file.", 1);
let mut val_reader = Reader::new(BufReader::new(val_file));
let pred_path = matches
.value_of(PREDICTION)
.or_exit("Missing input path", 1);
let pred_file = File::open(pred_path)?;
let mut pred_reader = Reader::new(BufReader::new(pred_file));
let mut deprel_confusion = Confusion::<String>::new("Deprels");
let mut distance_confusion = Confusion::<usize>::new("Dists");
let skip_punct = matches.is_present(SKIP_PUNCTUATION);
let mut correct_head = 0;
let mut correct_head_label = 0;
let mut total = 0;
while let (Ok(Some(val_sentence)), Ok(Some(pred_sentence))) = (val_reader.read_sentence(), pred_reader.read_sentence()) {
assert_eq!(val_sentence.len(), pred_sentence.len());
for (idx, (val_token, pred_token)) in val_sentence
.iter()
.filter_map(|t| t.token())
.zip(pred_sentence.iter().filter_map(|t| t.token()))
.enumerate() {
assert_eq!(val_token.form(), pred_token.form());
if skip_punct {
if val_token.pos().expect("Validation token missing POS").starts_with("PUNCT") {
continue
}
}
let idx = idx + 1;
let val_triple = val_sentence.dep_graph().head(idx).unwrap();
let val_head = val_triple.head();
let val_dist = i64::abs(val_head as i64 - idx as i64) as usize;
let val_rel = val_triple.relation().unwrap();
let pred_triple = pred_sentence.dep_graph().head(idx).unwrap();
let pred_head = pred_triple.head();
let pred_dist = i64::abs(pred_head as i64 - idx as i64) as usize;
let pred_rel = pred_triple.relation().unwrap();
distance_confusion.insert(val_dist, pred_dist);
deprel_confusion.insert(val_rel, pred_rel);
correct_head += (pred_head == val_head) as usize;
correct_head_label += (pred_triple == val_triple) as usize;
total += 1;
}
}
if let Ok(Some(_)) = val_reader.read_sentence() {
eprintln!("Val reader not exhausted.");
std::process::exit(1)
}
if let Ok(Some(_)) = pred_reader.read_sentence() {
eprintln!("Pred reader not exhausted.");
std::process::exit(1)
}
println!("UAS: {:.4}", correct_head as f32 / total as f32);
println!("LAS: {:.4}", correct_head_label as f32 / total as f32);
if let Some(file_name) = matches.value_of(DEPREL_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DEPREL_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
deprel_confusion.write_accuracies(&mut writer).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", distance_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
distance_confusion.write_accuracies(&mut writer).unwrap();
}
Ok(())
}
static DEFAULT_CLAP_SETTINGS: &[AppSettings] = &[
AppSettings::DontCollapseArgsInUsage,
AppSettings::UnifiedHelpMessage,
];
// Argument constants
static VALIDATION: &str = "VALIDATION";
static PREDICTION: &str = "PREDICTION";
static DEPREL_CONFUSION: &str = "deprel_confusion";
static DEPREL_ACCURACIES: &str = "deprel_accuracies";
static DISTANCE_ACCURACIES: &str = "distance_confusion";
static DISTANCE_CONFUSION: &str = "distance_accuracies";
static SKIP_PUNCTUATION: &str = "skip_punctuation";
fn parse_args() -> ArgMatches<'static> {
App::new("reduce-ptb")
.settings(DEFAULT_CLAP_SETTINGS)
.arg(
Arg::with_name(VALIDATION)
.help("VALIDATION file")
.index(1)
.required(true),
)
.arg(
Arg::with_name(PREDICTION)
.index(2)
.help("PREDICTION")
.required(true),
)
.arg(
Arg::with_name(DEPREL_CONFUSION)
.takes_value(true)
.long(DEPREL_CONFUSION)
.help("print deprel confusion matrix to file")
)
.arg(
Arg::with_name(DISTANCE_CONFUSION)
.takes_value(true)
.long(DISTANCE_CONFUSION)
.help("print DISTANCE_CONFUSION matrix to file")
)
.arg(
Arg::with_name(DISTANCE_ACCURACIES)
.takes_value(true)
.long(DISTANCE_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(DEPREL_ACCURACIES)
.takes_value(true)
.long(DEPREL_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(SKIP_PUNCTUATION)
.long(SKIP_PUNCTUATION)
.help("Ignore punctuation.")
)
.get_matches()
}
pub trait GetFeature {
fn get_feature(&self, name: &str) -> Option<&str>;
}
impl GetFeature for Token {
fn get_feature(&self, name: &str) -> Option<&str> {
if let Some(features) = self.features() {
if let Some(feature) = features.as_map().get(name) {
return feature.as_ref().map(|f| f.as_str())
}
}
None
}
}
pub struct Confusion<V> {
confusion: Vec<Vec<usize>>,
numberer: Numberer<V>,
name: String,
}
impl<V> Confusion<V> where V: Clone + Hash + Eq {
pub fn new(name: impl Into<String>) -> Self {
Confusion {
confusion: Vec::new(),
numberer: Numberer::new(),
name: name.into(),
}
}
pub fn insert<S>(&mut self, target: S, prediction: S) where S: Into<V> {
let target_idx = self.numberer.number(target);
let pred_idx = self.numberer.number(prediction);
while target_idx >= self.confusion.len() || pred_idx >= self.confusion.len() {
self.confusion.push(vec![0; self.confusion.len()]);
self.confusion
.iter_mut()
.for_each(|row| row.push(0));
}
self.confusion[target_idx][pred_idx] += 1;
}
}
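// Usage sketch (hypothetical labels): rows index gold values, columns index predictions,
// and the matrix grows lazily as unseen values are interned.
//
// let mut conf = Confusion::<String>::new("Deprels");
// conf.insert("nsubj", "nsubj"); // counts on the diagonal are correct predictions
// conf.insert("obj", "nsubj");   // off-diagonal counts are confusions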
impl<V> Confusion<V> {
pub fn numberer(&self) -> &Numberer<V> {
&self.numberer
}
}
impl<V> Confusion<V> where V: ToString {
fn write_accuracies(&self, mut w: impl Write) -> Result<(), Error> {
for (idx, item) in self.numberer.idx2val.iter().map(V::to_string).enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
let total = row.iter().sum::<usize>();
let acc = correct as f32 / total as f32;
writeln!(w, "{}\t{}\t{:.04}", item, total, acc)?;
}
Ok(())
}
pub fn write_to_file(&self, mut w: impl Write, sep: &str) -> Result<(), Error> {
writeln!(w, "{}", self.numberer.idx2val.iter().map(ToString::to_string).join(sep))?;
for i in 0..self.confusion.len() {
writeln!(w, "{}", self.confusion[i].iter().map(|n| n.to_string()).join(sep))?;
}
Ok(())
}
}
impl<V> Display for Confusion<V> where V: ToString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}\t{}", self.name, self.numberer.idx2val.iter().map(ToString::to_string).join("\t"))?;
let mut total_correct = 0;
let mut full_total = 0;
for (idx, val) in self.numberer.idx2val.iter().enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
total_correct += correct;
let total = row.iter().sum::<usize>();
full_total += total;
let acc = correct as f32 / total as f32;
writeln!(f, "{}\t{}\t{:.4}", val.to_string(), self.confusion[idx].iter().map(|n| n.to_string()).join("\t"), acc)?;
}
let mut delim = String::new();
let mut precs = String::new();
for i in 0..self.confusion.len() {
let mut false_pos = 0;
for j in 0..self.confusion.len() {
if j == i {
continue
}
false_pos += self.confusion[j][i]
}
let prec = self.confusion[i][i] as f32 / (self.confusion[i][i] + false_pos) as f32;
precs.push_str(&format!("\t{:.4}", prec));
delim.push_str("\t____");
}
writeln!(f, "{}", delim)?;
writeln!(f, "{}", precs)?;
let acc = total_correct as f32 / full_total as f32;
writeln!(f, "acc: {:.4}", acc)?;
Ok(())
}
}
pub struct Numberer<V>{
val2idx: HashMap<V, usize>,
idx2val: Vec<V>,
}
impl<V> Numberer<V> where V: Clone + Hash + Eq {
pub fn new() -> Self {
Numberer {
val2idx: HashMap::new(),
idx2val: Vec::new(),
}
}
fn number<S>(&mut self, val: S) -> usize where S: Into<V> {
let val = val.into();
if let Some(idx) = self.val2idx.get(&val) {
*idx
} else {
let n_vals = self.val2idx.len();
self.val2idx.insert(val.clone(), n_vals);
self.idx2val.push(val);
n_vals
}
}
pub fn get_number(&self, val: &V) -> Option<usize> {
self.val2idx.get(val).map(|idx| *idx)
}
}
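// `Numberer` interns values into dense indices: `number` hands out the next index for an
// unseen value and returns the stored index otherwise, so `get_val(number(v)) == Some(&v)`.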
impl<V> Numberer<V> {
pub fn len(&self) -> usize {
self.idx2val.len()
}
pub fn is_empty(&self) -> bool {
self.idx2val.is_empty()
}
pub fn get_val(&self, idx: usize) -> Option<&V> {
self.idx2val.get(idx)
}
}
main.rs | use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::collections::HashMap;
use std::fmt::Display;
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use clap::{App, AppSettings, Arg, ArgMatches};
use conllx::io::{Reader, ReadSentence};
use conllx::token::Token;
use failure::{Error};
use itertools::Itertools;
use stdinout::OrExit;
pub fn main() -> Result<(), Error> {
let matches = parse_args();
let val_path = matches
.value_of(VALIDATION)
.or_exit("Missing input path", 1);
let val_file = File::open(val_path).or_exit("Can't open validation file.", 1);
let mut val_reader = Reader::new(BufReader::new(val_file));
let pred_path = matches
.value_of(PREDICTION)
.or_exit("Missing input path", 1);
let pred_file = File::open(pred_path)?;
let mut pred_reader = Reader::new(BufReader::new(pred_file));
let mut deprel_confusion = Confusion::<String>::new("Deprels");
let mut distance_confusion = Confusion::<usize>::new("Dists");
let skip_punct = matches.is_present(SKIP_PUNCTUATION);
let mut correct_head = 0;
let mut correct_head_label = 0;
let mut total = 0;
while let (Ok(Some(val_sentence)), Ok(Some(pred_sentence))) = (val_reader.read_sentence(), pred_reader.read_sentence()) {
assert_eq!(val_sentence.len(), pred_sentence.len());
for (idx, (val_token, pred_token)) in val_sentence
.iter()
.filter_map(|t| t.token())
.zip(pred_sentence.iter().filter_map(|t| t.token()))
.enumerate() {
assert_eq!(val_token.form(), pred_token.form());
if skip_punct && val_token.pos().expect("Validation token missing POS").starts_with("PUNCT") {
continue
}
let idx = idx + 1;
let val_triple = val_sentence.dep_graph().head(idx).unwrap();
let val_head = val_triple.head();
let val_dist = i64::abs(val_head as i64 - idx as i64) as usize;
let val_rel = val_triple.relation().unwrap();
let pred_triple = pred_sentence.dep_graph().head(idx).unwrap();
let pred_head = pred_triple.head();
let pred_dist = i64::abs(pred_head as i64 - idx as i64) as usize;
let pred_rel = pred_triple.relation().unwrap();
distance_confusion.insert(val_dist, pred_dist);
deprel_confusion.insert(val_rel, pred_rel);
correct_head += (pred_head == val_head) as usize;
correct_head_label += (pred_triple == val_triple) as usize;
total += 1;
}
}
if let Ok(Some(_)) = val_reader.read_sentence() {
eprintln!("Val reader not exhausted.");
std::process::exit(1)
}
if let Ok(Some(_)) = pred_reader.read_sentence() {
eprintln!("Pred reader not exhausted.");
std::process::exit(1)
}
println!("UAS: {:.4}", correct_head as f32 / total as f32);
println!("LAS: {:.4}", correct_head_label as f32 / total as f32);
if let Some(file_name) = matches.value_of(DEPREL_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DEPREL_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
deprel_confusion.write_accuracies(&mut writer).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", distance_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_ACCURACIES) |
Ok(())
}
static DEFAULT_CLAP_SETTINGS: &[AppSettings] = &[
AppSettings::DontCollapseArgsInUsage,
AppSettings::UnifiedHelpMessage,
];
// Argument constants
static VALIDATION: &str = "VALIDATION";
static PREDICTION: &str = "PREDICTION";
static DEPREL_CONFUSION: &str = "deprel_confusion";
static DEPREL_ACCURACIES: &str = "deprel_accuracies";
static DISTANCE_ACCURACIES: &str = "distance_accuracies";
static DISTANCE_CONFUSION: &str = "distance_confusion";
static SKIP_PUNCTUATION: &str = "skip_punctuation";
fn parse_args() -> ArgMatches<'static> {
App::new("reduce-ptb")
.settings(DEFAULT_CLAP_SETTINGS)
.arg(
Arg::with_name(VALIDATION)
.help("VALIDATION file")
.index(1)
.required(true),
)
.arg(
Arg::with_name(PREDICTION)
.index(2)
.help("PREDICTION")
.required(true),
)
.arg(
Arg::with_name(DEPREL_CONFUSION)
.takes_value(true)
.long(DEPREL_CONFUSION)
.help("print deprel confusion matrix to file")
)
.arg(
Arg::with_name(DISTANCE_CONFUSION)
.takes_value(true)
.long(DISTANCE_CONFUSION)
.help("print DISTANCE_CONFUSION matrix to file")
)
.arg(
Arg::with_name(DISTANCE_ACCURACIES)
.takes_value(true)
.long(DISTANCE_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(DEPREL_ACCURACIES)
.takes_value(true)
.long(DEPREL_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(SKIP_PUNCTUATION)
.long(SKIP_PUNCTUATION)
.help("Ignore punctuation.")
)
.get_matches()
}
pub trait GetFeature {
fn get_feature(&self, name: &str) -> Option<&str>;
}
impl GetFeature for Token {
fn get_feature(&self, name: &str) -> Option<&str> {
if let Some(features) = self.features() {
if let Some(feature) = features.as_map().get(name) {
return feature.as_ref().map(|f| f.as_str())
}
}
None
}
}
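// A short, hypothetical sketch of GetFeature in use (the "case" key and the
// function name are assumed for illustration): the accessor returns None both
// when the token has no feature map and when the key is absent, so callers
// need no nested checks.
fn feature_demo(token: &Token) {
match token.get_feature("case") {
Some(value) => println!("case = {}", value),
None => println!("no case feature"),
}
}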
pub struct Confusion<V> {
confusion: Vec<Vec<usize>>,
numberer: Numberer<V>,
name: String,
}
impl<V> Confusion<V> where V: Clone + Hash + Eq {
pub fn new(name: impl Into<String>) -> Self {
Confusion {
confusion: Vec::new(),
numberer: Numberer::new(),
name: name.into(),
}
}
pub fn insert<S>(&mut self, target: S, prediction: S) where S: Into<V> {
let target_idx = self.numberer.number(target);
let pred_idx = self.numberer.number(prediction);
while target_idx >= self.confusion.len() || pred_idx >= self.confusion.len() {
self.confusion.push(vec![0; self.confusion.len()]);
self.confusion
.iter_mut()
.for_each(|row| row.push(0));
}
self.confusion[target_idx][pred_idx] += 1;
}
}
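// A minimal sketch of how insert() grows the matrix on demand (names and
// labels invented for illustration): every unseen label adds one row and one
// column, so after two distinct labels the matrix is 2x2 and stays square.
fn confusion_demo() {
let mut c: Confusion<String> = Confusion::new("demo");
c.insert("nsubj", "nsubj"); // 1x1 matrix, one diagonal (correct) count
c.insert("obj", "nsubj"); // grows to 2x2, one off-diagonal confusion
assert_eq!(c.numberer().len(), 2);
}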
impl<V> Confusion<V> {
pub fn numberer(&self) -> &Numberer<V> {
&self.numberer
}
}
impl<V> Confusion<V> where V: ToString {
fn write_accuracies(&self, mut w: impl Write) -> Result<(), Error> {
for (idx, item) in self.numberer.idx2val.iter().map(V::to_string).enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
let total = row.iter().sum::<usize>();
let acc = correct as f32 / total as f32;
writeln!(w, "{}\t{}\t{:.04}", item, total, acc)?;
}
Ok(())
}
pub fn write_to_file(&self, mut w: impl Write, sep: &str) -> Result<(), Error> {
writeln!(w, "{}", self.numberer.idx2val.iter().map(ToString::to_string).join(sep))?;
for i in 0..self.confusion.len() {
writeln!(w, "{}", self.confusion[i].iter().map(|n| n.to_string()).join(sep))?;
}
Ok(())
}
}
impl<V> Display for Confusion<V> where V: ToString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}\t{}", self.name, self.numberer.idx2val.iter().map(ToString::to_string).join("\t"))?;
let mut total_correct = 0;
let mut full_total = 0;
for (idx, val) in self.numberer.idx2val.iter().enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
total_correct += correct;
let total = row.iter().sum::<usize>();
full_total += total;
let acc = correct as f32 / total as f32;
writeln!(f, "{}\t{}\t{:.4}", val.to_string(), self.confusion[idx].iter().map(|n| n.to_string()).join("\t"), acc)?;
}
let mut delim = String::new();
let mut precs = String::new();
for i in 0..self.confusion.len() {
let mut false_pos = 0;
for j in 0..self.confusion.len() {
if j == i {
continue
}
false_pos += self.confusion[j][i]
}
let prec = self.confusion[i][i] as f32 / (self.confusion[i][i] + false_pos) as f32;
precs.push_str(&format!("\t{:.4}", prec));
delim.push_str("\t____");
}
writeln!(f, "{}", delim)?;
writeln!(f, "{}", precs)?;
let acc = total_correct as f32 / full_total as f32;
writeln!(f, "acc: {:.4}", acc)?;
Ok(())
}
}
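// Worked example for the precision and accuracy math above (counts invented
// for illustration): with confusion rows [[8, 2], [1, 9]], column 0 collects
// false_pos = 1, so prec = 8 / (8 + 1) ~ 0.8889; column 1 collects
// false_pos = 2, so prec = 9 / (9 + 2) ~ 0.8182; overall acc = (8 + 9) / 20
// = 0.85.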
pub struct Numberer<V>{
val2idx: HashMap<V, usize>,
idx2val: Vec<V>,
}
impl<V> Numberer<V> where V: Clone + Hash + Eq {
pub fn new() -> Self {
Numberer {
val2idx: HashMap::new(),
idx2val: Vec::new(),
}
}
fn number<S>(&mut self, val: S) -> usize where S: Into<V> {
let val = val.into();
if let Some(idx) = self.val2idx.get(&val) {
*idx
} else {
let n_vals = self.val2idx.len();
self.val2idx.insert(val.clone(), n_vals);
self.idx2val.push(val);
n_vals
}
}
pub fn get_number(&self, val: &V) -> Option<usize> {
self.val2idx.get(val).copied()
}
}
impl<V> Numberer<V> {
pub fn len(&self) -> usize {
self.idx2val.len()
}
pub fn is_empty(&self) -> bool {
self.idx2val.is_empty()
}
pub fn get_val(&self, idx: usize) -> Option<&V> {
self.idx2val.get(idx)
}
} | {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
distance_confusion.write_accuracies(&mut writer).unwrap();
} | conditional_block |
main.rs | use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::collections::HashMap;
use std::fmt::Display;
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use clap::{App, AppSettings, Arg, ArgMatches};
use conllx::io::{Reader, ReadSentence};
use conllx::token::Token;
use failure::{Error};
use itertools::Itertools;
use stdinout::OrExit;
pub fn main() -> Result<(), Error> {
let matches = parse_args();
let val_path = matches
.value_of(VALIDATION)
.or_exit("Missing input path", 1);
let val_file = File::open(val_path).or_exit("Can't open validation file.", 1);
let mut val_reader = Reader::new(BufReader::new(val_file));
let pred_path = matches
.value_of(PREDICTION)
.or_exit("Missing input path", 1);
let pred_file = File::open(pred_path)?;
let mut pred_reader = Reader::new(BufReader::new(pred_file));
let mut deprel_confusion = Confusion::<String>::new("Deprels");
let mut distance_confusion = Confusion::<usize>::new("Dists");
let skip_punct = matches.is_present(SKIP_PUNCTUATION);
let mut correct_head = 0;
let mut correct_head_label = 0;
let mut total = 0;
while let (Ok(Some(val_sentence)), Ok(Some(pred_sentence))) = (val_reader.read_sentence(), pred_reader.read_sentence()) {
assert_eq!(val_sentence.len(), pred_sentence.len());
for (idx, (val_token, pred_token)) in val_sentence
.iter()
.filter_map(|t| t.token())
.zip(pred_sentence.iter().filter_map(|t| t.token()))
.enumerate() {
assert_eq!(val_token.form(), pred_token.form());
if skip_punct && val_token.pos().expect("Validation token missing POS").starts_with("PUNCT") {
continue
}
let idx = idx + 1;
let val_triple = val_sentence.dep_graph().head(idx).unwrap();
let val_head = val_triple.head();
let val_dist = i64::abs(val_head as i64 - idx as i64) as usize;
let val_rel = val_triple.relation().unwrap();
let pred_triple = pred_sentence.dep_graph().head(idx).unwrap();
let pred_head = pred_triple.head();
let pred_dist = i64::abs(pred_head as i64 - idx as i64) as usize;
let pred_rel = pred_triple.relation().unwrap();
distance_confusion.insert(val_dist, pred_dist);
deprel_confusion.insert(val_rel, pred_rel);
correct_head += (pred_head == val_head) as usize;
correct_head_label += (pred_triple == val_triple) as usize;
total += 1;
}
}
if let Ok(Some(_)) = val_reader.read_sentence() {
eprintln!("Val reader not exhausted.");
std::process::exit(1)
}
if let Ok(Some(_)) = pred_reader.read_sentence() {
eprintln!("Pred reader not exhausted.");
std::process::exit(1)
}
println!("UAS: {:.4}", correct_head as f32 / total as f32);
println!("LAS: {:.4}", correct_head_label as f32 / total as f32);
if let Some(file_name) = matches.value_of(DEPREL_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DEPREL_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
deprel_confusion.write_accuracies(&mut writer).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", distance_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
distance_confusion.write_accuracies(&mut writer).unwrap();
}
Ok(())
}
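// Worked example for the two scores above (counts invented for illustration):
// with total = 100 scored tokens, correct_head = 90 and
// correct_head_label = 85, the program prints "UAS: 0.9000" and
// "LAS: 0.8500". LAS can never exceed UAS, since a fully correct triple
// implies a correct head.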
static DEFAULT_CLAP_SETTINGS: &[AppSettings] = &[
AppSettings::DontCollapseArgsInUsage,
AppSettings::UnifiedHelpMessage,
];
// Argument constants
static VALIDATION: &str = "VALIDATION";
static PREDICTION: &str = "PREDICTION";
static DEPREL_CONFUSION: &str = "deprel_confusion";
static DEPREL_ACCURACIES: &str = "deprel_accuracies";
static DISTANCE_ACCURACIES: &str = "distance_accuracies";
static DISTANCE_CONFUSION: &str = "distance_confusion";
static SKIP_PUNCTUATION: &str = "skip_punctuation";
fn parse_args() -> ArgMatches<'static> {
App::new("reduce-ptb")
.settings(DEFAULT_CLAP_SETTINGS)
.arg(
Arg::with_name(VALIDATION)
.help("VALIDATION file")
.index(1)
.required(true),
)
.arg(
Arg::with_name(PREDICTION)
.index(2)
.help("PREDICTION")
.required(true),
)
.arg(
Arg::with_name(DEPREL_CONFUSION)
.takes_value(true)
.long(DEPREL_CONFUSION)
.help("print deprel confusion matrix to file")
)
.arg(
Arg::with_name(DISTANCE_CONFUSION)
.takes_value(true)
.long(DISTANCE_CONFUSION)
.help("print DISTANCE_CONFUSION matrix to file")
)
.arg(
Arg::with_name(DISTANCE_ACCURACIES)
.takes_value(true)
.long(DISTANCE_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(DEPREL_ACCURACIES)
.takes_value(true)
.long(DEPREL_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(SKIP_PUNCTUATION)
.long(SKIP_PUNCTUATION)
.help("Ignore punctuation.")
)
.get_matches()
}
pub trait GetFeature {
fn get_feature(&self, name: &str) -> Option<&str>;
}
impl GetFeature for Token {
fn get_feature(&self, name: &str) -> Option<&str> {
if let Some(features) = self.features() {
if let Some(feature) = features.as_map().get(name) {
return feature.as_ref().map(|f| f.as_str())
}
}
None
}
}
pub struct Confusion<V> {
confusion: Vec<Vec<usize>>,
numberer: Numberer<V>,
name: String,
}
impl<V> Confusion<V> where V: Clone + Hash + Eq {
pub fn new(name: impl Into<String>) -> Self {
Confusion {
confusion: Vec::new(),
numberer: Numberer::new(),
name: name.into(),
}
}
pub fn insert<S>(&mut self, target: S, prediction: S) where S: Into<V> {
let target_idx = self.numberer.number(target);
let pred_idx = self.numberer.number(prediction);
while target_idx >= self.confusion.len() || pred_idx >= self.confusion.len() {
self.confusion.push(vec![0; self.confusion.len()]);
self.confusion
.iter_mut()
.for_each(|row| row.push(0));
}
self.confusion[target_idx][pred_idx] += 1;
}
}
impl<V> Confusion<V> {
pub fn numberer(&self) -> &Numberer<V> {
&self.numberer
}
}
impl<V> Confusion<V> where V: ToString {
fn write_accuracies(&self, mut w: impl Write) -> Result<(), Error> {
for (idx, item) in self.numberer.idx2val.iter().map(V::to_string).enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
let total = row.iter().sum::<usize>();
let acc = correct as f32 / total as f32;
writeln!(w, "{}\t{}\t{:.04}", item, total, acc)?;
}
Ok(())
}
pub fn write_to_file(&self, mut w: impl Write, sep: &str) -> Result<(), Error> {
writeln!(w, "{}", self.numberer.idx2val.iter().map(ToString::to_string).join(sep))?;
for i in 0..self.confusion.len() {
writeln!(w, "{}", self.confusion[i].iter().map(|n| n.to_string()).join(sep))?;
}
Ok(())
}
}
impl<V> Display for Confusion<V> where V: ToString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}\t{}", self.name, self.numberer.idx2val.iter().map(ToString::to_string).join("\t"))?;
let mut total_correct = 0;
let mut full_total = 0;
for (idx, val) in self.numberer.idx2val.iter().enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
total_correct += correct;
let total = row.iter().sum::<usize>();
full_total += total;
let acc = correct as f32 / total as f32;
writeln!(f, "{}\t{}\t{:.4}", val.to_string(), self.confusion[idx].iter().map(|n| n.to_string()).join("\t"), acc)?;
}
let mut delim = String::new();
let mut precs = String::new();
for i in 0..self.confusion.len() {
let mut false_pos = 0;
for j in 0..self.confusion.len() {
if j == i {
continue
}
false_pos += self.confusion[j][i]
}
let prec = self.confusion[i][i] as f32 / (self.confusion[i][i] + false_pos) as f32;
precs.push_str(&format!("\t{:.4}", prec));
delim.push_str("\t____");
}
writeln!(f, "{}", delim)?;
writeln!(f, "{}", precs)?;
let acc = total_correct as f32 / full_total as f32;
writeln!(f, "acc: {:.4}", acc)?;
Ok(())
}
}
pub struct | <V>{
val2idx: HashMap<V, usize>,
idx2val: Vec<V>,
}
impl<V> Numberer<V> where V: Clone + Hash + Eq {
pub fn new() -> Self {
Numberer {
val2idx: HashMap::new(),
idx2val: Vec::new(),
}
}
fn number<S>(&mut self, val: S) -> usize where S: Into<V> {
let val = val.into();
if let Some(idx) = self.val2idx.get(&val) {
*idx
} else {
let n_vals = self.val2idx.len();
self.val2idx.insert(val.clone(), n_vals);
self.idx2val.push(val);
n_vals
}
}
pub fn get_number(&self, val: &V) -> Option<usize> {
self.val2idx.get(val).copied()
}
}
impl<V> Numberer<V> {
pub fn len(&self) -> usize {
self.idx2val.len()
}
pub fn is_empty(&self) -> bool {
self.idx2val.is_empty()
}
pub fn get_val(&self, idx: usize) -> Option<&V> {
self.idx2val.get(idx)
}
} | Numberer | identifier_name |
main.rs | use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::collections::HashMap;
use std::fmt::Display;
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use clap::{App, AppSettings, Arg, ArgMatches};
use conllx::io::{Reader, ReadSentence};
use conllx::token::Token;
use failure::{Error};
use itertools::Itertools;
use stdinout::OrExit;
pub fn main() -> Result<(), Error> {
let matches = parse_args();
let val_path = matches
.value_of(VALIDATION)
.or_exit("Missing input path", 1);
let val_file = File::open(val_path).or_exit("Can't open validation file.", 1);
let mut val_reader = Reader::new(BufReader::new(val_file));
let pred_path = matches
.value_of(PREDICTION)
.or_exit("Missing input path", 1);
let pred_file = File::open(pred_path)?;
let mut pred_reader = Reader::new(BufReader::new(pred_file));
let mut deprel_confusion = Confusion::<String>::new("Deprels");
let mut distance_confusion = Confusion::<usize>::new("Dists");
let skip_punct = matches.is_present(SKIP_PUNCTUATION);
let mut correct_head = 0;
let mut correct_head_label = 0;
let mut total = 0;
while let (Ok(Some(val_sentence)), Ok(Some(pred_sentence))) = (val_reader.read_sentence(), pred_reader.read_sentence()) {
assert_eq!(val_sentence.len(), pred_sentence.len());
for (idx, (val_token, pred_token)) in val_sentence
.iter()
.filter_map(|t| t.token())
.zip(pred_sentence.iter().filter_map(|t| t.token()))
.enumerate() {
assert_eq!(val_token.form(), pred_token.form());
if skip_punct && val_token.pos().expect("Validation token missing POS").starts_with("PUNCT") {
continue
}
let idx = idx + 1;
let val_triple = val_sentence.dep_graph().head(idx).unwrap();
let val_head = val_triple.head();
let val_dist = i64::abs(val_head as i64 - idx as i64) as usize;
let val_rel = val_triple.relation().unwrap();
let pred_triple = pred_sentence.dep_graph().head(idx).unwrap();
let pred_head = pred_triple.head();
let pred_dist = i64::abs(pred_head as i64 - idx as i64) as usize;
let pred_rel = pred_triple.relation().unwrap();
distance_confusion.insert(val_dist, pred_dist);
deprel_confusion.insert(val_rel, pred_rel);
correct_head += (pred_head == val_head) as usize;
correct_head_label += (pred_triple == val_triple) as usize;
total += 1;
}
}
if let Ok(Some(_)) = val_reader.read_sentence() {
eprintln!("Val reader not exhausted.");
std::process::exit(1)
}
if let Ok(Some(_)) = pred_reader.read_sentence() {
eprintln!("Pred reader not exhausted.");
std::process::exit(1)
}
println!("UAS: {:.4}", correct_head as f32 / total as f32);
println!("LAS: {:.4}", correct_head_label as f32 / total as f32);
if let Some(file_name) = matches.value_of(DEPREL_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DEPREL_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
deprel_confusion.write_accuracies(&mut writer).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", distance_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
distance_confusion.write_accuracies(&mut writer).unwrap();
}
Ok(())
}
static DEFAULT_CLAP_SETTINGS: &[AppSettings] = &[
AppSettings::DontCollapseArgsInUsage,
AppSettings::UnifiedHelpMessage,
];
// Argument constants
static VALIDATION: &str = "VALIDATION";
static PREDICTION: &str = "PREDICTION";
static DEPREL_CONFUSION: &str = "deprel_confusion";
static DEPREL_ACCURACIES: &str = "deprel_accuracies";
static DISTANCE_ACCURACIES: &str = "distance_accuracies";
static DISTANCE_CONFUSION: &str = "distance_confusion";
static SKIP_PUNCTUATION: &str = "skip_punctuation";
fn parse_args() -> ArgMatches<'static> {
App::new("reduce-ptb")
.settings(DEFAULT_CLAP_SETTINGS)
.arg(
Arg::with_name(VALIDATION)
.help("VALIDATION file")
.index(1)
.required(true),
)
.arg(
Arg::with_name(PREDICTION)
.index(2)
.help("PREDICTION")
.required(true),
)
.arg(
Arg::with_name(DEPREL_CONFUSION)
.takes_value(true)
.long(DEPREL_CONFUSION)
.help("print deprel confusion matrix to file")
)
.arg(
Arg::with_name(DISTANCE_CONFUSION)
.takes_value(true)
.long(DISTANCE_CONFUSION)
.help("print DISTANCE_CONFUSION matrix to file")
)
.arg(
Arg::with_name(DISTANCE_ACCURACIES)
.takes_value(true)
.long(DISTANCE_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(DEPREL_ACCURACIES)
.takes_value(true)
.long(DEPREL_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(SKIP_PUNCTUATION)
.long(SKIP_PUNCTUATION)
.help("Ignore punctuation.")
)
.get_matches()
}
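// A hypothetical invocation of the CLI defined above (file names invented for
// illustration): the two positional arguments are the gold and predicted
// CoNLL-X files, and each confusion/accuracy flag takes an output path.
//
// reduce-ptb gold.conll pred.conll \
//     --deprel_confusion deprels.tsv \
//     --distance_accuracies dists.tsv \
//     --skip_punctuation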
pub trait GetFeature {
fn get_feature(&self, name: &str) -> Option<&str>;
}
impl GetFeature for Token {
fn get_feature(&self, name: &str) -> Option<&str> {
if let Some(features) = self.features() {
if let Some(feature) = features.as_map().get(name) {
return feature.as_ref().map(|f| f.as_str())
}
}
None
}
}
pub struct Confusion<V> {
confusion: Vec<Vec<usize>>,
numberer: Numberer<V>,
name: String,
}
impl<V> Confusion<V> where V: Clone + Hash + Eq {
pub fn new(name: impl Into<String>) -> Self {
Confusion {
confusion: Vec::new(),
numberer: Numberer::new(),
name: name.into(),
}
}
pub fn insert<S>(&mut self, target: S, prediction: S) where S: Into<V> {
let target_idx = self.numberer.number(target);
let pred_idx = self.numberer.number(prediction);
while target_idx >= self.confusion.len() || pred_idx >= self.confusion.len() {
self.confusion.push(vec![0; self.confusion.len()]);
self.confusion
.iter_mut()
.for_each(|row| row.push(0));
}
self.confusion[target_idx][pred_idx] += 1;
}
}
impl<V> Confusion<V> {
pub fn numberer(&self) -> &Numberer<V> {
&self.numberer
}
}
impl<V> Confusion<V> where V: ToString {
fn write_accuracies(&self, mut w: impl Write) -> Result<(), Error> {
for (idx, item) in self.numberer.idx2val.iter().map(V::to_string).enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
let total = row.iter().sum::<usize>();
let acc = correct as f32 / total as f32;
writeln!(w, "{}\t{}\t{:.04}", item, total, acc)?;
}
Ok(())
}
pub fn write_to_file(&self, mut w: impl Write, sep: &str) -> Result<(), Error> {
writeln!(w, "{}", self.numberer.idx2val.iter().map(ToString::to_string).join(sep))?;
for i in 0..self.confusion.len() {
writeln!(w, "{}", self.confusion[i].iter().map(|n| n.to_string()).join(sep))?;
}
Ok(())
}
}
impl<V> Display for Confusion<V> where V: ToString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}\t{}", self.name, self.numberer.idx2val.iter().map(ToString::to_string).join("\t"))?;
let mut total_correct = 0;
let mut full_total = 0;
for (idx, val) in self.numberer.idx2val.iter().enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
total_correct += correct;
let total = row.iter().sum::<usize>();
full_total += total;
let acc = correct as f32 / total as f32;
writeln!(f, "{}\t{}\t{:.4}", val.to_string(), self.confusion[idx].iter().map(|n| n.to_string()).join("\t"), acc)?;
}
let mut delim = String::new();
let mut precs = String::new();
for i in 0..self.confusion.len() {
let mut false_pos = 0;
for j in 0..self.confusion.len() {
if j == i {
continue
}
false_pos += self.confusion[j][i]
}
let prec = self.confusion[i][i] as f32 / (self.confusion[i][i] + false_pos) as f32;
precs.push_str(&format!("\t{:.4}", prec));
delim.push_str("\t____");
}
writeln!(f, "{}", delim)?;
writeln!(f, "{}", precs)?;
let acc = total_correct as f32 / full_total as f32;
writeln!(f, "acc: {:.4}", acc)?;
Ok(())
}
}
pub struct Numberer<V>{
val2idx: HashMap<V, usize>,
idx2val: Vec<V>,
}
impl<V> Numberer<V> where V: Clone + Hash + Eq {
pub fn new() -> Self |
fn number<S>(&mut self, val: S) -> usize where S: Into<V> {
let val = val.into();
if let Some(idx) = self.val2idx.get(&val) {
*idx
} else {
let n_vals = self.val2idx.len();
self.val2idx.insert(val.clone(), n_vals);
self.idx2val.push(val);
n_vals
}
}
pub fn get_number(&self, val: &V) -> Option<usize> {
self.val2idx.get(val).copied()
}
}
impl<V> Numberer<V> {
pub fn len(&self) -> usize {
self.idx2val.len()
}
pub fn is_empty(&self) -> bool {
self.idx2val.is_empty()
}
pub fn get_val(&self, idx: usize) -> Option<&V> {
self.idx2val.get(idx)
}
} | {
Numberer {
val2idx: HashMap::new(),
idx2val: Vec::new(),
}
} | identifier_body |
session.rs | remote_ptr::{RemotePtr, Void},
session::{
address_space::{
address_space::{AddressSpaceSharedPtr, Mapping},
memory_range::MemoryRangeKey,
MappingFlags,
},
diversion_session::DiversionSession,
record_session::RecordSession,
replay_session::ReplaySession,
session_inner::{AddressSpaceMap, SessionInner, TaskMap, ThreadGroupMap},
task::{
task_common::{self, copy_state, os_fork_into, read_mem, read_val_mem},
task_inner::{CloneFlags, CloneReason, WriteFlags},
Task, TaskSharedPtr, TaskSharedWeakPtr,
},
},
taskish_uid::{AddressSpaceUid, TaskUid, ThreadGroupUid},
thread_group::{ThreadGroup, ThreadGroupSharedPtr},
trace::trace_stream::TraceStream,
util::page_size,
};
use address_space::address_space::AddressSpace;
use libc::pid_t;
use nix::sys::mman::MapFlags;
use session_inner::{AddressSpaceClone, CloneCompletion};
use std::{
cell::{Ref, RefMut},
mem::size_of,
ops::DerefMut,
rc::{Rc, Weak},
};
pub mod address_space;
pub mod diversion_session;
pub mod record_session;
pub mod replay_session;
pub mod session_common;
pub mod session_inner;
pub mod task;
/// Note that this is NOT Rc<RefCell<Box<dyn Session>>>
/// Session will be shared.
/// Individual parts of the session can be wrapped in RefCell<> as required
pub type SessionSharedPtr = Rc<Box<dyn Session>>;
pub type SessionSharedWeakPtr = Weak<Box<dyn Session>>;
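// A self-contained sketch of the sharing pattern described above (the Demo
// names are invented for illustration, not part of rd): the outer Rc makes
// the whole trait object shareable, while interior RefCell fields keep
// individual parts independently borrowable, so no RefCell is needed around
// the trait object itself.
trait Demo {
fn bump(&self);
fn get(&self) -> u32;
}
struct DemoImpl {
counter: std::cell::RefCell<u32>,
}
impl Demo for DemoImpl {
fn bump(&self) {
*self.counter.borrow_mut() += 1;
}
fn get(&self) -> u32 {
*self.counter.borrow()
}
}
fn demo_shared_session() {
let s: Rc<Box<dyn Demo>> = Rc::new(Box::new(DemoImpl { counter: std::cell::RefCell::new(0) }));
let s2 = Rc::clone(&s);
s.bump(); // auto-derefs through Rc<Box<dyn Demo>>
assert_eq!(s2.get(), 1);
}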
pub trait Session: DerefMut<Target = SessionInner> {
/// `tasks().len()` will be zero and all the OS tasks will be
/// gone when this returns, or this won't return.
fn kill_all_tasks(&self);
fn as_session_inner(&self) -> &SessionInner;
fn as_session_inner_mut(&mut self) -> &mut SessionInner;
/// DIFF NOTE: Simply called on_destroy() in rr.
fn on_destroy_task(&self, t: &dyn Task) {
self.tasks_mut().remove(&t.rec_tid());
}
fn as_record(&self) -> Option<&RecordSession> {
None
}
fn as_record_mut(&mut self) -> Option<&mut RecordSession> {
None
}
fn as_replay(&self) -> Option<&ReplaySession> {
None
}
fn as_diversion(&self) -> Option<&DiversionSession> {
None
}
fn as_diversion_mut(&mut self) -> Option<&DiversionSession> {
None
}
/// Avoid using these boolean methods. Use the `as_*` methods that return Option<> instead.
fn is_recording(&self) -> bool {
self.as_record().is_some()
}
fn is_replaying(&self) -> bool {
self.as_replay().is_some()
}
fn is_diversion(&self) -> bool {
self.as_diversion().is_some()
}
fn new_task(
&self,
tid: pid_t,
rec_tid: Option<pid_t>,
serial: u32,
a: SupportedArch,
weak_self: TaskSharedWeakPtr,
) -> Box<dyn Task>;
fn trace_stream(&self) -> Option<Ref<'_, TraceStream>> {
None
}
fn trace_stream_mut(&self) -> Option<RefMut<'_, TraceStream>> {
None
}
fn cpu_binding(&self, trace: &TraceStream) -> Option<u32> {
trace.bound_to_cpu()
}
/// DIFF NOTE: Simply called on_create() in rr
fn on_create_task(&self, t: TaskSharedPtr);
/// NOTE: called Session::copy_state_to() in rr.
fn copy_state_to_session(
&self,
dest: SessionSharedPtr,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
) {
self.assert_fully_initialized();
debug_assert!(dest.clone_completion.borrow().is_none());
let mut completion = CloneCompletion::default();
for (_uid, vm_weak) in self.vm_map.borrow().iter() {
// Pick an arbitrary task to be group leader. The actual group leader
// might have died already.
let vm = vm_weak.upgrade().unwrap();
let group_leader = vm.task_set().iter().next().unwrap();
log!(
LogDebug,
" forking tg {} (real: {})",
group_leader.tgid(),
group_leader.real_tgid()
);
let mut group: AddressSpaceClone = AddressSpaceClone::default();
let clone_leader: TaskSharedPtr = os_fork_into(&**group_leader, dest.clone());
group.clone_leader = Rc::downgrade(&clone_leader);
dest.on_create_task(clone_leader.clone());
log!(LogDebug, " forked new group leader {}", clone_leader.tid());
{
let mut remote = AutoRemoteSyscalls::new(&**clone_leader);
let mut shared_maps_to_clone = Vec::new();
for (&k, m) in &clone_leader.vm().maps() {
// Special case the syscallbuf as a performance optimization. The amount
// of data we need to capture is usually significantly smaller than the
// size of the mapping, so allocating the whole mapping here would be
// wasteful.
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
group
.captured_memory
.push((m.map.start(), capture_syscallbuf(&m, &**clone_leader)));
} else if m.local_addr.is_some() {
ed_assert_eq!(
clone_leader,
m.map.start(),
AddressSpace::preload_thread_locals_start()
);
} else if m.recorded_map.flags().contains(MapFlags::MAP_SHARED)
&& emu_fs.has_file_for(&m.recorded_map)
{
shared_maps_to_clone.push(k);
}
}
// Do this in a separate loop to avoid iteration invalidation issues
for k in shared_maps_to_clone {
remap_shared_mmap(&mut remote, emu_fs, dest_emu_fs, k);
}
for t in vm.task_set().iter() {
if Rc::ptr_eq(&group_leader, &t) {
continue;
}
log!(LogDebug, " cloning {}", t.rec_tid());
group.member_states.push(t.capture_state());
}
}
group.clone_leader_state = group_leader.capture_state();
completion.address_spaces.push(group);
}
*dest.clone_completion.borrow_mut() = Some(Box::new(completion));
debug_assert!(!dest.vms().is_empty());
}
/// Call this before doing anything that requires access to the full set
/// of tasks (i.e., almost anything!).
fn finish_initializing(&self) {
if self.clone_completion.borrow().is_none() {
return;
}
// DIFF NOTE: We're setting clone completion to None here instead of at the end of the
// method.
let cc = self.clone_completion.replace(None).unwrap();
for tgleader in &cc.address_spaces {
let leader = tgleader.clone_leader.upgrade().unwrap();
{
let mut remote = AutoRemoteSyscalls::new(&**leader);
let mut mk_vec = Vec::new();
for (&mk, m) in &remote.vm().maps() {
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
mk_vec.push(mk);
}
}
for mk in mk_vec {
// Creating this mapping was delayed in capture_state for performance
remote.recreate_shared_mmap(mk, None, None);
}
}
for (rptr, captured_mem) in &tgleader.captured_memory {
leader.write_bytes_helper(*rptr, captured_mem, None, WriteFlags::empty());
}
{
let mut remote2 = AutoRemoteSyscalls::new(&**leader);
for tgmember in &tgleader.member_states {
let t_clone = task_common::os_clone_into(tgmember, &mut remote2);
self.on_create_task(t_clone.clone());
copy_state(&**t_clone, tgmember);
}
}
copy_state(
&**tgleader.clone_leader.upgrade().unwrap(),
&tgleader.clone_leader_state,
);
}
// Don't need to set clone completion to `None`. It's already been done!
}
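// A minimal sketch of the lazy-completion idiom used above (types and names
// invented for illustration): the deferred work lives in a
// RefCell<Option<...>>, and replace(None) both takes ownership of it and
// marks it done in a single step, so the work can run at most once.
fn drain_pending(pending: &std::cell::RefCell<Option<Vec<u32>>>) -> usize {
match pending.replace(None) {
Some(items) => items.len(), // run the deferred work exactly once
None => 0, // already initialized: nothing left to do
}
}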
/// See Task::clone().
/// This method is simply called Session::clone in rr.
fn clone_task(
&self,
p: &dyn Task,
flags: CloneFlags,
stack: RemotePtr<Void>,
tls: RemotePtr<Void>,
cleartid_addr: RemotePtr<i32>,
new_tid: pid_t,
new_rec_tid: Option<pid_t>,
) -> TaskSharedPtr {
self.assert_fully_initialized();
let c = p.clone_task(
CloneReason::TraceeClone,
flags,
stack,
tls,
cleartid_addr,
new_tid,
new_rec_tid,
self.next_task_serial(),
None,
);
self.on_create_task(c.clone());
c
}
/// Return the task created with `rec_tid`, or None if no such
/// task exists.
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_rec_tid(&self, rec_tid: pid_t) -> Option<TaskSharedPtr> {
self.finish_initializing();
self.tasks().get(&rec_tid).cloned()
}
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_task_uid(&self, tuid: TaskUid) -> Option<TaskSharedPtr> {
self.find_task_from_rec_tid(tuid.tid())
}
/// Return the thread group whose unique ID is `tguid`, or None if no such
/// thread group exists.
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_tguid(&self, tguid: ThreadGroupUid) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
self.thread_group_map()
.get(&tguid)
.map(|t| t.upgrade().unwrap())
}
/// Find the thread group for a specific pid
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_pid(&self, pid: pid_t) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
for (tguid, tg) in self.thread_group_map().iter() {
if tguid.tid() == pid {
return Some(tg.upgrade().unwrap());
}
}
None
}
/// Return the AddressSpace whose unique ID is `vmuid`, or None if no such
/// address space exists.
fn find_address_space(&self, vmuid: AddressSpaceUid) -> Option<AddressSpaceSharedPtr> {
self.finish_initializing();
// If the weak ptr was found, we _must_ be able to upgrade it!
self.vm_map().get(&vmuid).map(|a| a.upgrade().unwrap())
}
/// Return a copy of `tg` with the same mappings.
/// NOTE: Called simply Session::clone() in rr
fn clone_tg(&self, t: &dyn Task, tg: ThreadGroupSharedPtr) -> ThreadGroupSharedPtr {
self.assert_fully_initialized();
// If tg already belongs to our session this is a fork to create a new
// taskgroup, otherwise it's a session-clone of an existing taskgroup
if self.weak_self.ptr_eq(tg.borrow().session_weak()) {
ThreadGroup::new(
self.weak_self.clone(),
Some(Rc::downgrade(&tg)),
t.rec_tid(),
t.tid(),
t.own_namespace_tid(),
t.tuid().serial(),
)
} else {
let maybe_parent = match tg.borrow().parent() {
Some(parent_tg) => self
.find_thread_group_from_tguid(parent_tg.borrow().tguid())
.map(|found| Rc::downgrade(&found)),
None => None,
};
ThreadGroup::new(
self.weak_self.clone(),
maybe_parent,
tg.borrow().tgid,
t.tid(),
t.own_namespace_tid(),
tg.borrow().tguid().serial(),
)
}
}
/// Return the set of Tasks being traced in this session.
fn tasks(&self) -> Ref<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow()
}
fn tasks_mut(&self) -> RefMut<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow_mut()
}
fn thread_group_map(&self) -> Ref<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow()
}
fn thread_group_map_mut(&self) -> RefMut<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow_mut()
}
fn vm_map(&self) -> Ref<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow()
}
fn vm_map_mut(&self) -> RefMut<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow_mut()
}
/// Call `post_exec()` immediately after a tracee has successfully
/// `execve()`'d. After that, `done_initial_exec()` returns true.
/// This is called while we're still in the execve syscall so it's not safe
/// to perform remote syscalls in this method.
///
/// Tracee state can't be validated before the first exec,
/// because the address space inside the rd process for `rd replay`
/// will be different than it was for `rd record`.
/// After the first exec, we're running tracee code, and
/// everything must be the same.
///
/// DIFF NOTE: Additional param `t`. Makes things simpler.
fn post_exec(&self, t: &dyn Task) {
// We just saw a successful exec(), so from now on we know
// that the address space layout for the replay tasks will
// (should!) be the same as for the recorded tasks. So we can
// start validating registers at events.
self.assert_fully_initialized();
if self.done_initial_exec() {
return;
}
self.done_initial_exec_.set(true);
debug_assert_eq!(self.tasks().len(), 1);
t.flush_inconsistent_state();
self.spawned_task_error_fd_.borrow_mut().close();
}
}
fn remap_shared_mmap(
remote: &mut AutoRemoteSyscalls,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
k: MemoryRangeKey,
) {
let m = remote.vm().mapping_of(k.start()).unwrap().clone();
log!(
LogDebug,
" remapping shared region at {}-{}",
m.map.start(),
m.map.end()
);
let arch = remote.arch();
rd_infallible_syscall!(
remote,
syscall_number_for_munmap(arch),
m.map.start().as_usize(),
m.map.size()
);
let emu_file;
if let Some(file) = dest_emu_fs.at(&m.recorded_map) {
emu_file = file;
} else {
emu_file = dest_emu_fs.clone_file(emu_fs.at(&m.recorded_map).unwrap());
}
// TODO: this duplicates some code in replay_syscall.cc, but
// it's somewhat nontrivial to factor that code out.
let remote_fd: i32;
{
let path = emu_file.borrow().proc_path();
let arch = remote.arch();
let mut child_path = AutoRestoreMem::push_cstr(remote, path.as_str());
// Always open the emufs file O_RDWR, even if the current mapping prot
// is read-only. We might mprotect it to read-write later.
// skip leading '/' since we want the path to be relative to the root fd
let addr: RemotePtr<Void> = child_path.get().unwrap() + 1usize;
let res = rd_infallible_syscall!(
child_path,
syscall_number_for_openat(arch),
RD_RESERVED_ROOT_DIR_FD,
addr.as_usize(),
libc::O_RDWR
);
if 0 > res {
fatal!("Couldn't open {} in tracee", path);
}
remote_fd = res as i32;
}
let real_file = remote.task().stat_fd(remote_fd);
let real_file_name = remote.task().file_name_of_fd(remote_fd);
// XXX this condition is x86/x64-specific, I imagine.
remote.infallible_mmap_syscall(
Some(m.map.start()),
m.map.size(),
m.map.prot(),
// The remapped segment *must* be
// remapped at the same address,
// or else many things will go
// haywire.
(m.map.flags() &!MapFlags::MAP_ANONYMOUS) | MapFlags::MAP_FIXED,
remote_fd,
m.map.file_offset_bytes() / page_size() as u64,
);
// We update the AddressSpace mapping too, since that tracks the real file
// name and we need to update that.
remote.vm().map(
remote.task(),
m.map.start(),
m.map.size(),
m.map.prot(),
m.map.flags(),
m.map.file_offset_bytes(),
&real_file_name,
real_file.st_dev,
real_file.st_ino,
None,
Some(&m.recorded_map),
Some(emu_file),
None,
None,
);
let arch = remote.arch();
remote.infallible_syscall(syscall_number_for_close(arch), &[remote_fd as usize]);
}
fn capture_syscallbuf(m: &Mapping, clone_leader: &dyn Task) -> Vec<u8> | {
let start = m.map.start();
let data_size: usize;
let num_bytes_addr =
RemotePtr::<u32>::cast(remote_ptr_field!(start, syscallbuf_hdr, num_rec_bytes));
if read_val_mem(
clone_leader,
remote_ptr_field!(start, syscallbuf_hdr, locked),
None,
) != 0u8
{
// There may be an incomplete syscall record after num_rec_bytes that
// we need to capture here. We don't know how big that record is,
// so just record the entire buffer. This should not be common.
data_size = m.map.size();
} else {
data_size =
read_val_mem(clone_leader, num_bytes_addr, None) as usize + size_of::<syscallbuf_hdr>();
}
read_mem(clone_leader, start, data_size, None) | identifier_body |
|
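// Worked example for the size computation in capture_syscallbuf (numbers
// invented for illustration): with an unlocked buffer, num_rec_bytes = 4096
// and a hypothetical size_of::<syscallbuf_hdr>() of 48 give
// data_size = 4144 bytes, while a locked buffer falls back to the full
// mapping size because the in-flight record length is unknown.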
session.rs | number_for_munmap, syscall_number_for_openat,
SupportedArch,
},
log::LogDebug,
preload_interface::syscallbuf_hdr,
rd::RD_RESERVED_ROOT_DIR_FD,
remote_ptr::{RemotePtr, Void},
session::{
address_space::{
address_space::{AddressSpaceSharedPtr, Mapping},
memory_range::MemoryRangeKey,
MappingFlags,
},
diversion_session::DiversionSession,
record_session::RecordSession,
replay_session::ReplaySession,
session_inner::{AddressSpaceMap, SessionInner, TaskMap, ThreadGroupMap},
task::{
task_common::{self, copy_state, os_fork_into, read_mem, read_val_mem},
task_inner::{CloneFlags, CloneReason, WriteFlags},
Task, TaskSharedPtr, TaskSharedWeakPtr,
},
},
taskish_uid::{AddressSpaceUid, TaskUid, ThreadGroupUid},
thread_group::{ThreadGroup, ThreadGroupSharedPtr},
trace::trace_stream::TraceStream,
util::page_size,
};
use address_space::address_space::AddressSpace;
use libc::pid_t;
use nix::sys::mman::MapFlags;
use session_inner::{AddressSpaceClone, CloneCompletion};
use std::{
cell::{Ref, RefMut},
mem::size_of,
ops::DerefMut,
rc::{Rc, Weak},
};
pub mod address_space;
pub mod diversion_session;
pub mod record_session;
pub mod replay_session;
pub mod session_common;
pub mod session_inner;
pub mod task;
/// Note that this is NOT Rc<RefCell<Box<dyn Session>>>
/// Session will be shared.
/// Individual parts of the session can be wrapped in RefCell<> as required
pub type SessionSharedPtr = Rc<Box<dyn Session>>;
pub type SessionSharedWeakPtr = Weak<Box<dyn Session>>;
pub trait Session: DerefMut<Target = SessionInner> {
/// `tasks().len()` will be zero and all the OS tasks will be
/// gone when this returns, or this won't return.
fn kill_all_tasks(&self);
fn as_session_inner(&self) -> &SessionInner;
fn as_session_inner_mut(&mut self) -> &mut SessionInner;
/// DIFF NOTE: Simply called on_destroy() in rr.
fn on_destroy_task(&self, t: &dyn Task) {
self.tasks_mut().remove(&t.rec_tid());
}
fn as_record(&self) -> Option<&RecordSession> {
None
}
fn as_record_mut(&mut self) -> Option<&mut RecordSession> {
None
}
fn as_replay(&self) -> Option<&ReplaySession> {
None
}
fn as_diversion(&self) -> Option<&DiversionSession> {
None
}
fn as_diversion_mut(&mut self) -> Option<&DiversionSession> {
None
}
/// Avoid using these boolean methods. Use the `as_*` methods that return Option<> instead.
fn is_recording(&self) -> bool {
self.as_record().is_some()
}
fn is_replaying(&self) -> bool {
self.as_replay().is_some()
}
fn is_diversion(&self) -> bool {
self.as_diversion().is_some()
}
fn new_task(
&self,
tid: pid_t,
rec_tid: Option<pid_t>,
serial: u32,
a: SupportedArch,
weak_self: TaskSharedWeakPtr,
) -> Box<dyn Task>;
fn trace_stream(&self) -> Option<Ref<'_, TraceStream>> {
None
}
fn trace_stream_mut(&self) -> Option<RefMut<'_, TraceStream>> {
None
}
fn cpu_binding(&self, trace: &TraceStream) -> Option<u32> {
trace.bound_to_cpu()
}
/// DIFF NOTE: Simply called on_create() in rr
fn on_create_task(&self, t: TaskSharedPtr);
/// NOTE: called Session::copy_state_to() in rr.
fn copy_state_to_session(
&self,
dest: SessionSharedPtr,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
) {
self.assert_fully_initialized();
debug_assert!(dest.clone_completion.borrow().is_none());
let mut completion = CloneCompletion::default();
for (_uid, vm_weak) in self.vm_map.borrow().iter() {
// Pick an arbitrary task to be group leader. The actual group leader
// might have died already.
let vm = vm_weak.upgrade().unwrap();
let group_leader = vm.task_set().iter().next().unwrap();
log!(
LogDebug,
" forking tg {} (real: {})",
group_leader.tgid(),
group_leader.real_tgid()
);
let mut group: AddressSpaceClone = AddressSpaceClone::default();
let clone_leader: TaskSharedPtr = os_fork_into(&**group_leader, dest.clone());
group.clone_leader = Rc::downgrade(&clone_leader);
dest.on_create_task(clone_leader.clone());
log!(LogDebug, " forked new group leader {}", clone_leader.tid());
{
let mut remote = AutoRemoteSyscalls::new(&**clone_leader);
let mut shared_maps_to_clone = Vec::new();
for (&k, m) in &clone_leader.vm().maps() {
// Special case the syscallbuf as a performance optimization. The amount
// of data we need to capture is usually significantly smaller than the
// size of the mapping, so allocating the whole mapping here would be
// wasteful.
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) | else if m.local_addr.is_some() {
ed_assert_eq!(
clone_leader,
m.map.start(),
AddressSpace::preload_thread_locals_start()
);
} else if m.recorded_map.flags().contains(MapFlags::MAP_SHARED)
&& emu_fs.has_file_for(&m.recorded_map)
{
shared_maps_to_clone.push(k);
}
}
// Do this in a separate loop to avoid iteration invalidation issues
for k in shared_maps_to_clone {
remap_shared_mmap(&mut remote, emu_fs, dest_emu_fs, k);
}
for t in vm.task_set().iter() {
if Rc::ptr_eq(&group_leader, &t) {
continue;
}
log!(LogDebug, " cloning {}", t.rec_tid());
group.member_states.push(t.capture_state());
}
}
group.clone_leader_state = group_leader.capture_state();
completion.address_spaces.push(group);
}
*dest.clone_completion.borrow_mut() = Some(Box::new(completion));
debug_assert!(!dest.vms().is_empty());
}
/// Call this before doing anything that requires access to the full set
/// of tasks (i.e., almost anything!).
fn finish_initializing(&self) {
if self.clone_completion.borrow().is_none() {
return;
}
// DIFF NOTE: We're setting clone completion to None here instead of at the end of the
// method.
let cc = self.clone_completion.replace(None).unwrap();
for tgleader in &cc.address_spaces {
let leader = tgleader.clone_leader.upgrade().unwrap();
{
let mut remote = AutoRemoteSyscalls::new(&**leader);
let mut mk_vec = Vec::new();
for (&mk, m) in &remote.vm().maps() {
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
mk_vec.push(mk);
}
}
for mk in mk_vec {
// Creating this mapping was delayed in capture_state for performance
remote.recreate_shared_mmap(mk, None, None);
}
}
for (rptr, captured_mem) in &tgleader.captured_memory {
leader.write_bytes_helper(*rptr, captured_mem, None, WriteFlags::empty());
}
{
let mut remote2 = AutoRemoteSyscalls::new(&**leader);
for tgmember in &tgleader.member_states {
let t_clone = task_common::os_clone_into(tgmember, &mut remote2);
self.on_create_task(t_clone.clone());
copy_state(&**t_clone, tgmember);
}
}
copy_state(
&**tgleader.clone_leader.upgrade().unwrap(),
&tgleader.clone_leader_state,
);
}
// Don't need to set clone completion to `None`. It's already been done!
}
/// See Task::clone().
/// This method is simply called Session::clone in rr.
fn clone_task(
&self,
p: &dyn Task,
flags: CloneFlags,
stack: RemotePtr<Void>,
tls: RemotePtr<Void>,
cleartid_addr: RemotePtr<i32>,
new_tid: pid_t,
new_rec_tid: Option<pid_t>,
) -> TaskSharedPtr {
self.assert_fully_initialized();
let c = p.clone_task(
CloneReason::TraceeClone,
flags,
stack,
tls,
cleartid_addr,
new_tid,
new_rec_tid,
self.next_task_serial(),
None,
);
self.on_create_task(c.clone());
c
}
/// Return the task created with `rec_tid`, or None if no such
/// task exists.
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_rec_tid(&self, rec_tid: pid_t) -> Option<TaskSharedPtr> {
self.finish_initializing();
self.tasks().get(&rec_tid).cloned()
}
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_task_uid(&self, tuid: TaskUid) -> Option<TaskSharedPtr> {
self.find_task_from_rec_tid(tuid.tid())
}
/// Return the thread group whose unique ID is `tguid`, or None if no such
/// thread group exists.
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_tguid(&self, tguid: ThreadGroupUid) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
self.thread_group_map()
.get(&tguid)
.map(|t| t.upgrade().unwrap())
}
/// Find the thread group for a specific pid
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_pid(&self, pid: pid_t) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
for (tguid, tg) in self.thread_group_map().iter() {
if tguid.tid() == pid {
return Some(tg.upgrade().unwrap());
}
}
None
}
/// Return the AddressSpace whose unique ID is `vmuid`, or None if no such
/// address space exists.
fn find_address_space(&self, vmuid: AddressSpaceUid) -> Option<AddressSpaceSharedPtr> {
self.finish_initializing();
// If the weak ptr was found, we _must_ be able to upgrade it!
self.vm_map().get(&vmuid).map(|a| a.upgrade().unwrap())
}
/// Return a copy of `tg` with the same mappings.
/// NOTE: Called simply Session::clone() in rr
fn clone_tg(&self, t: &dyn Task, tg: ThreadGroupSharedPtr) -> ThreadGroupSharedPtr {
self.assert_fully_initialized();
// If tg already belongs to our session this is a fork to create a new
// taskgroup, otherwise it's a session-clone of an existing taskgroup
if self.weak_self.ptr_eq(tg.borrow().session_weak()) {
ThreadGroup::new(
self.weak_self.clone(),
Some(Rc::downgrade(&tg)),
t.rec_tid(),
t.tid(),
t.own_namespace_tid(),
t.tuid().serial(),
)
} else {
let maybe_parent = match tg.borrow().parent() {
Some(parent_tg) => self
.find_thread_group_from_tguid(parent_tg.borrow().tguid())
.map(|found| Rc::downgrade(&found)),
None => None,
};
ThreadGroup::new(
self.weak_self.clone(),
maybe_parent,
tg.borrow().tgid,
t.tid(),
t.own_namespace_tid(),
tg.borrow().tguid().serial(),
)
}
}
/// Return the set of Tasks being traced in this session.
fn tasks(&self) -> Ref<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow()
}
fn tasks_mut(&self) -> RefMut<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow_mut()
}
fn thread_group_map(&self) -> Ref<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow()
}
fn thread_group_map_mut(&self) -> RefMut<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow_mut()
}
fn vm_map(&self) -> Ref<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow()
}
fn vm_map_mut(&self) -> RefMut<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow_mut()
}
/// Call `post_exec()` immediately after a tracee has successfully
/// `execve()`'d. After that, `done_initial_exec()` returns true.
/// This is called while we're still in the execve syscall so it's not safe
/// to perform remote syscalls in this method.
///
/// Tracee state can't be validated before the first exec,
/// because the address space inside the rd process for `rd replay`
/// will be different than it was for `rd record`.
/// After the first exec, we're running tracee code, and
/// everything must be the same.
///
/// DIFF NOTE: Additional param `t`. Makes things simpler.
fn post_exec(&self, t: &dyn Task) {
// We just saw a successful exec(), so from now on we know
// that the address space layout for the replay tasks will
// (should!) be the same as for the recorded tasks. So we can
// start validating registers at events.
self.assert_fully_initialized();
if self.done_initial_exec() {
return;
}
self.done_initial_exec_.set(true);
debug_assert_eq!(self.tasks().len(), 1);
t.flush_inconsistent_state();
self.spawned_task_error_fd_.borrow_mut().close();
}
}
fn remap_shared_mmap(
remote: &mut AutoRemoteSyscalls,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
k: MemoryRangeKey,
) {
let m = remote.vm().mapping_of(k.start()).unwrap().clone();
log!(
LogDebug,
" remapping shared region at {}-{}",
m.map.start(),
m.map.end()
);
let arch = remote.arch();
rd_infallible_syscall!(
remote,
syscall_number_for_munmap(arch),
m.map.start().as_usize(),
m.map.size()
);
let emu_file;
if let Some(file) = dest_emu_fs.at(&m.recorded_map) {
emu_file = file;
} else {
emu_file = dest_emu_fs.clone_file(emu_fs.at(&m.recorded_map).unwrap());
}
// TODO: this duplicates some code in replay_syscall.cc, but
// it's somewhat nontrivial to factor that code out.
let remote_fd: i32;
{
let path = emu_file.borrow().proc_path();
let arch = remote.arch();
let mut child_path = AutoRestoreMem::push_cstr(remote, path.as_str());
// Always open the emufs file O_RDWR, even if the current mapping prot
// is read-only. We might mprotect it to read-write later.
// skip leading '/' since we want the path to be relative to the root fd
let addr: RemotePtr<Void> = child_path.get().unwrap() + 1usize;
let res = rd_infallible_syscall!(
child_path,
syscall_number_for_openat(arch),
RD_RESERVED_ROOT_DIR_FD,
addr.as_usize(),
libc::O_RDWR
);
if 0 > res {
fatal!("Couldn't open {} in tracee", path);
}
remote_fd = res as i32;
}
let real_file = remote.task().stat_fd(remote_fd);
let real_file_name = remote.task().file_name_of_fd(remote_fd);
// XXX this condition is x86/x64-specific, I imagine.
remote.infallible_mmap_syscall(
Some(m.map.start()),
m.map.size(),
m.map.prot(),
// The remapped segment *must* be
// remapped at the same address,
// or else many things will go
// haywire.
(m.map.flags() &!MapFlags::MAP_ANONYMOUS) | MapFlags::MAP_FIXED,
remote_fd,
m.map.file_offset_bytes() / page_size() as u64,
);
// We update the AddressSpace mapping too, since that tracks the real file
// name and we need to update that.
remote.vm().map(
remote.task(),
m.map.start(),
m.map.size(),
m.map.prot(),
m.map.flags(),
m.map.file_offset_bytes(),
&real_file_name,
real_file.st_dev,
real_file.st_ino,
None,
Some(&m.recorded_map),
Some(emu_file),
None,
None,
);
let arch = remote.arch();
remote.infallible_syscall(syscall_number_for_close(arch), &[remote_fd as usize]);
}
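// A small sketch of the flag surgery performed above (the function name is
// invented for illustration): the remap must reuse the original flags, minus
// MAP_ANONYMOUS (the region is now backed by a real emufs fd) and plus
// MAP_FIXED (the segment must land at the same address or tracee pointers
// break).
fn remap_flags_demo(orig: MapFlags) -> MapFlags {
(orig & !MapFlags::MAP_ANONYMOUS) | MapFlags::MAP_FIXED
}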
fn capture_syscallbuf(m: &Mapping, clone_leader: &dyn Task) -> Vec<u8> {
let start = m.map.start();
let data_size: usize;
let num_bytes_addr =
RemotePtr::<u32>::cast(remote_ptr_field!(start, syscallbuf_hdr, num_rec_bytes));
if read_val_mem(
clone_leader,
remote_ptr_field!(start, syscallbuf_hdr, locked),
None,
)!= 0u8
{
// There may be an incomplete syscall record after num_rec_bytes that
// we need to capture here. We don't know how big that record is,
// so just record the entire buffer. This should not be common.
data_size = m.map.size();
} else {
data_ | {
group
.captured_memory
.push((m.map.start(), capture_syscallbuf(&m, &**clone_leader)));
} | conditional_block |
session.rs | number_for_munmap, syscall_number_for_openat,
SupportedArch,
},
log::LogDebug,
preload_interface::syscallbuf_hdr,
rd::RD_RESERVED_ROOT_DIR_FD,
remote_ptr::{RemotePtr, Void},
session::{
address_space::{
address_space::{AddressSpaceSharedPtr, Mapping},
memory_range::MemoryRangeKey,
MappingFlags,
},
diversion_session::DiversionSession,
record_session::RecordSession,
replay_session::ReplaySession,
session_inner::{AddressSpaceMap, SessionInner, TaskMap, ThreadGroupMap},
task::{
task_common::{self, copy_state, os_fork_into, read_mem, read_val_mem},
task_inner::{CloneFlags, CloneReason, WriteFlags},
Task, TaskSharedPtr, TaskSharedWeakPtr,
},
},
taskish_uid::{AddressSpaceUid, TaskUid, ThreadGroupUid},
thread_group::{ThreadGroup, ThreadGroupSharedPtr},
trace::trace_stream::TraceStream,
util::page_size,
};
use address_space::address_space::AddressSpace;
use libc::pid_t;
use nix::sys::mman::MapFlags;
use session_inner::{AddressSpaceClone, CloneCompletion};
use std::{
cell::{Ref, RefMut},
mem::size_of,
ops::DerefMut,
rc::{Rc, Weak},
};
pub mod address_space;
pub mod diversion_session;
pub mod record_session;
pub mod replay_session;
pub mod session_common;
pub mod session_inner;
pub mod task;
/// Note that this is NOT Rc<RefCell<Box<dyn Session>>>
/// Session will be shared.
/// Individual parts of the session can be wrapped in RefCell<> as required
pub type SessionSharedPtr = Rc<Box<dyn Session>>;
pub type SessionSharedWeakPtr = Weak<Box<dyn Session>>;
pub trait Session: DerefMut<Target = SessionInner> {
/// `tasks().len()` will be zero and all the OS tasks will be
/// gone when this returns, or this won't return.
fn kill_all_tasks(&self);
fn as_session_inner(&self) -> &SessionInner;
fn as_session_inner_mut(&mut self) -> &mut SessionInner;
/// DIFF NOTE: Simply called on_destroy() in rr.
fn on_destroy_task(&self, t: &dyn Task) {
self.tasks_mut().remove(&t.rec_tid());
}
fn as_record(&self) -> Option<&RecordSession> {
None
}
fn as_record_mut(&mut self) -> Option<&mut RecordSession> {
None
}
fn as_replay(&self) -> Option<&ReplaySession> {
None
}
fn as_diversion(&self) -> Option<&DiversionSession> {
None
}
fn as_diversion_mut(&mut self) -> Option<&DiversionSession> {
None
}
/// Avoid using these boolean methods. Use the `as_*` methods that return Option<> instead.
fn is_recording(&self) -> bool {
self.as_record().is_some()
}
fn is_replaying(&self) -> bool {
self.as_replay().is_some()
}
fn is_diversion(&self) -> bool {
self.as_diversion().is_some()
}
fn new_task(
&self,
tid: pid_t,
rec_tid: Option<pid_t>,
serial: u32,
a: SupportedArch,
weak_self: TaskSharedWeakPtr,
) -> Box<dyn Task>;
fn trace_stream(&self) -> Option<Ref<'_, TraceStream>> {
None
}
fn trace_stream_mut(&self) -> Option<RefMut<'_, TraceStream>> {
None
}
fn cpu_binding(&self, trace: &TraceStream) -> Option<u32> {
trace.bound_to_cpu()
}
/// DIFF NOTE: Simply called on_create() in rr
fn on_create_task(&self, t: TaskSharedPtr);
/// NOTE: called Session::copy_state_to() in rr.
fn copy_state_to_session(
&self,
dest: SessionSharedPtr,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
) {
self.assert_fully_initialized();
debug_assert!(dest.clone_completion.borrow().is_none());
let mut completion = CloneCompletion::default();
for (_uid, vm_weak) in self.vm_map.borrow().iter() {
// Pick an arbitrary task to be group leader. The actual group leader
// might have died already.
let vm = vm_weak.upgrade().unwrap();
let group_leader = vm.task_set().iter().next().unwrap();
log!(
LogDebug,
" forking tg {} (real: {})",
group_leader.tgid(),
group_leader.real_tgid()
);
let mut group: AddressSpaceClone = AddressSpaceClone::default();
let clone_leader: TaskSharedPtr = os_fork_into(&**group_leader, dest.clone());
group.clone_leader = Rc::downgrade(&clone_leader);
dest.on_create_task(clone_leader.clone());
log!(LogDebug, " forked new group leader {}", clone_leader.tid());
{
let mut remote = AutoRemoteSyscalls::new(&**clone_leader);
let mut shared_maps_to_clone = Vec::new();
for (&k, m) in &clone_leader.vm().maps() {
// Special case the syscallbuf as a performance optimization. The amount
// of data we need to capture is usually significantly smaller than the
// size of the mapping, so allocating the whole mapping here would be
// wasteful.
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
group
.captured_memory
.push((m.map.start(), capture_syscallbuf(&m, &**clone_leader)));
} else if m.local_addr.is_some() {
ed_assert_eq!(
clone_leader,
m.map.start(),
AddressSpace::preload_thread_locals_start()
);
} else if m.recorded_map.flags().contains(MapFlags::MAP_SHARED)
&& emu_fs.has_file_for(&m.recorded_map)
{
shared_maps_to_clone.push(k);
}
}
// Do this in a separate loop to avoid iteration invalidation issues
for k in shared_maps_to_clone {
remap_shared_mmap(&mut remote, emu_fs, dest_emu_fs, k);
}
for t in vm.task_set().iter() {
if Rc::ptr_eq(&group_leader, &t) {
continue;
}
log!(LogDebug, " cloning {}", t.rec_tid());
group.member_states.push(t.capture_state());
}
}
group.clone_leader_state = group_leader.capture_state();
completion.address_spaces.push(group);
}
*dest.clone_completion.borrow_mut() = Some(Box::new(completion));
debug_assert!(!dest.vms().is_empty());
}
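    // The copy above is deliberately two-phase: only each address space's
    // group leader is forked here, and the captured member states are parked
    // in the CloneCompletion. finish_initializing() below replays them on
    // first access, deferring the per-task cloning work until the new
    // session is actually used.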
/// Call this before doing anything that requires access to the full set
/// of tasks (i.e., almost anything!).
fn finish_initializing(&self) {
if self.clone_completion.borrow().is_none() {
return;
}
// DIFF NOTE: We're setting clone completion to None here instead of at the end of the
// method.
let cc = self.clone_completion.replace(None).unwrap();
for tgleader in &cc.address_spaces {
let leader = tgleader.clone_leader.upgrade().unwrap();
{
let mut remote = AutoRemoteSyscalls::new(&**leader);
let mut mk_vec = Vec::new();
for (&mk, m) in &remote.vm().maps() {
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
mk_vec.push(mk);
}
}
for mk in mk_vec {
// Creating this mapping was delayed in capture_state for performance
remote.recreate_shared_mmap(mk, None, None);
}
}
for (rptr, captured_mem) in &tgleader.captured_memory {
leader.write_bytes_helper(*rptr, captured_mem, None, WriteFlags::empty());
}
{
let mut remote2 = AutoRemoteSyscalls::new(&**leader);
for tgmember in &tgleader.member_states {
let t_clone = task_common::os_clone_into(tgmember, &mut remote2);
self.on_create_task(t_clone.clone());
copy_state(&**t_clone, tgmember);
}
}
copy_state(
&**tgleader.clone_leader.upgrade().unwrap(),
&tgleader.clone_leader_state,
);
}
        // Don't need to set clone completion to `None`. It's already been done!
}
/// See Task::clone().
/// This method is simply called Session::clone in rr.
fn clone_task(
&self,
p: &dyn Task,
flags: CloneFlags,
stack: RemotePtr<Void>,
tls: RemotePtr<Void>,
cleartid_addr: RemotePtr<i32>,
new_tid: pid_t,
new_rec_tid: Option<pid_t>,
) -> TaskSharedPtr {
self.assert_fully_initialized();
let c = p.clone_task(
CloneReason::TraceeClone,
flags,
stack,
tls,
cleartid_addr,
new_tid,
new_rec_tid,
self.next_task_serial(),
None,
);
self.on_create_task(c.clone());
c
}
/// Return the task created with `rec_tid`, or None if no such
/// task exists.
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_rec_tid(&self, rec_tid: pid_t) -> Option<TaskSharedPtr> {
self.finish_initializing();
self.tasks().get(&rec_tid).cloned()
}
    /// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_task_uid(&self, tuid: TaskUid) -> Option<TaskSharedPtr> {
self.find_task_from_rec_tid(tuid.tid())
}
/// Return the thread group whose unique ID is `tguid`, or None if no such
/// thread group exists.
    /// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_tguid(&self, tguid: ThreadGroupUid) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
self.thread_group_map()
.get(&tguid)
.map(|t| t.upgrade().unwrap())
}
/// Find the thread group for a specific pid
    /// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_pid(&self, pid: pid_t) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
for (tguid, tg) in self.thread_group_map().iter() {
if tguid.tid() == pid {
return Some(tg.upgrade().unwrap());
}
}
None
}
/// Return the AddressSpace whose unique ID is `vmuid`, or None if no such
/// address space exists.
    fn find_address_space(&self, vmuid: AddressSpaceUid) -> Option<AddressSpaceSharedPtr> {
self.finish_initializing();
        // If the weak ptr was found, we _must_ be able to upgrade it!
self.vm_map().get(&vmuid).map(|a| a.upgrade().unwrap())
}
/// Return a copy of `tg` with the same mappings.
/// NOTE: Called simply Session::clone() in rr
fn clone_tg(&self, t: &dyn Task, tg: ThreadGroupSharedPtr) -> ThreadGroupSharedPtr {
self.assert_fully_initialized();
// If tg already belongs to our session this is a fork to create a new
// taskgroup, otherwise it's a session-clone of an existing taskgroup
if self.weak_self.ptr_eq(tg.borrow().session_weak()) {
ThreadGroup::new(
self.weak_self.clone(),
Some(Rc::downgrade(&tg)),
t.rec_tid(),
t.tid(),
t.own_namespace_tid(),
t.tuid().serial(),
)
} else {
let maybe_parent = match tg.borrow().parent() {
Some(parent_tg) => self
.find_thread_group_from_tguid(parent_tg.borrow().tguid())
.map(|found| Rc::downgrade(&found)),
None => None,
};
ThreadGroup::new(
self.weak_self.clone(),
maybe_parent,
tg.borrow().tgid,
t.tid(),
t.own_namespace_tid(),
tg.borrow().tguid().serial(),
)
}
}
/// Return the set of Tasks being traced in this session.
fn tasks(&self) -> Ref<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow()
}
fn tasks_mut(&self) -> RefMut<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow_mut()
}
fn thread_group_map(&self) -> Ref<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow()
}
fn thread_group_map_mut(&self) -> RefMut<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow_mut()
}
fn vm_map(&self) -> Ref<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow()
}
fn vm_map_mut(&self) -> RefMut<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow_mut()
}
/// Call `post_exec()` immediately after a tracee has successfully
/// `execve()`'d. After that, `done_initial_exec()` returns true.
/// This is called while we're still in the execve syscall so it's not safe
/// to perform remote syscalls in this method.
///
/// Tracee state can't be validated before the first exec,
/// because the address space inside the rd process for `rd replay`
/// will be different than it was for `rd record`.
/// After the first exec, we're running tracee code, and
/// everything must be the same.
///
/// DIFF NOTE: Additional param `t`. Makes things simpler.
fn post_exec(&self, t: &dyn Task) {
// We just saw a successful exec(), so from now on we know
// that the address space layout for the replay tasks will
// (should!) be the same as for the recorded tasks. So we can
// start validating registers at events.
self.assert_fully_initialized();
if self.done_initial_exec() {
return;
}
self.done_initial_exec_.set(true);
debug_assert_eq!(self.tasks().len(), 1);
t.flush_inconsistent_state();
self.spawned_task_error_fd_.borrow_mut().close();
}
}
fn remap_shared_mmap(
remote: &mut AutoRemoteSyscalls,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
k: MemoryRangeKey,
) {
let m = remote.vm().mapping_of(k.start()).unwrap().clone();
log!(
LogDebug,
" remapping shared region at {}-{}",
m.map.start(),
m.map.end()
);
let arch = remote.arch();
rd_infallible_syscall!(
remote,
syscall_number_for_munmap(arch),
m.map.start().as_usize(),
m.map.size()
);
let emu_file;
if let Some(file) = dest_emu_fs.at(&m.recorded_map) {
emu_file = file;
} else {
emu_file = dest_emu_fs.clone_file(emu_fs.at(&m.recorded_map).unwrap());
}
// TODO: this duplicates some code in replay_syscall.cc, but
// it's somewhat nontrivial to factor that code out.
let remote_fd: i32;
{
let path = emu_file.borrow().proc_path();
let arch = remote.arch();
let mut child_path = AutoRestoreMem::push_cstr(remote, path.as_str());
// Always open the emufs file O_RDWR, even if the current mapping prot
// is read-only. We might mprotect it to read-write later.
// skip leading '/' since we want the path to be relative to the root fd
let addr: RemotePtr<Void> = child_path.get().unwrap() + 1usize;
let res = rd_infallible_syscall!(
child_path,
syscall_number_for_openat(arch),
RD_RESERVED_ROOT_DIR_FD,
addr.as_usize(),
libc::O_RDWR
);
if 0 > res {
fatal!("Couldn't open {} in tracee", path);
}
remote_fd = res as i32;
}
let real_file = remote.task().stat_fd(remote_fd);
let real_file_name = remote.task().file_name_of_fd(remote_fd);
// XXX this condition is x86/x64-specific, I imagine.
remote.infallible_mmap_syscall(
Some(m.map.start()),
m.map.size(),
m.map.prot(),
// The remapped segment *must* be
// remapped at the same address,
// or else many things will go
// haywire.
        (m.map.flags() & !MapFlags::MAP_ANONYMOUS) | MapFlags::MAP_FIXED,
remote_fd,
m.map.file_offset_bytes() / page_size() as u64,
);
// We update the AddressSpace mapping too, since that tracks the real file
// name and we need to update that.
remote.vm().map(
remote.task(),
m.map.start(),
m.map.size(),
m.map.prot(),
m.map.flags(),
m.map.file_offset_bytes(),
&real_file_name,
real_file.st_dev,
real_file.st_ino,
None,
Some(&m.recorded_map),
Some(emu_file),
None,
None,
);
let arch = remote.arch();
remote.infallible_syscall(syscall_number_for_close(arch), &[remote_fd as usize]);
}
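// Paraphrased, the remote syscall sequence remap_shared_mmap() drives in the
// tracee is (see the calls above for the exact arguments):
//
//     munmap(start, size);
//     fd = openat(RD_RESERVED_ROOT_DIR_FD, emufs_path_without_leading_slash, O_RDWR);
//     mmap(start, size, prot, (flags & !MAP_ANONYMOUS) | MAP_FIXED, fd, offset_pages);
//     close(fd);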
fn capture_syscallbuf(m: &Mapping, clone_leader: &dyn Task) -> Vec<u8> {
let start = m.map.start();
let data_size: usize;
    let num_bytes_addr =
RemotePtr::<u32>::cast(remote_ptr_field!(start, syscallbuf_hdr, num_rec_bytes));
if read_val_mem(
clone_leader,
remote_ptr_field!(start, syscallbuf_hdr, locked),
None,
    ) != 0u8
{
// There may be an incomplete syscall record after num_rec_bytes that
// we need to capture here. We don't know how big that record is,
// so just record the entire buffer. This should not be common.
        data_size = m.map.size();
    } else {
        // NOTE: the text was truncated at this point; the continuation below
        // is reconstructed from the branch above (header size plus the
        // recorded bytes) and is a best-effort reading, not verbatim source.
        data_size = size_of::<syscallbuf_hdr>()
            + read_val_mem(clone_leader, num_bytes_addr, None) as usize;
    }
    read_mem(clone_leader, RemotePtr::<u8>::cast(start), data_size, None)
}
validation_host.rs | // Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
#![cfg(not(any(target_os = "android", target_os = "unknown")))]
use std::{process, env, sync::Arc, sync::atomic, path::PathBuf};
use codec::{Decode, Encode};
use crate::primitives::{ValidationParams, ValidationResult};
use super::{
validate_candidate_internal, ValidationError, InvalidCandidate, InternalError,
MAX_CODE_MEM, MAX_RUNTIME_MEM, MAX_VALIDATION_RESULT_HEADER_MEM,
};
use shared_memory::{SharedMem, SharedMemConf, EventState, WriteLockable, EventWait, EventSet};
use parking_lot::Mutex;
use log::{debug, trace};
use futures::executor::ThreadPool;
use sp_core::traits::SpawnNamed;
const WORKER_ARG: &'static str = "validation-worker";
/// CLI Argument to start in validation worker mode.
pub const WORKER_ARGS: &[&'static str] = &[WORKER_ARG];
/// Execution timeout in seconds.
#[cfg(debug_assertions)]
pub const EXECUTION_TIMEOUT_SEC: u64 = 30;
#[cfg(not(debug_assertions))]
pub const EXECUTION_TIMEOUT_SEC: u64 = 5;
enum Event {
CandidateReady = 0,
ResultReady = 1,
WorkerReady = 2,
}
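// These discriminants index the auto-reset events registered in
// `ValidationHost::create_memory()` below. The protocol: the worker raises
// WorkerReady once after mapping the segment, the host raises CandidateReady
// for each job, and the worker answers every job with ResultReady.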
#[derive(Clone)]
struct TaskExecutor(ThreadPool);
impl TaskExecutor {
fn new() -> Result<Self, String> {
ThreadPool::new().map_err(|e| e.to_string()).map(Self)
}
}
impl SpawnNamed for TaskExecutor {
fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
self.0.spawn_ok(future);
}
fn spawn(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
self.0.spawn_ok(future);
}
}
/// A pool of hosts.
#[derive(Clone, Debug)]
pub struct ValidationPool {
hosts: Arc<Vec<Mutex<ValidationHost>>>,
}
const DEFAULT_NUM_HOSTS: usize = 8;
impl ValidationPool {
/// Creates a validation pool with the default configuration.
pub fn new() -> ValidationPool {
ValidationPool {
hosts: Arc::new((0..DEFAULT_NUM_HOSTS).map(|_| Default::default()).collect()),
}
}
/// Validate a candidate under the given validation code using the next free validation host.
///
/// This will fail if the validation code is not a proper parachain validation module.
///
/// This function will use `std::env::current_exe()` with the default arguments [`WORKER_ARGS`] to run the worker.
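    ///
    /// A minimal usage sketch (`wasm_blob` and `params` are placeholders, not a
    /// real parachain module or its parameters):
    ///
    /// ```ignore
    /// let pool = ValidationPool::new();
    /// let result = pool.validate_candidate(&wasm_blob, params)?;
    /// ```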
pub fn validate_candidate(
&self,
validation_code: &[u8],
params: ValidationParams,
) -> Result<ValidationResult, ValidationError> {
self.validate_candidate_custom(
validation_code,
params,
&env::current_exe().map_err(|err| ValidationError::Internal(err.into()))?,
WORKER_ARGS,
)
}
/// Validate a candidate under the given validation code using the next free validation host.
///
/// This will fail if the validation code is not a proper parachain validation module.
///
/// This function will use the command and the arguments provided in the function's arguments to run the worker.
pub fn validate_candidate_custom(
&self,
validation_code: &[u8],
params: ValidationParams,
command: &PathBuf,
args: &[&str],
) -> Result<ValidationResult, ValidationError> {
for host in self.hosts.iter() {
if let Some(mut host) = host.try_lock() {
return host.validate_candidate(validation_code, params, command, args)
}
}
// all workers are busy, just wait for the first one
self.hosts[0].lock().validate_candidate(validation_code, params, command, args)
}
}
/// Validation worker process entry point. Runs a loop waiting for candidates to validate
/// and sends back results via shared memory.
pub fn run_worker(mem_id: &str) -> Result<(), String> {
let mut memory = match SharedMem::open(mem_id) {
Ok(memory) => memory,
Err(e) => {
debug!("{} Error opening shared memory: {:?}", process::id(), e);
return Err(format!("Error opening shared memory: {:?}", e));
}
};
let exit = Arc::new(atomic::AtomicBool::new(false));
let task_executor = TaskExecutor::new()?;
// spawn parent monitor thread
let watch_exit = exit.clone();
std::thread::spawn(move || {
use std::io::Read;
let mut in_data = Vec::new();
// pipe terminates when parent process exits
std::io::stdin().read_to_end(&mut in_data).ok();
debug!("{} Parent process is dead. Exiting", process::id());
exit.store(true, atomic::Ordering::Relaxed);
});
memory.set(Event::WorkerReady as usize, EventState::Signaled)
.map_err(|e| format!("{} Error setting shared event: {:?}", process::id(), e))?;
loop {
if watch_exit.load(atomic::Ordering::Relaxed) {
break;
}
debug!("{} Waiting for candidate", process::id());
match memory.wait(Event::CandidateReady as usize, shared_memory::Timeout::Sec(3)) {
Err(e) => {
// Timeout
trace!("{} Timeout waiting for candidate: {:?}", process::id(), e);
continue;
}
Ok(()) => {}
}
{
debug!("{} Processing candidate", process::id());
// we have candidate data
let mut slice = memory.wlock_as_slice(0)
.map_err(|e| format!("Error locking shared memory: {:?}", e))?;
let result = {
let data: &mut[u8] = &mut **slice;
let (header_buf, rest) = data.split_at_mut(1024);
let mut header_buf: &[u8] = header_buf;
let header = ValidationHeader::decode(&mut header_buf)
.map_err(|_| format!("Error decoding validation request."))?;
debug!("{} Candidate header: {:?}", process::id(), header);
let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
let (code, _) = code.split_at_mut(header.code_size as usize);
let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
let (call_data, _) = call_data.split_at_mut(header.params_size as usize);
let result = validate_candidate_internal(code, call_data, task_executor.clone());
debug!("{} Candidate validated: {:?}", process::id(), result);
match result {
Ok(r) => ValidationResultHeader::Ok(r),
Err(ValidationError::Internal(e)) =>
ValidationResultHeader::Error(WorkerValidationError::InternalError(e.to_string())),
Err(ValidationError::InvalidCandidate(e)) =>
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e.to_string())),
}
};
let mut data: &mut[u8] = &mut **slice;
result.encode_to(&mut data);
}
debug!("{} Signaling result", process::id());
memory.set(Event::ResultReady as usize, EventState::Signaled)
.map_err(|e| format!("Error setting shared event: {:?}", e))?;
}
Ok(())
}
/// Params header in shared memory. All offsets should be aligned to WASM page size.
#[derive(Encode, Decode, Debug)]
struct ValidationHeader {
code_size: u64,
params_size: u64,
}
#[derive(Encode, Decode, Debug)]
enum WorkerValidationError {
InternalError(String),
ValidationError(String),
}
#[derive(Encode, Decode, Debug)]
enum ValidationResultHeader {
Ok(ValidationResult),
Error(WorkerValidationError),
}
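// Layout of the shared segment, as carved up identically by host and worker:
//
//   [0, 1024)                        SCALE-encoded ValidationHeader
//   [1024, 1024 + MAX_CODE_MEM)      raw validation code
//   [..., ... + MAX_RUNTIME_MEM)     SCALE-encoded ValidationParams
//
// The worker overwrites the start of the segment with a SCALE-encoded
// ValidationResultHeader, which the host decodes from the first
// MAX_VALIDATION_RESULT_HEADER_MEM bytes.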
unsafe impl Send for ValidationHost {}
struct ValidationHostMemory(SharedMem);
impl std::fmt::Debug for ValidationHostMemory {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "ValidationHostMemory")
}
}
impl std::ops::Deref for ValidationHostMemory {
type Target = SharedMem;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for ValidationHostMemory {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Default, Debug)]
struct ValidationHost {
worker: Option<process::Child>,
memory: Option<ValidationHostMemory>,
id: u32,
}
impl Drop for ValidationHost {
fn drop(&mut self) {
if let Some(ref mut worker) = &mut self.worker {
worker.kill().ok();
}
}
}
impl ValidationHost {
fn create_memory() -> Result<SharedMem, InternalError> {
let mem_size = MAX_RUNTIME_MEM + MAX_CODE_MEM + MAX_VALIDATION_RESULT_HEADER_MEM;
let mem_config = SharedMemConf::default()
.set_size(mem_size)
.add_lock(shared_memory::LockType::Mutex, 0, mem_size)?
.add_event(shared_memory::EventType::Auto)? // Event::CandidateReady
.add_event(shared_memory::EventType::Auto)? // Event::ResultReady
.add_event(shared_memory::EventType::Auto)?; // Event::WorkerReady
Ok(mem_config.create()?)
}
fn start_worker(&mut self, cmd: &PathBuf, args: &[&str]) -> Result<(), InternalError> {
if let Some(ref mut worker) = self.worker {
// Check if still alive
if let Ok(None) = worker.try_wait() {
// Still running
return Ok(());
}
}
let memory = Self::create_memory()?;
debug!("Starting worker at {:?} with arguments: {:?} and {:?}", cmd, args, memory.get_os_path());
let worker = process::Command::new(cmd)
.args(args)
.arg(memory.get_os_path())
.stdin(process::Stdio::piped())
.spawn()?;
self.id = worker.id();
self.worker = Some(worker);
memory.wait(
Event::WorkerReady as usize,
shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize),
)?;
self.memory = Some(ValidationHostMemory(memory));
Ok(())
}
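    // Liveness is watched in both directions: the worker parks a thread on
    // reading the piped stdin created above and exits when this process dies
    // and the pipe closes (see run_worker), while ValidationHost's Drop impl
    // kills the child for the reverse case.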
/// Validate a candidate under the given validation code.
///
/// This will fail if the validation code is not a proper parachain validation module.
pub fn validate_candidate(
&mut self,
validation_code: &[u8],
params: ValidationParams,
binary: &PathBuf,
args: &[&str],
) -> Result<ValidationResult, ValidationError> {
if validation_code.len() > MAX_CODE_MEM {
return Err(ValidationError::InvalidCandidate(InvalidCandidate::CodeTooLarge(validation_code.len())));
}
// First, check if need to spawn the child process
self.start_worker(binary, args)?;
let memory = self.memory.as_mut()
.expect("memory is always `Some` after `start_worker` completes successfully");
{
// Put data in shared mem
let data: &mut[u8] = &mut **memory.wlock_as_slice(0)
            .map_err(|e| ValidationError::Internal(e.into()))?;
let (mut header_buf, rest) = data.split_at_mut(1024);
let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
let (code, _) = code.split_at_mut(validation_code.len());
let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
code[..validation_code.len()].copy_from_slice(validation_code);
let encoded_params = params.encode();
if encoded_params.len() >= MAX_RUNTIME_MEM {
return Err(ValidationError::InvalidCandidate(InvalidCandidate::ParamsTooLarge(MAX_RUNTIME_MEM)));
}
call_data[..encoded_params.len()].copy_from_slice(&encoded_params);
let header = ValidationHeader {
code_size: validation_code.len() as u64,
params_size: encoded_params.len() as u64,
};
header.encode_to(&mut header_buf);
}
debug!("{} Signaling candidate", self.id);
memory.set(Event::CandidateReady as usize, EventState::Signaled)
.map_err(|e| ValidationError::Internal(e.into()))?;
debug!("{} Waiting for results", self.id);
match memory.wait(Event::ResultReady as usize, shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize)) {
Err(e) => {
debug!("Worker timeout: {:?}", e);
if let Some(mut worker) = self.worker.take() {
worker.kill().ok();
}
return Err(ValidationError::InvalidCandidate(InvalidCandidate::Timeout));
}
Ok(()) => {}
}
{
debug!("{} Reading results", self.id);
let data: &[u8] = &**memory.wlock_as_slice(0)
.map_err(|e| ValidationError::Internal(e.into()))?;
let (header_buf, _) = data.split_at(MAX_VALIDATION_RESULT_HEADER_MEM);
let mut header_buf: &[u8] = header_buf;
let header = ValidationResultHeader::decode(&mut header_buf)
.map_err(|e|
InternalError::System(
Box::<dyn std::error::Error + Send + Sync>::from(
format!("Failed to decode `ValidationResultHeader`: {:?}", e)
) as Box<_>
)
)?;
match header {
ValidationResultHeader::Ok(result) => Ok(result),
ValidationResultHeader::Error(WorkerValidationError::InternalError(e)) => {
debug!("{} Internal validation error: {}", self.id, e);
Err(ValidationError::Internal(InternalError::WasmWorker(e)))
},
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e)) => {
debug!("{} External validation error: {}", self.id, e);
Err(ValidationError::InvalidCandidate(InvalidCandidate::ExternalWasmExecutor(e)))
}
}
}
}
}
validation_host.rs | // Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
#![cfg(not(any(target_os = "android", target_os = "unknown")))]
use std::{process, env, sync::Arc, sync::atomic, path::PathBuf};
use codec::{Decode, Encode};
use crate::primitives::{ValidationParams, ValidationResult};
use super::{
validate_candidate_internal, ValidationError, InvalidCandidate, InternalError,
MAX_CODE_MEM, MAX_RUNTIME_MEM, MAX_VALIDATION_RESULT_HEADER_MEM,
};
use shared_memory::{SharedMem, SharedMemConf, EventState, WriteLockable, EventWait, EventSet};
use parking_lot::Mutex;
use log::{debug, trace};
use futures::executor::ThreadPool;
use sp_core::traits::SpawnNamed;
const WORKER_ARG: &'static str = "validation-worker";
/// CLI Argument to start in validation worker mode.
pub const WORKER_ARGS: &[&'static str] = &[WORKER_ARG];
/// Execution timeout in seconds;
#[cfg(debug_assertions)]
pub const EXECUTION_TIMEOUT_SEC: u64 = 30;
#[cfg(not(debug_assertions))]
pub const EXECUTION_TIMEOUT_SEC: u64 = 5;
enum Event {
CandidateReady = 0,
ResultReady = 1,
WorkerReady = 2,
}
#[derive(Clone)]
struct TaskExecutor(ThreadPool);
impl TaskExecutor {
fn new() -> Result<Self, String> {
ThreadPool::new().map_err(|e| e.to_string()).map(Self)
}
}
impl SpawnNamed for TaskExecutor {
fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
self.0.spawn_ok(future);
}
fn spawn(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
self.0.spawn_ok(future);
}
}
/// A pool of hosts.
#[derive(Clone, Debug)]
pub struct ValidationPool {
hosts: Arc<Vec<Mutex<ValidationHost>>>,
}
const DEFAULT_NUM_HOSTS: usize = 8;
impl ValidationPool {
/// Creates a validation pool with the default configuration.
pub fn new() -> ValidationPool {
ValidationPool {
hosts: Arc::new((0..DEFAULT_NUM_HOSTS).map(|_| Default::default()).collect()),
}
}
/// Validate a candidate under the given validation code using the next free validation host.
///
/// This will fail if the validation code is not a proper parachain validation module.
///
/// This function will use `std::env::current_exe()` with the default arguments [`WORKER_ARGS`] to run the worker.
pub fn validate_candidate(
&self,
validation_code: &[u8],
params: ValidationParams,
) -> Result<ValidationResult, ValidationError> {
self.validate_candidate_custom(
validation_code,
params,
&env::current_exe().map_err(|err| ValidationError::Internal(err.into()))?,
WORKER_ARGS,
)
}
/// Validate a candidate under the given validation code using the next free validation host.
///
/// This will fail if the validation code is not a proper parachain validation module.
///
/// This function will use the command and the arguments provided in the function's arguments to run the worker.
pub fn validate_candidate_custom(
&self,
validation_code: &[u8],
params: ValidationParams,
command: &PathBuf,
args: &[&str],
) -> Result<ValidationResult, ValidationError> {
for host in self.hosts.iter() {
if let Some(mut host) = host.try_lock() {
return host.validate_candidate(validation_code, params, command, args)
}
}
// all workers are busy, just wait for the first one
self.hosts[0].lock().validate_candidate(validation_code, params, command, args)
}
}
/// Validation worker process entry point. Runs a loop waiting for candidates to validate
/// and sends back results via shared memory.
pub fn run_worker(mem_id: &str) -> Result<(), String> {
let mut memory = match SharedMem::open(mem_id) {
Ok(memory) => memory,
Err(e) => {
debug!("{} Error opening shared memory: {:?}", process::id(), e);
return Err(format!("Error opening shared memory: {:?}", e));
}
};
let exit = Arc::new(atomic::AtomicBool::new(false));
let task_executor = TaskExecutor::new()?;
// spawn parent monitor thread
let watch_exit = exit.clone();
std::thread::spawn(move || {
use std::io::Read;
let mut in_data = Vec::new();
// pipe terminates when parent process exits
std::io::stdin().read_to_end(&mut in_data).ok();
debug!("{} Parent process is dead. Exiting", process::id());
exit.store(true, atomic::Ordering::Relaxed);
});
memory.set(Event::WorkerReady as usize, EventState::Signaled)
.map_err(|e| format!("{} Error setting shared event: {:?}", process::id(), e))?;
loop {
if watch_exit.load(atomic::Ordering::Relaxed) {
break;
}
debug!("{} Waiting for candidate", process::id());
match memory.wait(Event::CandidateReady as usize, shared_memory::Timeout::Sec(3)) {
Err(e) => {
// Timeout
trace!("{} Timeout waiting for candidate: {:?}", process::id(), e);
continue;
}
Ok(()) => {}
}
{
debug!("{} Processing candidate", process::id());
// we have candidate data
let mut slice = memory.wlock_as_slice(0)
.map_err(|e| format!("Error locking shared memory: {:?}", e))?;
let result = {
let data: &mut[u8] = &mut **slice;
let (header_buf, rest) = data.split_at_mut(1024);
let mut header_buf: &[u8] = header_buf;
let header = ValidationHeader::decode(&mut header_buf)
.map_err(|_| format!("Error decoding validation request."))?;
debug!("{} Candidate header: {:?}", process::id(), header);
let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
let (code, _) = code.split_at_mut(header.code_size as usize);
let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
let (call_data, _) = call_data.split_at_mut(header.params_size as usize);
let result = validate_candidate_internal(code, call_data, task_executor.clone());
debug!("{} Candidate validated: {:?}", process::id(), result);
match result {
Ok(r) => ValidationResultHeader::Ok(r),
Err(ValidationError::Internal(e)) =>
ValidationResultHeader::Error(WorkerValidationError::InternalError(e.to_string())),
Err(ValidationError::InvalidCandidate(e)) =>
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e.to_string())),
}
};
let mut data: &mut[u8] = &mut **slice;
result.encode_to(&mut data);
}
debug!("{} Signaling result", process::id());
memory.set(Event::ResultReady as usize, EventState::Signaled)
.map_err(|e| format!("Error setting shared event: {:?}", e))?;
}
Ok(())
}
/// Params header in shared memory. All offsets should be aligned to WASM page size.
#[derive(Encode, Decode, Debug)]
struct ValidationHeader {
code_size: u64,
params_size: u64,
}
#[derive(Encode, Decode, Debug)]
enum WorkerValidationError {
InternalError(String),
ValidationError(String),
}
#[derive(Encode, Decode, Debug)]
enum ValidationResultHeader {
Ok(ValidationResult),
Error(WorkerValidationError),
}
unsafe impl Send for ValidationHost {}
struct ValidationHostMemory(SharedMem);
impl std::fmt::Debug for ValidationHostMemory {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "ValidationHostMemory")
}
}
impl std::ops::Deref for ValidationHostMemory {
type Target = SharedMem;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for ValidationHostMemory {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Default, Debug)]
struct ValidationHost {
worker: Option<process::Child>,
memory: Option<ValidationHostMemory>,
id: u32,
}
impl Drop for ValidationHost {
fn drop(&mut self) {
if let Some(ref mut worker) = &mut self.worker {
worker.kill().ok();
}
}
}
impl ValidationHost {
fn create_memory() -> Result<SharedMem, InternalError> {
let mem_size = MAX_RUNTIME_MEM + MAX_CODE_MEM + MAX_VALIDATION_RESULT_HEADER_MEM;
let mem_config = SharedMemConf::default()
.set_size(mem_size)
.add_lock(shared_memory::LockType::Mutex, 0, mem_size)?
.add_event(shared_memory::EventType::Auto)? // Event::CandidateReady
.add_event(shared_memory::EventType::Auto)? // Event::ResultReady
.add_event(shared_memory::EventType::Auto)?; // Event::WorkerReady
Ok(mem_config.create()?)
}
fn start_worker(&mut self, cmd: &PathBuf, args: &[&str]) -> Result<(), InternalError> {
if let Some(ref mut worker) = self.worker {
// Check if still alive
if let Ok(None) = worker.try_wait() {
// Still running
return Ok(());
}
}
let memory = Self::create_memory()?;
debug!("Starting worker at {:?} with arguments: {:?} and {:?}", cmd, args, memory.get_os_path());
let worker = process::Command::new(cmd)
.args(args)
.arg(memory.get_os_path())
.stdin(process::Stdio::piped())
.spawn()?;
self.id = worker.id();
self.worker = Some(worker);
memory.wait(
Event::WorkerReady as usize,
shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize),
)?;
self.memory = Some(ValidationHostMemory(memory));
Ok(())
}
/// Validate a candidate under the given validation code.
///
/// This will fail if the validation code is not a proper parachain validation module.
pub fn validate_candidate(
&mut self, | if validation_code.len() > MAX_CODE_MEM {
return Err(ValidationError::InvalidCandidate(InvalidCandidate::CodeTooLarge(validation_code.len())));
}
// First, check if need to spawn the child process
self.start_worker(binary, args)?;
let memory = self.memory.as_mut()
.expect("memory is always `Some` after `start_worker` completes successfully");
{
// Put data in shared mem
let data: &mut[u8] = &mut **memory.wlock_as_slice(0)
.map_err(|e|ValidationError::Internal(e.into()))?;
let (mut header_buf, rest) = data.split_at_mut(1024);
let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
let (code, _) = code.split_at_mut(validation_code.len());
let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
code[..validation_code.len()].copy_from_slice(validation_code);
let encoded_params = params.encode();
if encoded_params.len() >= MAX_RUNTIME_MEM {
return Err(ValidationError::InvalidCandidate(InvalidCandidate::ParamsTooLarge(MAX_RUNTIME_MEM)));
}
call_data[..encoded_params.len()].copy_from_slice(&encoded_params);
let header = ValidationHeader {
code_size: validation_code.len() as u64,
params_size: encoded_params.len() as u64,
};
header.encode_to(&mut header_buf);
}
debug!("{} Signaling candidate", self.id);
memory.set(Event::CandidateReady as usize, EventState::Signaled)
.map_err(|e| ValidationError::Internal(e.into()))?;
debug!("{} Waiting for results", self.id);
match memory.wait(Event::ResultReady as usize, shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize)) {
Err(e) => {
debug!("Worker timeout: {:?}", e);
if let Some(mut worker) = self.worker.take() {
worker.kill().ok();
}
return Err(ValidationError::InvalidCandidate(InvalidCandidate::Timeout));
}
Ok(()) => {}
}
{
debug!("{} Reading results", self.id);
let data: &[u8] = &**memory.wlock_as_slice(0)
.map_err(|e| ValidationError::Internal(e.into()))?;
let (header_buf, _) = data.split_at(MAX_VALIDATION_RESULT_HEADER_MEM);
let mut header_buf: &[u8] = header_buf;
let header = ValidationResultHeader::decode(&mut header_buf)
.map_err(|e|
InternalError::System(
Box::<dyn std::error::Error + Send + Sync>::from(
format!("Failed to decode `ValidationResultHeader`: {:?}", e)
) as Box<_>
)
)?;
match header {
ValidationResultHeader::Ok(result) => Ok(result),
ValidationResultHeader::Error(WorkerValidationError::InternalError(e)) => {
debug!("{} Internal validation error: {}", self.id, e);
Err(ValidationError::Internal(InternalError::WasmWorker(e)))
},
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e)) => {
debug!("{} External validation error: {}", self.id, e);
Err(ValidationError::InvalidCandidate(InvalidCandidate::ExternalWasmExecutor(e)))
}
}
}
}
} | validation_code: &[u8],
params: ValidationParams,
binary: &PathBuf,
args: &[&str],
) -> Result<ValidationResult, ValidationError> { | random_line_split |
validation_host.rs | // Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
#![cfg(not(any(target_os = "android", target_os = "unknown")))]
use std::{process, env, sync::Arc, sync::atomic, path::PathBuf};
use codec::{Decode, Encode};
use crate::primitives::{ValidationParams, ValidationResult};
use super::{
validate_candidate_internal, ValidationError, InvalidCandidate, InternalError,
MAX_CODE_MEM, MAX_RUNTIME_MEM, MAX_VALIDATION_RESULT_HEADER_MEM,
};
use shared_memory::{SharedMem, SharedMemConf, EventState, WriteLockable, EventWait, EventSet};
use parking_lot::Mutex;
use log::{debug, trace};
use futures::executor::ThreadPool;
use sp_core::traits::SpawnNamed;
const WORKER_ARG: &'static str = "validation-worker";
/// CLI Argument to start in validation worker mode.
pub const WORKER_ARGS: &[&'static str] = &[WORKER_ARG];
/// Execution timeout in seconds;
#[cfg(debug_assertions)]
pub const EXECUTION_TIMEOUT_SEC: u64 = 30;
#[cfg(not(debug_assertions))]
pub const EXECUTION_TIMEOUT_SEC: u64 = 5;
enum Event {
CandidateReady = 0,
ResultReady = 1,
WorkerReady = 2,
}
#[derive(Clone)]
struct TaskExecutor(ThreadPool);
impl TaskExecutor {
fn new() -> Result<Self, String> {
ThreadPool::new().map_err(|e| e.to_string()).map(Self)
}
}
impl SpawnNamed for TaskExecutor {
fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
self.0.spawn_ok(future);
}
fn spawn(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
self.0.spawn_ok(future);
}
}
/// A pool of hosts.
#[derive(Clone, Debug)]
pub struct ValidationPool {
hosts: Arc<Vec<Mutex<ValidationHost>>>,
}
const DEFAULT_NUM_HOSTS: usize = 8;
impl ValidationPool {
/// Creates a validation pool with the default configuration.
pub fn new() -> ValidationPool {
ValidationPool {
hosts: Arc::new((0..DEFAULT_NUM_HOSTS).map(|_| Default::default()).collect()),
}
}
/// Validate a candidate under the given validation code using the next free validation host.
///
/// This will fail if the validation code is not a proper parachain validation module.
///
/// This function will use `std::env::current_exe()` with the default arguments [`WORKER_ARGS`] to run the worker.
pub fn validate_candidate(
&self,
validation_code: &[u8],
params: ValidationParams,
) -> Result<ValidationResult, ValidationError> {
self.validate_candidate_custom(
validation_code,
params,
&env::current_exe().map_err(|err| ValidationError::Internal(err.into()))?,
WORKER_ARGS,
)
}
/// Validate a candidate under the given validation code using the next free validation host.
///
/// This will fail if the validation code is not a proper parachain validation module.
///
/// This function will use the command and the arguments provided in the function's arguments to run the worker.
pub fn validate_candidate_custom(
&self,
validation_code: &[u8],
params: ValidationParams,
command: &PathBuf,
args: &[&str],
) -> Result<ValidationResult, ValidationError> {
for host in self.hosts.iter() {
if let Some(mut host) = host.try_lock() {
return host.validate_candidate(validation_code, params, command, args)
}
}
// all workers are busy, just wait for the first one
self.hosts[0].lock().validate_candidate(validation_code, params, command, args)
}
}
/// Validation worker process entry point. Runs a loop waiting for candidates to validate
/// and sends back results via shared memory.
pub fn run_worker(mem_id: &str) -> Result<(), String> {
let mut memory = match SharedMem::open(mem_id) {
Ok(memory) => memory,
Err(e) => {
debug!("{} Error opening shared memory: {:?}", process::id(), e);
return Err(format!("Error opening shared memory: {:?}", e));
}
};
let exit = Arc::new(atomic::AtomicBool::new(false));
let task_executor = TaskExecutor::new()?;
// spawn parent monitor thread
let watch_exit = exit.clone();
std::thread::spawn(move || {
use std::io::Read;
let mut in_data = Vec::new();
// pipe terminates when parent process exits
std::io::stdin().read_to_end(&mut in_data).ok();
debug!("{} Parent process is dead. Exiting", process::id());
exit.store(true, atomic::Ordering::Relaxed);
});
memory.set(Event::WorkerReady as usize, EventState::Signaled)
.map_err(|e| format!("{} Error setting shared event: {:?}", process::id(), e))?;
loop {
if watch_exit.load(atomic::Ordering::Relaxed) {
break;
}
debug!("{} Waiting for candidate", process::id());
match memory.wait(Event::CandidateReady as usize, shared_memory::Timeout::Sec(3)) {
Err(e) => {
// Timeout
trace!("{} Timeout waiting for candidate: {:?}", process::id(), e);
continue;
}
Ok(()) => {}
}
{
debug!("{} Processing candidate", process::id());
// we have candidate data
let mut slice = memory.wlock_as_slice(0)
.map_err(|e| format!("Error locking shared memory: {:?}", e))?;
let result = {
let data: &mut[u8] = &mut **slice;
let (header_buf, rest) = data.split_at_mut(1024);
let mut header_buf: &[u8] = header_buf;
let header = ValidationHeader::decode(&mut header_buf)
.map_err(|_| format!("Error decoding validation request."))?;
debug!("{} Candidate header: {:?}", process::id(), header);
let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
let (code, _) = code.split_at_mut(header.code_size as usize);
let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
let (call_data, _) = call_data.split_at_mut(header.params_size as usize);
let result = validate_candidate_internal(code, call_data, task_executor.clone());
debug!("{} Candidate validated: {:?}", process::id(), result);
match result {
Ok(r) => ValidationResultHeader::Ok(r),
Err(ValidationError::Internal(e)) =>
ValidationResultHeader::Error(WorkerValidationError::InternalError(e.to_string())),
Err(ValidationError::InvalidCandidate(e)) =>
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e.to_string())),
}
};
let mut data: &mut[u8] = &mut **slice;
result.encode_to(&mut data);
}
debug!("{} Signaling result", process::id());
memory.set(Event::ResultReady as usize, EventState::Signaled)
.map_err(|e| format!("Error setting shared event: {:?}", e))?;
}
Ok(())
}
/// Params header in shared memory. All offsets should be aligned to WASM page size.
#[derive(Encode, Decode, Debug)]
struct ValidationHeader {
code_size: u64,
params_size: u64,
}
#[derive(Encode, Decode, Debug)]
enum WorkerValidationError {
InternalError(String),
ValidationError(String),
}
#[derive(Encode, Decode, Debug)]
enum ValidationResultHeader {
Ok(ValidationResult),
Error(WorkerValidationError),
}
unsafe impl Send for ValidationHost {}
struct ValidationHostMemory(SharedMem);
impl std::fmt::Debug for ValidationHostMemory {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "ValidationHostMemory")
}
}
impl std::ops::Deref for ValidationHostMemory {
type Target = SharedMem;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for ValidationHostMemory {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Default, Debug)]
struct ValidationHost {
worker: Option<process::Child>,
memory: Option<ValidationHostMemory>,
id: u32,
}
impl Drop for ValidationHost {
fn drop(&mut self) {
if let Some(ref mut worker) = &mut self.worker {
worker.kill().ok();
}
}
}
impl ValidationHost {
fn create_memory() -> Result<SharedMem, InternalError> {
let mem_size = MAX_RUNTIME_MEM + MAX_CODE_MEM + MAX_VALIDATION_RESULT_HEADER_MEM;
let mem_config = SharedMemConf::default()
.set_size(mem_size)
.add_lock(shared_memory::LockType::Mutex, 0, mem_size)?
.add_event(shared_memory::EventType::Auto)? // Event::CandidateReady
.add_event(shared_memory::EventType::Auto)? // Event::ResultReady
.add_event(shared_memory::EventType::Auto)?; // Event::WorkerReady
Ok(mem_config.create()?)
}
fn start_worker(&mut self, cmd: &PathBuf, args: &[&str]) -> Result<(), InternalError> {
if let Some(ref mut worker) = self.worker {
// Check if still alive
if let Ok(None) = worker.try_wait() {
// Still running
return Ok(());
}
}
let memory = Self::create_memory()?;
debug!("Starting worker at {:?} with arguments: {:?} and {:?}", cmd, args, memory.get_os_path());
let worker = process::Command::new(cmd)
.args(args)
.arg(memory.get_os_path())
.stdin(process::Stdio::piped())
.spawn()?;
self.id = worker.id();
self.worker = Some(worker);
memory.wait(
Event::WorkerReady as usize,
shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize),
)?;
self.memory = Some(ValidationHostMemory(memory));
Ok(())
}
/// Validate a candidate under the given validation code.
///
/// This will fail if the validation code is not a proper parachain validation module.
pub fn validate_candidate(
&mut self,
validation_code: &[u8],
params: ValidationParams,
binary: &PathBuf,
args: &[&str],
) -> Result<ValidationResult, ValidationError> {
if validation_code.len() > MAX_CODE_MEM {
return Err(ValidationError::InvalidCandidate(InvalidCandidate::CodeTooLarge(validation_code.len())));
}
// First, check if we need to spawn the child process
self.start_worker(binary, args)?;
let memory = self.memory.as_mut()
.expect("memory is always `Some` after `start_worker` completes successfully");
{
// Put data in shared mem
let data: &mut [u8] = &mut **memory.wlock_as_slice(0)
.map_err(|e| ValidationError::Internal(e.into()))?;
let (mut header_buf, rest) = data.split_at_mut(1024);
let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
let (code, _) = code.split_at_mut(validation_code.len());
let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
code[..validation_code.len()].copy_from_slice(validation_code);
let encoded_params = params.encode();
if encoded_params.len() >= MAX_RUNTIME_MEM {
return Err(ValidationError::InvalidCandidate(InvalidCandidate::ParamsTooLarge(MAX_RUNTIME_MEM)));
}
call_data[..encoded_params.len()].copy_from_slice(&encoded_params);
let header = ValidationHeader {
code_size: validation_code.len() as u64,
params_size: encoded_params.len() as u64,
};
header.encode_to(&mut header_buf);
}
debug!("{} Signaling candidate", self.id);
memory.set(Event::CandidateReady as usize, EventState::Signaled)
.map_err(|e| ValidationError::Internal(e.into()))?;
debug!("{} Waiting for results", self.id);
match memory.wait(Event::ResultReady as usize, shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize)) {
Err(e) => {
debug!("Worker timeout: {:?}", e);
if let Some(mut worker) = self.worker.take() {
worker.kill().ok();
}
return Err(ValidationError::InvalidCandidate(InvalidCandidate::Timeout));
}
Ok(()) => {}
}
{
debug!("{} Reading results", self.id);
let data: &[u8] = &**memory.wlock_as_slice(0)
.map_err(|e| ValidationError::Internal(e.into()))?;
let (header_buf, _) = data.split_at(MAX_VALIDATION_RESULT_HEADER_MEM);
let mut header_buf: &[u8] = header_buf;
let header = ValidationResultHeader::decode(&mut header_buf)
.map_err(|e|
InternalError::System(
Box::<dyn std::error::Error + Send + Sync>::from(
format!("Failed to decode `ValidationResultHeader`: {:?}", e)
) as Box<_>
)
)?;
match header {
ValidationResultHeader::Ok(result) => Ok(result),
ValidationResultHeader::Error(WorkerValidationError::InternalError(e)) => {
debug!("{} Internal validation error: {}", self.id, e);
Err(ValidationError::Internal(InternalError::WasmWorker(e)))
},
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e)) => {
debug!("{} External validation error: {}", self.id, e);
Err(ValidationError::InvalidCandidate(InvalidCandidate::ExternalWasmExecutor(e)))
}
}
}
}
}
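// Hedged usage sketch (hypothetical worker path and arguments):
//
// let mut host = ValidationHost::default();
// let result = host.validate_candidate(
//     &validation_code,
//     params,
//     &PathBuf::from("target/release/validation_worker"),
//     &["validation-worker"],
// )?;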
log_file.rs
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::ffi::CString;
use std::io::{Result, Error, ErrorKind};
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use libc;
use log::{error, info, warn};
use crate::{Data, ArcDataSlice};
use super::*;
pub(super) struct LogFile {
file_path: Box<PathBuf>,
pub file_id: FileId,
fd: libc::c_int,
pub len: usize,
pub max_size: usize,
pub file_uuid: uuid::Uuid
}
impl Drop for LogFile {
fn drop(&mut self) {
unsafe {
libc::close(self.fd);
}
}
}
impl fmt::Display for LogFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "CRL Sweeper File: {}", self.file_path.as_path().display())
}
}
#[cfg(target_os="linux")]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
const O_DIRECT: libc::c_int = 0x4000;
const O_DSYNC: libc::c_int = 0x1000; // 0o10000 in the Linux headers
unsafe {
libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR | O_DIRECT | O_DSYNC, 0o644) // O_CREAT requires an explicit mode
}
}
#[cfg(target_os="macos")]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
const F_NOCACHE: libc::c_int = 0x30;
unsafe {
let mut fd = libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR, 0o644); // O_CREAT requires an explicit mode
if fd > 0 {
if libc::fchmod(fd, 0o644) < 0 {
fd = -1;
}
}
if fd > 0 {
if libc::fcntl(fd, F_NOCACHE, 1) < 0 {
fd = -1;
}
}
fd
}
}
#[cfg(not(any(target_os="linux", target_os="macos")))]
fn open_synchronous_fd(path: &CString) -> libc::c_int {
unsafe {
libc::open(path.as_ptr(), libc::O_CREAT | libc::O_RDWR, 0o644) // O_CREAT requires an explicit mode
}
}
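// The three open_synchronous_fd() variants above trade portability for write
// durability: Linux opens with O_DIRECT|O_DSYNC, macOS disables the buffer
// cache with fcntl(F_NOCACHE) after opening, and other platforms fall back to
// a plain open() paired with the explicit fsync() calls in write()/write_bytes().
//
// Usage sketch (hypothetical path; callers must check for a negative fd, as
// LogFile::new() does):
//
// let cpath = std::ffi::CString::new("/tmp/crl/0").unwrap();
// let fd = open_synchronous_fd(&cpath);
// assert!(fd >= 0, "open failed: {}", std::io::Error::last_os_error());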
impl LogFile {
fn new(
directory: &Path,
file_id: FileId,
max_file_size: usize) -> Result<(LogFile, Option<(LogEntrySerialNumber, usize)>)> {
let f = format!("{}", file_id.0);
let p = directory.join(f);
let fp = p.as_path();
let fd = open_synchronous_fd(&CString::new(fp.as_os_str().as_bytes()).unwrap());
if fd < 0 {
error!("Failed to open CRL file {}", fp.display());
return Err(Error::last_os_error());
}
let mut size = seek(fd, 0, libc::SEEK_END)?;
if size < (16 + STATIC_ENTRY_SIZE as usize) {
// Initialize
seek(fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(fd, 0);
}
let u = uuid::Uuid::new_v4();
write_bytes(fd, &u.as_bytes()[..])?;
}
let file_uuid = pread_uuid(fd, 0)?;
size = seek(fd, 0, libc::SEEK_END)?;
let last = find_last_valid_entry(fd, size, &file_uuid)?;
let lf = LogFile{
file_path: Box::new(p),
file_id,
fd,
len: size as usize,
max_size: max_file_size,
file_uuid
};
Ok((lf, last))
}
fn read(&self, offset: usize, nbytes: usize) -> Result<Data> {
let mut v = Vec::<u8>::with_capacity(nbytes);
if nbytes > 0 {
v.resize(nbytes, 0);
pread_bytes(self.fd, &mut v[..], offset)?;
}
Ok(Data::new(v))
}
pub(super) fn write(&mut self, data: &Vec<ArcDataSlice>) -> Result<()> {
let wsize: usize = data.iter().map(|d| d.len()).sum();
unsafe {
let iov: Vec<libc::iovec> = data.iter().map( |d| {
let p: *const u8 = &d.as_bytes()[0];
libc::iovec {
iov_base: p as *mut libc::c_void,
iov_len: d.len()
}
}).collect();
loop {
if libc::writev(self.fd, &iov[0], data.len() as libc::c_int) >= 0 {
break;
} else {
let err = Error::last_os_error();
match err.kind() {
ErrorKind::Interrupted => (),
_ => return {
warn!("Unexpected error occured during CRL file write: {}", err);
Err(err)
}
}
}
}
if !(cfg!(target_os = "linux") || cfg!(target_os = "macos")) {
libc::fsync(self.fd);
}
}
self.len += wsize;
Ok(())
}
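// Note on the retry loop above: only EINTR is retried, and any non-negative
// writev() count is treated as a complete transfer, so `self.len` assumes the
// kernel never performs a short vectored write on these descriptors.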
pub(super) fn recycle(&mut self) -> Result<()> {
info!("Recycling {}", self);
seek(self.fd, 0, libc::SEEK_SET)?;
unsafe {
libc::ftruncate(self.fd, 0);
}
self.file_uuid = uuid::Uuid::new_v4();
self.len = 16;
write_bytes(self.fd, &self.file_uuid.as_bytes()[..])?;
Ok(())
}
}
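// On-disk layout implied by the methods above (a reconstruction, with sizes
// taken from the code rather than any spec):
//
//   [0 .. 16)   file UUID, rewritten by recycle()
//   [16 .. )    log entries; a committed entry block is written so that it
//               ends on a 4096-byte boundary, with the file UUID in its last
//               16 bytes as a validity marker and the static entry (whose
//               first 8 bytes are a little-endian u64 serial) occupying the
//               final STATIC_ENTRY_SIZE bytes.
//
// find_last_valid_entry() below scans backwards over 4096-byte boundaries
// for that UUID marker to locate the most recent entry.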
fn pread_bytes(fd: libc::c_int, s: &mut [u8], offset: usize) -> Result<()> {
if s.len() == 0 {
Ok(())
} else {
let p: *mut u8 = &mut s[0];
unsafe {
if libc::pread(fd, p as *mut libc::c_void, s.len(), offset as libc::off_t) < 0 {
Err(Error::last_os_error())
} else {
Ok(())
}
}
}
}
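// `pread_bytes` above treats any non-negative pread() count as a full read.
// A stricter variant that resumes short reads and retries EINTR might look
// like this (a sketch; nothing in this file currently calls it):
#[allow(dead_code)]
fn pread_exact(fd: libc::c_int, s: &mut [u8], mut offset: usize) -> Result<()> {
    let mut done = 0usize;
    while done < s.len() {
        let n = unsafe {
            libc::pread(
                fd,
                s[done..].as_mut_ptr() as *mut libc::c_void,
                s.len() - done,
                offset as libc::off_t,
            )
        };
        if n < 0 {
            let err = Error::last_os_error();
            if err.kind() == ErrorKind::Interrupted {
                continue; // interrupted by a signal; retry the same range
            }
            return Err(err);
        }
        if n == 0 {
            return Err(Error::new(ErrorKind::UnexpectedEof, "pread hit EOF"));
        }
        done += n as usize;
        offset += n as usize;
    }
    Ok(())
}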
fn pread_uuid(fd: libc::c_int, offset: usize) -> Result<uuid::Uuid> {
let mut buf: [u8; 16] = [0; 16];
pread_bytes(fd, &mut buf[..], offset)?;
Ok(uuid::Uuid::from_bytes(buf))
}
fn write_bytes(fd: libc::c_int, s: &[u8]) -> Result<()> {
let p: *const u8 = &s[0];
unsafe {
if libc::write(fd, p as *const libc::c_void, s.len()) < 0 {
return Err(Error::last_os_error());
}
libc::fsync(fd);
}
Ok(())
}
fn seek(fd: libc::c_int, offset: i64, whence: libc::c_int) -> Result<usize> {
unsafe {
let sz = libc::lseek(fd, offset, whence);
if sz < 0 {
Err(Error::last_os_error())
} else {
Ok(sz as usize)
}
}
}
fn find_last_valid_entry(
fd: libc::c_int,
file_size: usize,
file_uuid: &uuid::Uuid) -> Result<Option<(LogEntrySerialNumber, usize)>> {
let mut offset = file_size - (file_size % 4096);
let mut last = None;
while offset > 32 && last.is_none() {
let test_uuid = pread_uuid(fd, offset - 16)?;
if test_uuid == *file_uuid {
let entry_offset = offset - STATIC_ENTRY_SIZE as usize;
let mut serial_bytes: [u8; 8] = [0; 8];
pread_bytes(fd, &mut serial_bytes[..], entry_offset)?;
let serial = u64::from_le_bytes(serial_bytes);
last = Some((LogEntrySerialNumber(serial), entry_offset));
break;
}
offset -= 4096;
}
//println!("LAST: {:?} file size {} offset {}", last, file_size, (file_size - (file_size % 4096)));
Ok(last)
}
pub(super) fn recover(
crl_directory: &Path,
max_file_size: usize,
num_streams: usize) -> Result<RecoveredCrlState> {
let mut raw_files = Vec::<(LogFile, Option<(LogEntrySerialNumber, usize)>)>::new();
for i in 0..num_streams * 3 {
let f = LogFile::new(crl_directory, FileId(i as u16), max_file_size)?;
raw_files.push(f);
}
let mut last: Option<(FileId, LogEntrySerialNumber, usize)> = None;
for t in &raw_files {
if let Some((serial, offset)) = &t.1 {
if let Some((_, cur_serial, _)) = &last {
if serial > cur_serial {
last = Some((t.0.file_id, *serial, *offset));
}
} else {
last = Some((t.0.file_id, *serial, *offset))
}
}
}
let mut files: Vec<(LogFile, Option<LogEntrySerialNumber>)> = Vec::new();
for t in raw_files {
files.push((t.0, t.1.map(|x| x.0)));
}
let mut tx: Vec<RecoveredTx> = Vec::new();
let mut alloc: Vec<RecoveredAlloc> = Vec::new();
let mut last_entry_serial = LogEntrySerialNumber(0);
let mut last_entry_location = FileLocation {
file_id: FileId(0),
offset: 0,
length: 0
};
if let Some((last_file_id, last_serial, last_offset)) = last {
last_entry_serial = last_serial;
last_entry_location = FileLocation {
file_id: last_file_id,
offset: last_offset as u64,
length: STATIC_ENTRY_SIZE as u32
};
let mut transactions: HashMap<TxId, RecoveringTx> = HashMap::new();
let mut allocations: HashMap<TxId, RecoveringAlloc> = HashMap::new();
let mut deleted_tx: HashSet<TxId> = HashSet::new();
let mut deleted_alloc: HashSet<TxId> = HashSet::new();
let mut file_id = last_file_id;
let mut entry_serial = last_serial;
let mut entry_block_offset = last_offset;
let earliest_serial_needed = {
let mut d = files[last_file_id.0 as usize].0.read(last_offset, STATIC_ENTRY_SIZE as usize)?;
let entry = encoding::decode_entry(&mut d)?;
LogEntrySerialNumber(entry.earliest_needed)
};
while entry_serial >= earliest_serial_needed {
let file = &files[file_id.0 as usize].0;
let mut d = file.read(entry_block_offset, STATIC_ENTRY_SIZE as usize)?;
let mut entry = encoding::decode_entry(&mut d)?;
entry_serial = entry.serial;
//println!("Reading Entry {:?} entry_block_offset {} entry offset {}", entry_serial, entry_block_offset, entry.entry_offset);
let entry_data_size = entry_block_offset - entry.entry_offset as usize;
let mut entry_data = file.read(entry.entry_offset as usize, entry_data_size)?;
encoding::load_entry_data(&mut entry_data, &mut entry, entry_serial)?;
for txid in &entry.tx_deletions {
deleted_tx.insert(*txid);
}
for txid in &entry.alloc_deletions {
deleted_alloc.insert(*txid);
}
for rtx in entry.transactions {
if !deleted_tx.contains(&rtx.id) && !transactions.contains_key(&rtx.id) {
transactions.insert(rtx.id, rtx);
}
}
for ra in entry.allocations {
if !deleted_alloc.contains(&ra.id) && !allocations.contains_key(&ra.id) {
allocations.insert(ra.id, ra);
}
}
if entry.previous_entry_location.offset < 16 {
break; // Cannot have an offset of 0 (first 16 bytes of the file are the UUID)
}
file_id = entry.previous_entry_location.file_id;
entry_block_offset = entry.previous_entry_location.offset as usize;
}
let get_data = |file_location: &FileLocation| -> Result<ArcData> {
let d = files[file_location.file_id.0 as usize].0.read(file_location.offset as usize, file_location.length as usize)?;
Ok(d.into())
};
let get_slice = |file_location: &FileLocation| -> Result<ArcDataSlice> {
let d = get_data(file_location)?;
Ok(d.into())
};
for (txid, rtx) in transactions {
let mut ou: Vec<transaction::ObjectUpdate> = Vec::with_capacity(rtx.object_updates.len());
for t in &rtx.object_updates {
ou.push(transaction::ObjectUpdate {
object_id: object::Id(t.0),
data: get_slice(&t.1)?
});
}
tx.push( RecoveredTx {
id: txid,
txd_location: rtx.serialized_transaction_description,
serialized_transaction_description: get_data(&rtx.serialized_transaction_description)?,
object_updates: ou,
update_locations: rtx.object_updates,
tx_disposition: rtx.tx_disposition,
paxos_state: rtx.paxos_state,
last_entry_serial: rtx.last_entry_serial
});
}
for (txid, ra) in allocations {
alloc.push(RecoveredAlloc{
id: txid,
store_pointer: ra.store_pointer,
object_id: ra.object_id,
kind: ra.kind,
size: ra.size,
data_location: ra.data,
data: get_data(&ra.data)?,
refcount: ra.refcount,
timestamp: ra.timestamp,
serialized_revision_guard: ra.serialized_revision_guard,
last_entry_serial: ra.last_entry_serial
});
}
};
Ok(RecoveredCrlState {
log_files: files,
transactions: tx,
allocations: alloc,
last_entry_serial,
last_entry_location
})
}
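// Usage sketch (hypothetical directory; RecoveredCrlState is consumed by the
// CRL initialization code elsewhere in this crate):
//
// let state = recover(Path::new("/var/lib/crl"), 16 * 1024 * 1024, 3)?;
// println!("resuming from serial {:?} at {:?}",
//          state.last_entry_serial, state.last_entry_location);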
#[cfg(test)]
mod tests {
use tempdir::TempDir;
use super::*;
#[test]
fn initialization() {
let t = TempDir::new("test").unwrap();
let (l, o) = LogFile::new(t.path(), FileId(0), 50).unwrap();
let u = l.file_uuid;
assert!(o.is_none());
assert_eq!(l.len, 16);
let ru = pread_uuid(l.fd, 0).unwrap();
assert_eq!(u, ru);
unsafe {
let n = libc::lseek(l.fd, 0, libc::SEEK_END);
assert_eq!(n, 16);
}
}
#[test]
fn recycle() {
let t = TempDir::new("test").unwrap();
let (mut l, o) = LogFile::new(t.path(), FileId(0), 50).unwrap();
let u = l.file_uuid;
assert!(o.is_none());
assert_eq!(l.len, 16);
let ru = pread_uuid(l.fd, 0).unwrap();
assert_eq!(u, ru);
write_bytes(l.fd, &[1u8,2u8,3u8,4u8]).unwrap();
unsafe {
let n = libc::lseek(l.fd, 0, libc::SEEK_END);
assert_eq!(n, 20);
}
l.recycle().unwrap();
let ru = pread_uuid(l.fd, 0).unwrap();
assert_ne!(u, ru);
assert_eq!(l.file_uuid, ru);
unsafe {
let n = libc::lseek(l.fd, 0, libc::SEEK_END);
assert_eq!(n, 16);
}
}
}
symm_icon.rs
// Symmetric Icons
#![allow(dead_code)]
use array2d::*;
// lambda, alpha, beta, gamma, omega, symmetry, scale
const PRESETS: [[f32; 7]; 36] = [
[1.56, -1., 0.1, -0.82, -0.3, 3., 1.7], [-1.806, 1.806, 0., 1.5, 0., 7., 1.1],
[2.4, -2.5, -0.9, 0.9, 0., 3., 1.5], [-2.7, 5., 1.5, 1., 0., 4., 1.],
[-2.5, 8., -0.7, 1., 0., 5., 0.8], [-1.9, 1.806, -0.85, 1.8, 0., 7., 1.2],
[2.409, -2.5, 0., 0.9, 0., 4., 1.4], [-1.806, 1.807, -0.07, 1.08, 0., 6., 1.2],
[-2.34, 2.2, 0.4, 0.05, 0., 5., 1.2], [-2.57, 3.2, 1.2, -1.75, 0., 36., 1.2],
[-2.6, 4., 1.5, 1., 0., 12., 1.1], [-2.2, 2.3, 0.55, -0.90, 0., 3., 1.3],
[-2.205, 6.01, 13.5814, -0.2044, 0.011, 5., 0.8],
[-2.7, 8.7, 13.86, -0.13, -0.18, 18., 0.8], [-2.52, 8.75, 12., 0.04, 0.18, 5., 0.8],
[2.38, -4.18, 19.99, -0.69, 0.095, 17., 1.], [2.33, -8.22, -6.07, -0.52, 0.16, 4., 0.8],
[-1.62, 2.049, 1.422, 1.96, 0.56, 6., 1.], [-1.89, 9.62, 1.95, 0.51, 0.21, 3., 0.6],
[-1.65, 9.99, 1.57, 1.46, -0.55, 3., 0.8], [-2.7, 5., 1.5, 1., 0., 6., 1.],
[-2.08, 1., -0.1, 0.167, 0., 7., 1.3], [1.56, -1., 0.1, -0.82, 0.12, 3., 1.6],
[-1.806, 1.806, 0., 1., 0., 5., 1.1], [1.56, -1., 0.1, -0.82, 0., 3., 1.3],
[-2.195, 10., -12., 1., 0., 3., 0.7], [-1.86, 2., 0., 1., 0.1, 4., 1.2],
[-2.34, 2., 0.2, 0.1, 0., 5., 1.2], [2.6, -2., 0., 0.5, 0., 5., 1.3],
[-2.5, 5., -1.9, 1., 0.188, 5., 1.], [2.409, -2.5, 0., 0.9, 0., 23., 1.2],
[2.409, -2.5, -0.2, 0.81, 0., 24., 1.2], [-2.05, 3., -16.79, 1., 0., 9., 1.],
[-2.32, 2.32, 0., 0.75, 0., 5., 1.2], [2.5, -2.5, 0., 0.9, 0., 3., 1.3],
[1.5, -1., 0.1, -0.805, 0., 3., 1.4],
];
const MAX_XY : f32 = 1e5;
const DEFAULT_SPEED : u32 = 100;
const MAX_COLORS : u32 = 2111;
const COLOR_SPEED : u32 = 3071;
pub struct SymmetricIcons {
lambda : f32,
alpha : f32,
beta : f32,
gamma : f32,
omega : f32,
symmetry : u32,
scale : f32,
w : usize,
h : usize,
color_set : u32,
iter : u32,
speed : u32,
apcx : f32,
apcy : f32,
rad : f32,
color_list: Vec<u32>,
icon : Array2D<u32>,
image : Array2D<u32>,
x : f32,
y : f32,
k : u32,
}
impl SymmetricIcons {
pub fn new(w : usize, h : usize, color_set : u32) -> Self {
let mut s = Self {
lambda : 0.0,
alpha : 0.0,
beta : 0.0,
gamma : 0.0,
omega : 0.0,
symmetry : 0,
scale : 0.0,
w : w,
h : h,
color_set : color_set,
iter : 0,
speed : DEFAULT_SPEED,
apcx : 0.0,
apcy : 0.0,
rad : 0.0,
color_list : vec![],
icon : Array2D::filled_with(0_u32, w, h),
image : Array2D::filled_with(0_u32, w, h),
x : 0.0,
y : 0.0,
k : 0,
};
s.set_preset(0);
s
}
pub fn set_size(&mut self, w : usize, h : usize) {
self.w = w;
self.h = h;
self.image = Array2D::filled_with(0_u32, w, h);
self.icon = Array2D::filled_with(0_u32, w, h);
self.iter = 0;
self.color_list = vec![];
self.reset();
}
pub fn set_preset(&mut self, i : usize) {
let p = PRESETS[i % PRESETS.len()];
self.lambda = p[0];
self.alpha = p[1];
self.beta = p[2];
self.gamma = p[3];
self.omega = p[4];
self.symmetry = p[5] as u32;
self.scale = if p[6] == 0. {1.} else {p[6]};
self.reset();
}
pub fn set_parameters(&mut self, lambda : f32, alpha: f32, beta : f32, gamma : f32, omega : f32, symmetry : f32, scale : f32) {
self.lambda = lambda;
self.alpha = alpha;
self.beta = beta;
self.gamma = gamma;
self.omega = omega;
self.symmetry = if symmetry < 1. { 1 } else { symmetry as u32 };
self.scale = if scale == 0. {1.} else { scale };
self.reset();
}
fn make_color(r : u32, g : u32, b : u32) -> u32 { (b << 16) | (g << 8) | r | 0xff00_0000 }
fn make_colora(a : u32, r : u32, g : u32, b : u32) -> u32 { (a << 24) | (b << 16) | (g << 8) | r }
fn get_rainbow(x : u32, y : u32) -> u32 {
match x {
0 => Self::make_color(0, y, 255),
1 => Self::make_color(0, 255, 255 - y),
2 => Self::make_color(y, 255, 0),
3 => Self::make_color(255, 255 - y, 0),
4 => Self::make_color(255, 0, y),
5 => Self::make_color(255 - y, 0, 255),
_ => Self::make_color(0,0,0), // black
}
}
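// Worked example: get_rainbow(2, 128) lands in segment 2 (green saturated,
// red ramping up), returning make_color(128, 255, 0) == 0xFF00_FF80.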
fn set_colors(&mut self, param_int : u32) {
let mut colors = vec![0_u32; (MAX_COLORS+1) as usize];
match param_int {
0 => {
for i in 0..64 { colors[i] = Self::make_color(0, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
1 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(i, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
2 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
3 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
4 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 0) }
for i in 0..256 {
let local_color = Self::make_color(255, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
5 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
6 => for i in 0..256 { colors[(i + 64)] = Self::make_colora(255, 255 - i as u32, 255 - i as u32, 255) },
7 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255 - i as u32, 255, 255) },
8 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255 - i as u32, 255, 255 - i as u32) },
9 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255, 255 - i as u32) },
10 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255 - i as u32, 255 - i as u32)},
11 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255 - i as u32, 255)},
_ => ()
}
if param_int > 5 {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 4 * i as u32) }
for j in 0..5 {
for i in 0..256 {
colors[(320 + j * 256 + i)] = Self::get_rainbow((param_int + j as u32) % 6, i as u32)
}
}
for i in 0..256 {
let local_color = Self::get_rainbow((param_int - 1) % 6, i);
colors[(1600 + 2 * i as usize)] = local_color;
colors[(1601 + 2 * i as usize)] = local_color;
}
} else { // <= 5
for j in 0..5 {
for i in 0..256 {
colors[64 + j * 256 + i] = Self::get_rainbow((param_int + j as u32) % 6, i as u32);
}
}
}
self.color_list = colors
}
fn reset(&mut self) {
self.speed = DEFAULT_SPEED;
self.apcx = self.w as f32 / 2.;
self.apcy = self.h as f32 / 2.;
self.rad = if self.apcx > self.apcy {self.apcy} else {self.apcx};
self.k = 0;
self.x = 0.01;
self.y = 0.003;
self.iter = 0;
self.icon = Array2D::filled_with(0_u32, self.w, self.h);
self.image = Array2D::filled_with(0_u32, self.w, self.h);
self.set_colors(self.color_set);
for m in 0..self.w {
for n in 0..self.h {
let color = self.get_color(self.icon[(m, n)]);
self.set_point_color(m, n, color);
}
}
}
fn set_point_color(&mut self, x : usize, y : usize, color : u32) {
self.image[(x, y)] = color;
}
fn get_color(&mut self, col : u32) -> u32 {
let col = col & 0x00ffffff;
if col * self.speed > MAX_COLORS {
while (col * self.speed > COLOR_SPEED) && (self.speed > 3) { self.speed -= 1 }
self.color_list[MAX_COLORS as usize]
} else {
self.color_list[(col * self.speed) as usize]
}
}
fn set_point(&mut self, x : usize, y : usize) {
let icon = self.icon[(x,y)];
let color = self.get_color(icon);
self.image[(x,y)] = color;
self.icon[(x,y)] += 1;
if icon >= 12288 { self.icon[(x,y)] = 8192 }
}
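// The step below is the "symmetric icon" map popularized by Field & Golubitsky
// (Symmetry in Chaos), written out in real arithmetic. With z = x + iy and
// n = symmetry, one iteration computes:
//
//   z' = (lambda + alpha*|z|^2 + beta*Re(z^n) + omega*i) * z
//        + gamma * conj(z)^(n-1)
//
// The loop over _m accumulates z^(n-1) in (tx, ty), sqx picks out Re(z^n),
// and the gamma/omega terms below are the real and imaginary remainders.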
pub fn generate(&mut self, mod_disp : u32) -> bool { // generate the icon; runs in a thread in 'start'
self.iter+=1;
if self.x.abs() > MAX_XY || self.y.abs() > MAX_XY {
self.reset(); // prevent overflow
}
// generate new x,y
let sq = self.x * self.x + self.y * self.y; // sq=x^2+y^2
let mut tx = self.x;
let mut ty = self.y; // tx=pow, ty=pow
for _m in 1..self.symmetry - 2 + 1 {
let sqx = tx * self.x - ty * self.y;
let sqy = ty * self.x + tx * self.y;
tx = sqx;
ty = sqy;
}
let sqx = self.x * tx - self.y * ty;
let tmp = self.lambda + self.alpha * sq + self.beta * sqx;
let x_new = tmp * self.x + self.gamma * tx - self.omega * self.y;
let y_new = tmp * self.y - self.gamma * ty + self.omega * self.x;
self.x = x_new;
self.y = y_new;
if self.k > 50 {
self.set_point((self.apcx + self.x * self.rad / self.scale) as usize,
(self.apcy + self.y * self.rad / self.scale) as usize);
} else {
self.k += 1;
}
self.iter % mod_disp == 0
}
pub fn build(&mut self, preset : usize, n_iters : usize) -> (Vec<u8>, (usize, usize)) {
self.set_preset(preset);
for _ in 0..n_iters { self.generate(1); }
( self.get_image(), self.get_size() )
}
pub fn get_size(&self) -> (usize, usize) {
( self.w, self.h )
}
pub fn write(&self, name : &str) { // showbinimage.py 800 800 symm_icon.bin
use std::fs::File;
use std::io::prelude::*;
File::create(name).expect("create failed")
.write_all(&self.get_image()).expect("write failed");
}
pub fn get_image(&self) -> Vec<u8> { // copy the u32 pixel buffer out as native-endian bytes
// An owned Vec is returned because `as_row_major()` allocates a temporary;
// handing out a `&[u8]` view into it would dangle once it is dropped.
let v = self.image.as_row_major();
let mut bytes = Vec::with_capacity(v.len() * std::mem::size_of::<u32>());
for px in v {
bytes.extend_from_slice(&px.to_ne_bytes());
}
bytes
}
}
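// A small sanity check (a sketch, not in the original file): the byte buffer
// from get_image() should cover w*h pixels at 4 bytes each.
#[cfg(test)]
mod symm_icon_tests {
    use super::*;

    #[test]
    fn image_buffer_matches_dimensions() {
        let icons = SymmetricIcons::new(64, 48, 0);
        let (w, h) = icons.get_size();
        assert_eq!(icons.get_image().len(), w * h * 4);
    }
}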
pub fn _test_symmetric_icon() {
let n = 2048;
let mut symicn = SymmetricIcons::new(n, n, 0);
symicn.set_preset(9);
for _i in 0..900_000 {
symicn.generate(5000);
}
symicn.write("symm_icon.bin");
use std::process::Command;
let n = &n.to_string()[..];
Command::new("/usr/local/bin/showbinimage.py")
.args(&[n, n, "symm_icon.bin"])
.output().expect("can't execute command");
}
pub fn _test_array2d() { // Array2D is faster 1.67 vs 2.14 (1.3 times faster)
use std::time::Instant;
const N : usize = 100_000_000;
const SZ : usize = 1200;
let v = vec![0_usize; SZ*SZ];
let a = Array2D::filled_with(0_usize, SZ, SZ);
let t = Instant::now();
for _ in 0..N {
for i in 0..SZ {
for j in 0..SZ {
let crd = i*SZ+j;
let x = v[crd];
let _xx = x+1;
}
}
}
println!("lap vec : {:?}", Instant::now()-t);
let t = Instant::now();
for _ in 0..N {
for r in 0..SZ {
for c in 0..SZ {
let x = a[(r,c)];
let _xx = x+1;
}
}
}
println!("lap array2d : {:?}", Instant::now()-t);
}
symm_icon.rs | // Symmetric Icons
#![allow(dead_code)]
use array2d::*;
// lambda, alpha, beta, gamma, omega, symmetry, scale
const PRESETS: [[f32; 7]; 36] = [
[1.56, -1., 0.1, -0.82, -0.3, 3., 1.7], [-1.806, 1.806, 0., 1.5, 0., 7., 1.1],
[2.4, -2.5, -0.9, 0.9, 0., 3., 1.5], [-2.7, 5., 1.5, 1., 0., 4., 1.],
[-2.5, 8., -0.7, 1., 0., 5., 0.8], [-1.9, 1.806, -0.85, 1.8, 0., 7., 1.2],
[2.409, -2.5, 0., 0.9, 0., 4., 1.4], [-1.806, 1.807, -0.07, 1.08, 0., 6., 1.2],
[-2.34, 2.2, 0.4, 0.05, 0., 5., 1.2], [-2.57, 3.2, 1.2, -1.75, 0., 36., 1.2],
[-2.6, 4., 1.5, 1., 0., 12., 1.1], [-2.2, 2.3, 0.55, -0.90, 0., 3., 1.3],
[-2.205, 6.01, 13.5814, -0.2044, 0.011, 5., 0.8],
[-2.7, 8.7, 13.86, -0.13, -0.18, 18., 0.8], [-2.52, 8.75, 12., 0.04, 0.18, 5., 0.8],
[2.38, -4.18, 19.99, -0.69, 0.095, 17., 1.], [2.33, -8.22, -6.07, -0.52, 0.16, 4., 0.8],
[-1.62, 2.049, 1.422, 1.96, 0.56, 6., 1.], [-1.89, 9.62, 1.95, 0.51, 0.21, 3., 0.6],
[-1.65, 9.99, 1.57, 1.46, -0.55, 3., 0.8], [-2.7, 5., 1.5, 1., 0., 6., 1.],
[-2.08, 1., -0.1, 0.167, 0., 7., 1.3], [1.56, -1., 0.1, -0.82, 0.12, 3., 1.6],
[-1.806, 1.806, 0., 1., 0., 5., 1.1], [1.56, -1., 0.1, -0.82, 0., 3., 1.3],
[-2.195, 10., -12., 1., 0., 3., 0.7], [-1.86, 2., 0., 1., 0.1, 4., 1.2],
[-2.34, 2., 0.2, 0.1, 0., 5., 1.2], [2.6, -2., 0., 0.5, 0., 5., 1.3],
[-2.5, 5., -1.9, 1., 0.188, 5., 1.], [2.409, -2.5, 0., 0.9, 0., 23., 1.2],
[2.409, -2.5, -0.2, 0.81, 0., 24., 1.2], [-2.05, 3., -16.79, 1., 0., 9., 1.],
[-2.32, 2.32, 0., 0.75, 0., 5., 1.2], [2.5, -2.5, 0., 0.9, 0., 3., 1.3],
[1.5, -1., 0.1, -0.805, 0., 3., 1.4],
];
const MAX_XY : f32 = 1e5;
const DEFAULT_SPEED : u32 = 100;
const MAX_COLORS : u32 = 2111;
const COLOR_SPEED : u32 = 3071;
pub struct SymmetricIcons {
lambda : f32,
alpha : f32,
beta : f32,
gamma : f32,
omega : f32,
symmetry : u32,
scale : f32,
w : usize,
h : usize,
color_set : u32,
iter : u32,
speed : u32,
apcx : f32,
apcy : f32,
rad : f32,
color_list: Vec<u32>,
icon : Array2D<u32>,
image : Array2D<u32>,
x : f32,
y : f32,
k : u32,
}
impl SymmetricIcons {
pub fn new(w : usize, h : usize, color_set : u32) -> Self {
let mut s = Self {
lambda : 0.0,
alpha : 0.0,
beta : 0.0,
gamma : 0.0,
omega : 0.0,
symmetry : 0,
scale : 0.0,
w : w,
h : h,
color_set : color_set,
iter : 0,
speed : DEFAULT_SPEED,
apcx : 0.0,
apcy : 0.0,
rad : 0.0,
color_list : vec![],
icon : Array2D::filled_with(0_u32, w, h),
image : Array2D::filled_with(0_u32, w, h),
x : 0.0,
y : 0.0,
k : 0,
};
s.set_preset(0);
s
}
pub fn set_size(&mut self, w : usize, h : usize) {
self.w = w;
        self.h = h;
        self.image = Array2D::filled_with(0_u32, w, h);
        self.icon = Array2D::filled_with(0_u32, w, h);
        self.iter = 0;
self.color_list = vec![];
self.reset();
}
pub fn set_preset(&mut self, i : usize) {
let p = PRESETS[i % PRESETS.len()];
self.lambda = p[0];
self.alpha = p[1];
self.beta = p[2];
self.gamma = p[3];
self.omega = p[4];
self.symmetry = p[5] as u32;
self.scale = if p[6] == 0. {1.} else {p[6]};
self.reset();
}
pub fn set_parameters(&mut self, lambda : f32, alpha: f32, beta : f32, gamma : f32, omega : f32, symmetry : f32, scale : f32) {
self.lambda = lambda;
self.alpha = alpha;
self.beta = beta;
self.gamma = gamma;
self.omega = omega;
self.symmetry = if symmetry < 1. { 1 } else { symmetry as u32 };
self.scale = if scale == 0. {1.} else { scale };
self.reset();
}
fn make_color(r : u32, g : u32, b : u32) -> u32 { (b << 16) | (g << 8) | r | 0xff00_0000 }
fn make_colora(a : u32, r : u32, g : u32, b : u32) -> u32 { (a << 24) | (b << 16) | (g << 8) | r }
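    // Pixel packing is 0xAABBGGRR, so the bytes land in memory as R,G,B,A on a
    // little-endian host (an assumption about what the external viewer expects).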
fn get_rainbow(x : u32, y : u32) -> u32 {
match x {
0 => Self::make_color(0, y, 255),
1 => Self::make_color(0, 255, 255 - y),
2 => Self::make_color(y, 255, 0),
3 => Self::make_color(255, 255 - y, 0),
4 => Self::make_color(255, 0, y),
5 => Self::make_color(255 - y, 0, 255),
_ => Self::make_color(0,0,0), // black
}
}
fn set_colors(&mut self, param_int : u32) {
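        // Palette layout, inferred from the index arithmetic below (a reading of
        // the code, not an authoritative spec): [0, 64) is a dark base ramp, the
        // middle of the table holds five 256-entry rainbow bands, and the top of
        // the table up to MAX_COLORS is a highlight ramp whose entries are
        // repeated 3x (or 2x for the param_int > 5 schemes).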
let mut colors = vec![0_u32; (MAX_COLORS+1) as usize];
match param_int {
0 => {
for i in 0..64 { colors[i] = Self::make_color(0, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
1 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(i, i, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
2 => {
for i in 0..64 { colors[i] = Self::make_color(0, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, 255);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
3 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 0) }
for i in 0..256 {
let local_color = Self::make_color(i, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
4 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 0) }
for i in 0..256 {
let local_color = Self::make_color(255, 255, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
5 => {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 0, 4 * i as u32) }
for i in 0..256 {
let local_color = Self::make_color(255, i, i);
for j in 0..3 { colors[(1344 + j + 3 * i) as usize] = local_color }
}
}
6 => for i in 0..256 { colors[(i + 64)] = Self::make_colora(255, 255 - i as u32, 255 - i as u32, 255) },
7 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255 - i as u32, 255, 255) },
8 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255 - i as u32, 255, 255 - i as u32) },
9 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255, 255 - i as u32) },
10 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255 - i as u32, 255 - i as u32)},
11 => for i in 0..256 { colors[(i + 64)] = Self::make_color(255, 255 - i as u32, 255)},
_ => ()
}
if param_int > 5 {
for i in 0..64 { colors[i] = Self::make_color(4 * i as u32, 4 * i as u32, 4 * i as u32) }
for j in 0..5 {
for i in 0..256 {
colors[(320 + j * 256 + i)] = Self::get_rainbow((param_int + j as u32) % 6, i as u32)
}
}
for i in 0..256 {
let local_color = Self::get_rainbow((param_int - 1) % 6, i);
colors[(1600 + 2 * i as usize)] = local_color;
colors[(1601 + 2 * i as usize)] = local_color;
}
} else { // <= 5
for j in 0..5 {
for i in 0..256 {
colors[64 + j * 256 + i] = Self::get_rainbow((param_int + j as u32) % 6, i as u32);
}
}
}
self.color_list = colors
}
fn reset(&mut self) {
self.speed = DEFAULT_SPEED;
self.apcx = self.w as f32 / 2.;
self.apcy = self.h as f32 / 2.;
self.rad = if self.apcx > self.apcy {self.apcy} else {self.apcx};
self.k = 0;
self.x = 0.01;
self.y = 0.003;
self.iter = 0;
self.icon = Array2D::filled_with(0_u32, self.w, self.h);
self.image = Array2D::filled_with(0_u32, self.w, self.h);
self.set_colors(self.color_set);
for m in 0..self.w {
for n in 0..self.h {
let color = self.get_color(self.icon[(m, n)]);
self.set_point_color(m, n, color);
}
}
}
fn set_point_color(&mut self, x : usize, y : usize, color : u32) {
self.image[(x, y)] = color;
}
fn get_color(&mut self, col : u32) -> u32 {
let col = col & 0x00ffffff;
if col * self.speed > MAX_COLORS {
while (col * self.speed > COLOR_SPEED) && (self.speed > 3) { self.speed-=1 }
self.color_list[MAX_COLORS as usize]
} else {
self.color_list[(col * self.speed) as usize]
}
}
fn set_point(&mut self, x : usize, y : usize) {
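        // Each pixel counts how often the orbit hits it; the counter is wrapped
        // back to 8192 once it reaches 12288 (the constants look like tuning
        // choices) so long runs keep cycling through the upper palette.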
let icon = self.icon[(x,y)];
let color = self.get_color(icon);
self.image[(x,y)] = color;
self.icon[(x,y)] += 1;
if icon >= 12288 { self.icon[(x,y)] = 8192 }
}
    pub fn generate(&mut self, mod_disp : u32) -> bool { // generate the icon; runs in a thread in 'start'
self.iter+=1;
if self.x.abs() > MAX_XY || self.y.abs() > MAX_XY {
self.reset(); // prevent overflow
}
// generate new x,y
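        // One step of the Field-Golubitsky symmetric-icon map (a reading of the
        // arithmetic below, not the original author's notes):
        //   z' = (lambda + alpha*|z|^2 + beta*Re(z^m))*z + gamma*conj(z)^(m-1) + i*omega*z
        // with z = x + i*y and m = symmetry; (tx, ty) accumulates z^(m-1) and
        // sqx recovers Re(z^m).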
let sq = self.x * self.x + self.y * self.y; // sq=x^2+y^2
let mut tx = self.x;
let mut ty = self.y; // tx=pow, ty=pow
        for _m in 1..self.symmetry.saturating_sub(1) { // symmetry - 2 complex multiplies; saturating_sub avoids u32 underflow when symmetry == 1
let sqx = tx * self.x - ty * self.y;
let sqy = ty * self.x + tx * self.y;
tx = sqx;
ty = sqy;
}
let sqx = self.x * tx - self.y * ty;
let tmp = self.lambda + self.alpha * sq + self.beta * sqx;
let x_new = tmp * self.x +self.gamma * tx - self.omega * self.y;
let y_new = tmp * self.y - self.gamma * ty + self.omega * self.x;
self.x = x_new;
self.y = y_new;
if self.k > 50 {
self.set_point((self.apcx + self.x * self.rad / self.scale) as usize,
(self.apcy + self.y * self.rad / self.scale) as usize);
} else {
self.k += 1;
}
self.iter % mod_disp == 0
}
    pub fn build(&mut self, preset : usize, n_iters : usize) -> (Vec<u8>, (usize, usize)) {
self.set_preset(preset);
for _ in 0..n_iters { self.generate(1); }
( self.get_image(), self.get_size() )
}
pub fn get_size(&self) -> (usize, usize) {
( self.w, self.h )
}
    pub fn write(&self, name : &str) { // view with e.g.: showbinimage.py 800 800 symm_icon.bin
        use std::fs::File;
        use std::io::prelude::*;
        File::create(name).expect("create failed")
            .write_all(&self.get_image()).expect("write failed");
    }
    pub fn get_image(&self) -> Vec<u8> { // copy the u32 pixels out as raw bytes
        // array2d's as_row_major() returns an owned Vec, so handing out a &[u8]
        // borrowed from it would dangle once the Vec is dropped; return an owned
        // Vec<u8> instead.
        let v = self.image.as_row_major();
        unsafe {
            std::slice::from_raw_parts(
                v.as_ptr() as *const u8,
                v.len() * std::mem::size_of::<u32>(),
            )
        }
        .to_vec()
    }
}
pub fn _test_symmetric_icon() {
let n = 2048;
let mut symicn = SymmetricIcons::new(n, n, 0);
symicn.set_preset(9);
for _i in 0..900_000 {
symicn.generate(5000);
}
symicn.write("symm_icon.bin");
use std::process::Command;
let n = &n.to_string()[..];
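    // Assumed viewer location; adjust to wherever showbinimage.py lives locally.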
Command::new("/usr/local/bin/showbinimage.py")
.args(&[n, n, "symm_icon.bin"])
.output().expect("can't execute command");
}
pub fn _test_array2d() { // Array2D indexing vs flat Vec: ~1.67s vs ~2.14s measured here, i.e. roughly 1.3x faster
use std::time::Instant;
const N : usize = 100_000_000;
const SZ : usize = 1200;
let v = vec![0_usize; SZ*SZ];
let a = Array2D::filled_with(0_usize, SZ, SZ);
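    // NOTE: these read-only loops can be elided entirely by the optimizer in
    // release builds; wrapping the loads in std::hint::black_box would make the
    // timings more robust (the quoted numbers presumably predate that concern).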
let t = Instant::now();
for _ in 0..N {
for i in 0..SZ {
for j in 0..SZ {
let crd = i*SZ+j;
let x = v[crd];
let _xx = x+1;
}
}
}
println!("lap vec : {:?}", Instant::now()-t);
let t = Instant::now();
for _ in 0..N {
for r in 0..SZ {
for c in 0..SZ {
let x = a[(r,c)];
let _xx = x+1;
}
}
}
println!("lap array2d : {:?}", Instant::now()-t);
}
// component.rs
use crate::code::CodeObject;
use crate::signatures::SignatureCollection;
use crate::{Engine, Module, ResourcesRequired};
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use std::fs;
use std::mem;
use std::path::Path;
use std::ptr::NonNull;
use std::sync::Arc;
use wasmtime_environ::component::{
AllCallFunc, ComponentTypes, GlobalInitializer, InstantiateModule, StaticModuleIndex,
TrampolineIndex, Translator, VMComponentOffsets,
};
use wasmtime_environ::{FunctionLoc, HostPtr, ObjectKind, PrimaryMap, ScopeVec};
use wasmtime_jit::{CodeMemory, CompiledModuleInfo};
use wasmtime_runtime::component::ComponentRuntimeInfo;
use wasmtime_runtime::{
MmapVec, VMArrayCallFunction, VMFuncRef, VMFunctionBody, VMNativeCallFunction,
VMWasmCallFunction,
};
/// A compiled WebAssembly Component.
//
// FIXME: need to write more docs here.
#[derive(Clone)]
pub struct Component {
inner: Arc<ComponentInner>,
}
struct ComponentInner {
/// Core wasm modules that the component defined internally, indexed by the
/// compile-time-assigned `ModuleUpvarIndex`.
static_modules: PrimaryMap<StaticModuleIndex, Module>,
/// Code-related information such as the compiled artifact, type
/// information, etc.
///
/// Note that the `Arc` here is used to share this allocation with internal
/// modules.
code: Arc<CodeObject>,
/// Metadata produced during compilation.
info: CompiledComponentInfo,
}
#[derive(Serialize, Deserialize)]
struct CompiledComponentInfo {
/// Type information calculated during translation about this component.
component: wasmtime_environ::component::Component,
/// Where lowered function trampolines are located within the `text`
/// section of `code_memory`.
///
/// These are the
///
/// 1. Wasm-call,
/// 2. array-call, and
/// 3. native-call
///
/// function pointers that end up in a `VMFuncRef` for each
/// lowering.
trampolines: PrimaryMap<TrampolineIndex, AllCallFunc<FunctionLoc>>,
/// The location of the wasm-to-native trampoline for the `resource.drop`
/// intrinsic.
resource_drop_wasm_to_native_trampoline: Option<FunctionLoc>,
}
pub(crate) struct AllCallFuncPointers {
pub wasm_call: NonNull<VMWasmCallFunction>,
pub array_call: VMArrayCallFunction,
pub native_call: NonNull<VMNativeCallFunction>,
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ComponentArtifacts {
info: CompiledComponentInfo,
types: ComponentTypes,
static_modules: PrimaryMap<StaticModuleIndex, CompiledModuleInfo>,
}
impl Component {
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
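    ///
    /// A minimal sketch (requires a compiler backend such as `cranelift` and,
    /// for text-format input, the `wat` feature):
    ///
    /// ```
    /// # fn main() -> wasmtime::Result<()> {
    /// use wasmtime::{Config, Engine, component::Component};
    ///
    /// let mut config = Config::new();
    /// config.wasm_component_model(true);
    /// let engine = Engine::new(&config)?;
    /// let component = Component::new(&engine, "(component)")?;
    /// # Ok(()) }
    /// ```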
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let bytes = bytes.as_ref();
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Component::from_binary(engine, &bytes)
}
/// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
}
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the file referenced here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
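    ///
    /// A round-trip sketch (assumes a compiler-enabled build and an identically
    /// configured `Engine` on the deserializing side):
    ///
    /// ```ignore
    /// let bytes = component.serialize()?;
    /// // Later, possibly in another process:
    /// let component = unsafe { Component::deserialize(&engine, &bytes)? };
    /// ```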
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
    pub unsafe fn deserialize_file(engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(any(feature = "cranelift", feature = "winch"))]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
use crate::compiler::CompileInputs;
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut module_translations) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
let compile_inputs = CompileInputs::for_component(
&types,
&component,
module_translations.iter_mut().map(|(i, translation)| {
let functions = mem::take(&mut translation.function_body_inputs);
(i, &*translation, functions)
}),
);
let unlinked_compile_outputs = compile_inputs.compile(&engine)?;
let (compiled_funcs, function_indices) = unlinked_compile_outputs.pre_link();
let mut object = compiler.object(ObjectKind::Component)?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
let (mut object, compilation_artifacts) = function_indices.link_and_append_code(
object,
&engine.config().tunables,
compiler,
compiled_funcs,
module_translations,
)?;
let info = CompiledComponentInfo {
component: component.component,
trampolines: compilation_artifacts.trampolines,
resource_drop_wasm_to_native_trampoline: compilation_artifacts
.resource_drop_wasm_to_native_trampoline,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules: compilation_artifacts.modules,
};
object.serialize_info(&artifacts);
let mmap = object.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Validate that the component can be used with the current instance
// allocator.
engine.allocator().validate_component(
&info.component,
&VMComponentOffsets::new(HostPtr, &info.component),
&|module_index| &static_modules[module_index].module,
)?;
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures =
SignatureCollection::new_for_module(engine.signatures(), types.module_types());
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
self.inner.component_types()
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn trampoline_ptrs(&self, index: TrampolineIndex) -> AllCallFuncPointers {
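        // Look up the three entry points for this trampoline in the text
        // section; the array-call variant is stored as a bare function pointer,
        // hence the transmute from the code address below.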
let AllCallFunc {
wasm_call,
array_call,
native_call,
} = &self.inner.info.trampolines[index];
AllCallFuncPointers {
wasm_call: self.func(wasm_call).cast(),
array_call: unsafe {
mem::transmute::<NonNull<VMFunctionBody>, VMArrayCallFunction>(
self.func(array_call),
)
},
native_call: self.func(native_call).cast(),
}
}
fn func(&self, loc: &FunctionLoc) -> NonNull<VMFunctionBody> {
let text = self.text();
let trampoline = &text[loc.start as usize..][..loc.length as usize];
NonNull::new(trampoline.as_ptr() as *mut VMFunctionBody).unwrap()
}
pub(crate) fn code_object(&self) -> &Arc<CodeObject> {
&self.inner.code
}
/// Same as [`Module::serialize`], except for a component.
///
/// Note that the artifact produced here must be passed to
/// [`Component::deserialize`] and is not compatible for use with
/// [`Module`].
///
/// [`Module::serialize`]: crate::Module::serialize
/// [`Module`]: crate::Module
pub fn serialize(&self) -> Result<Vec<u8>> {
Ok(self.code_object().code_memory().mmap().to_vec())
}
pub(crate) fn runtime_info(&self) -> Arc<dyn ComponentRuntimeInfo> {
self.inner.clone()
}
/// Creates a new `VMFuncRef` with all fields filled out for the destructor
/// specified.
///
/// The `dtor`'s own `VMFuncRef` won't have `wasm_call` filled out but this
/// component may have `resource_drop_wasm_to_native_trampoline` filled out
/// if necessary in which case it's filled in here.
pub(crate) fn resource_drop_func_ref(&self, dtor: &crate::func::HostFunc) -> VMFuncRef {
// Host functions never have their `wasm_call` filled in at this time.
assert!(dtor.func_ref().wasm_call.is_none());
// Note that if `resource_drop_wasm_to_native_trampoline` is not present
// then this can't be called by the component, so it's ok to leave it
// blank.
let wasm_call = self
.inner
.info
.resource_drop_wasm_to_native_trampoline
.as_ref()
.map(|i| self.func(i).cast());
VMFuncRef {
wasm_call,
..*dtor.func_ref()
}
}
/// Returns a summary of the resources required to instantiate this
/// [`Component`][crate::component::Component].
///
/// Note that when a component imports and instantiates another component or
/// core module, we cannot determine ahead of time how many resources
/// instantiating this component will require, and therefore this method
/// will return `None` in these scenarios.
///
/// Potential uses of the returned information:
///
/// * Determining whether your pooling allocator configuration supports
/// instantiating this component.
///
/// * Deciding how many of which `Component` you want to instantiate within
/// a fixed amount of resources, e.g. determining whether to create 5
/// instances of component X or 10 instances of component Y.
///
/// # Example
///
/// ```
/// # fn main() -> wasmtime::Result<()> {
/// use wasmtime::{Config, Engine, component::Component};
///
/// let mut config = Config::new();
/// config.wasm_multi_memory(true);
/// config.wasm_component_model(true);
/// let engine = Engine::new(&config)?;
///
/// let component = Component::new(&engine, &r#"
/// (component
/// ;; Define a core module that uses two memories.
/// (core module $m
/// (memory 1)
/// (memory 6)
/// )
///
/// ;; Instantiate that core module three times.
/// (core instance $i1 (instantiate (module $m)))
/// (core instance $i2 (instantiate (module $m)))
/// (core instance $i3 (instantiate (module $m)))
/// )
/// "#)?;
///
/// let resources = component.resources_required()
/// .expect("this component does not import any core modules or instances");
///
/// // Instantiating the component will require allocating two memories per
/// // core instance, and there are three instances, so six total memories.
/// assert_eq!(resources.num_memories, 6);
/// assert_eq!(resources.max_initial_memory_size, Some(6));
///
/// // The component doesn't need any tables.
/// assert_eq!(resources.num_tables, 0);
/// assert_eq!(resources.max_initial_table_size, None);
/// # Ok(()) }
/// ```
pub fn resources_required(&self) -> Option<ResourcesRequired> {
let mut resources = ResourcesRequired {
num_memories: 0,
max_initial_memory_size: None,
num_tables: 0,
max_initial_table_size: None,
};
for init in &self.env_component().initializers {
match init {
GlobalInitializer::InstantiateModule(inst) => match inst {
InstantiateModule::Static(index, _) => {
let module = self.static_module(*index);
resources.add(&module.resources_required());
}
InstantiateModule::Import(_, _) => {
// We can't statically determine the resources required
// to instantiate this component.
return None;
}
},
                GlobalInitializer::LowerImport { .. }
| GlobalInitializer::ExtractMemory(_)
| GlobalInitializer::ExtractRealloc(_)
| GlobalInitializer::ExtractPostReturn(_)
| GlobalInitializer::Resource(_) => {}
}
}
Some(resources)
}
}
impl ComponentRuntimeInfo for ComponentInner {
fn component(&self) -> &wasmtime_environ::component::Component {
&self.info.component
}
fn component_types(&self) -> &Arc<ComponentTypes> {
match self.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is itself which uses the other
// variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
}
}
component.rs | use crate::code::CodeObject;
use crate::signatures::SignatureCollection;
use crate::{Engine, Module, ResourcesRequired};
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use std::fs;
use std::mem;
use std::path::Path;
use std::ptr::NonNull;
use std::sync::Arc;
use wasmtime_environ::component::{
AllCallFunc, ComponentTypes, GlobalInitializer, InstantiateModule, StaticModuleIndex,
TrampolineIndex, Translator, VMComponentOffsets,
};
use wasmtime_environ::{FunctionLoc, HostPtr, ObjectKind, PrimaryMap, ScopeVec};
use wasmtime_jit::{CodeMemory, CompiledModuleInfo};
use wasmtime_runtime::component::ComponentRuntimeInfo;
use wasmtime_runtime::{
MmapVec, VMArrayCallFunction, VMFuncRef, VMFunctionBody, VMNativeCallFunction,
VMWasmCallFunction,
};
/// A compiled WebAssembly Component.
//
// FIXME: need to write more docs here.
#[derive(Clone)]
pub struct Component {
inner: Arc<ComponentInner>,
}
struct ComponentInner {
/// Core wasm modules that the component defined internally, indexed by the
/// compile-time-assigned `ModuleUpvarIndex`.
static_modules: PrimaryMap<StaticModuleIndex, Module>,
/// Code-related information such as the compiled artifact, type
/// information, etc.
///
/// Note that the `Arc` here is used to share this allocation with internal
/// modules.
code: Arc<CodeObject>,
/// Metadata produced during compilation.
info: CompiledComponentInfo,
}
#[derive(Serialize, Deserialize)]
struct CompiledComponentInfo {
/// Type information calculated during translation about this component.
component: wasmtime_environ::component::Component,
/// Where lowered function trampolines are located within the `text`
/// section of `code_memory`.
///
/// These are the
///
/// 1. Wasm-call,
/// 2. array-call, and
/// 3. native-call
///
/// function pointers that end up in a `VMFuncRef` for each
/// lowering.
trampolines: PrimaryMap<TrampolineIndex, AllCallFunc<FunctionLoc>>,
/// The location of the wasm-to-native trampoline for the `resource.drop`
/// intrinsic.
resource_drop_wasm_to_native_trampoline: Option<FunctionLoc>,
}
pub(crate) struct AllCallFuncPointers {
pub wasm_call: NonNull<VMWasmCallFunction>,
pub array_call: VMArrayCallFunction,
pub native_call: NonNull<VMNativeCallFunction>,
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ComponentArtifacts {
info: CompiledComponentInfo,
types: ComponentTypes,
static_modules: PrimaryMap<StaticModuleIndex, CompiledModuleInfo>,
}
impl Component {
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let bytes = bytes.as_ref();
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Component::from_binary(engine, &bytes)
}
/// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> |
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the file referenced here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
pub unsafe fn deserialize_file(engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(any(feature = "cranelift", feature = "winch"))]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
use crate::compiler::CompileInputs;
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut module_translations) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
let compile_inputs = CompileInputs::for_component(
&types,
&component,
module_translations.iter_mut().map(|(i, translation)| {
let functions = mem::take(&mut translation.function_body_inputs);
(i, &*translation, functions)
}),
);
let unlinked_compile_outputs = compile_inputs.compile(&engine)?;
let (compiled_funcs, function_indices) = unlinked_compile_outputs.pre_link();
let mut object = compiler.object(ObjectKind::Component)?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
let (mut object, compilation_artifacts) = function_indices.link_and_append_code(
object,
&engine.config().tunables,
compiler,
compiled_funcs,
module_translations,
)?;
let info = CompiledComponentInfo {
component: component.component,
trampolines: compilation_artifacts.trampolines,
resource_drop_wasm_to_native_trampoline: compilation_artifacts
.resource_drop_wasm_to_native_trampoline,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules: compilation_artifacts.modules,
};
object.serialize_info(&artifacts);
let mmap = object.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Validate that the component can be used with the current instance
// allocator.
engine.allocator().validate_component(
&info.component,
&VMComponentOffsets::new(HostPtr, &info.component),
&|module_index| &static_modules[module_index].module,
)?;
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures =
SignatureCollection::new_for_module(engine.signatures(), types.module_types());
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
self.inner.component_types()
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn trampoline_ptrs(&self, index: TrampolineIndex) -> AllCallFuncPointers {
let AllCallFunc {
wasm_call,
array_call,
native_call,
} = &self.inner.info.trampolines[index];
AllCallFuncPointers {
wasm_call: self.func(wasm_call).cast(),
array_call: unsafe {
mem::transmute::<NonNull<VMFunctionBody>, VMArrayCallFunction>(
self.func(array_call),
)
},
native_call: self.func(native_call).cast(),
}
}
fn func(&self, loc: &FunctionLoc) -> NonNull<VMFunctionBody> {
let text = self.text();
let trampoline = &text[loc.start as usize..][..loc.length as usize];
NonNull::new(trampoline.as_ptr() as *mut VMFunctionBody).unwrap()
}
pub(crate) fn code_object(&self) -> &Arc<CodeObject> {
&self.inner.code
}
/// Same as [`Module::serialize`], except for a component.
///
/// Note that the artifact produced here must be passed to
/// [`Component::deserialize`] and is not compatible for use with
/// [`Module`].
///
/// [`Module::serialize`]: crate::Module::serialize
/// [`Module`]: crate::Module
pub fn serialize(&self) -> Result<Vec<u8>> {
Ok(self.code_object().code_memory().mmap().to_vec())
}
pub(crate) fn runtime_info(&self) -> Arc<dyn ComponentRuntimeInfo> {
self.inner.clone()
}
/// Creates a new `VMFuncRef` with all fields filled out for the destructor
/// specified.
///
/// The `dtor`'s own `VMFuncRef` won't have `wasm_call` filled out but this
/// component may have `resource_drop_wasm_to_native_trampoline` filled out
/// if necessary in which case it's filled in here.
pub(crate) fn resource_drop_func_ref(&self, dtor: &crate::func::HostFunc) -> VMFuncRef {
// Host functions never have their `wasm_call` filled in at this time.
assert!(dtor.func_ref().wasm_call.is_none());
// Note that if `resource_drop_wasm_to_native_trampoline` is not present
// then this can't be called by the component, so it's ok to leave it
// blank.
let wasm_call = self
.inner
.info
.resource_drop_wasm_to_native_trampoline
.as_ref()
.map(|i| self.func(i).cast());
VMFuncRef {
wasm_call,
..*dtor.func_ref()
}
}
/// Returns a summary of the resources required to instantiate this
/// [`Component`][crate::component::Component].
///
/// Note that when a component imports and instantiates another component or
/// core module, we cannot determine ahead of time how many resources
/// instantiating this component will require, and therefore this method
/// will return `None` in these scenarios.
///
/// Potential uses of the returned information:
///
/// * Determining whether your pooling allocator configuration supports
/// instantiating this component.
///
/// * Deciding how many of which `Component` you want to instantiate within
/// a fixed amount of resources, e.g. determining whether to create 5
/// instances of component X or 10 instances of component Y.
///
/// # Example
///
/// ```
/// # fn main() -> wasmtime::Result<()> {
/// use wasmtime::{Config, Engine, component::Component};
///
/// let mut config = Config::new();
/// config.wasm_multi_memory(true);
/// config.wasm_component_model(true);
/// let engine = Engine::new(&config)?;
///
/// let component = Component::new(&engine, &r#"
/// (component
/// ;; Define a core module that uses two memories.
/// (core module $m
/// (memory 1)
/// (memory 6)
/// )
///
/// ;; Instantiate that core module three times.
/// (core instance $i1 (instantiate (module $m)))
/// (core instance $i2 (instantiate (module $m)))
/// (core instance $i3 (instantiate (module $m)))
/// )
/// "#)?;
///
/// let resources = component.resources_required()
/// .expect("this component does not import any core modules or instances");
///
/// // Instantiating the component will require allocating two memories per
/// // core instance, and there are three instances, so six total memories.
/// assert_eq!(resources.num_memories, 6);
/// assert_eq!(resources.max_initial_memory_size, Some(6));
///
/// // The component doesn't need any tables.
/// assert_eq!(resources.num_tables, 0);
/// assert_eq!(resources.max_initial_table_size, None);
/// # Ok(()) }
/// ```
pub fn resources_required(&self) -> Option<ResourcesRequired> {
let mut resources = ResourcesRequired {
num_memories: 0,
max_initial_memory_size: None,
num_tables: 0,
max_initial_table_size: None,
};
for init in &self.env_component().initializers {
match init {
GlobalInitializer::InstantiateModule(inst) => match inst {
InstantiateModule::Static(index, _) => {
let module = self.static_module(*index);
resources.add(&module.resources_required());
}
InstantiateModule::Import(_, _) => {
// We can't statically determine the resources required
// to instantiate this component.
return None;
}
},
GlobalInitializer::LowerImport {.. }
| GlobalInitializer::ExtractMemory(_)
| GlobalInitializer::ExtractRealloc(_)
| GlobalInitializer::ExtractPostReturn(_)
| GlobalInitializer::Resource(_) => {}
}
}
Some(resources)
}
}
impl ComponentRuntimeInfo for ComponentInner {
fn component(&self) -> &wasmtime_environ::component::Component {
&self.info.component
}
fn component_types(&self) -> &Arc<ComponentTypes> {
match self.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is `Component` itself, which always
// stores the `Types::Component` variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
}
}
| {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
} | identifier_body |
component.rs | use crate::code::CodeObject;
use crate::signatures::SignatureCollection;
use crate::{Engine, Module, ResourcesRequired};
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use std::fs;
use std::mem;
use std::path::Path;
use std::ptr::NonNull;
use std::sync::Arc;
use wasmtime_environ::component::{
AllCallFunc, ComponentTypes, GlobalInitializer, InstantiateModule, StaticModuleIndex,
TrampolineIndex, Translator, VMComponentOffsets,
};
use wasmtime_environ::{FunctionLoc, HostPtr, ObjectKind, PrimaryMap, ScopeVec};
use wasmtime_jit::{CodeMemory, CompiledModuleInfo};
use wasmtime_runtime::component::ComponentRuntimeInfo;
use wasmtime_runtime::{
MmapVec, VMArrayCallFunction, VMFuncRef, VMFunctionBody, VMNativeCallFunction,
VMWasmCallFunction,
};
/// A compiled WebAssembly Component.
//
// FIXME: need to write more docs here.
#[derive(Clone)]
pub struct Component {
inner: Arc<ComponentInner>,
}
struct ComponentInner {
/// Core wasm modules that the component defined internally, indexed by the
/// compile-time-assigned `StaticModuleIndex`.
static_modules: PrimaryMap<StaticModuleIndex, Module>,
/// Code-related information such as the compiled artifact, type
/// information, etc.
///
/// Note that the `Arc` here is used to share this allocation with internal
/// modules.
code: Arc<CodeObject>,
/// Metadata produced during compilation.
info: CompiledComponentInfo,
}
#[derive(Serialize, Deserialize)]
struct CompiledComponentInfo {
/// Type information calculated during translation about this component.
component: wasmtime_environ::component::Component,
/// Where lowered function trampolines are located within the `text`
/// section of `code_memory`.
///
/// These are the
///
/// 1. Wasm-call,
/// 2. array-call, and
/// 3. native-call
///
/// function pointers that end up in a `VMFuncRef` for each
/// lowering.
trampolines: PrimaryMap<TrampolineIndex, AllCallFunc<FunctionLoc>>,
/// The location of the wasm-to-native trampoline for the `resource.drop`
/// intrinsic.
resource_drop_wasm_to_native_trampoline: Option<FunctionLoc>,
}
pub(crate) struct AllCallFuncPointers {
pub wasm_call: NonNull<VMWasmCallFunction>,
pub array_call: VMArrayCallFunction,
pub native_call: NonNull<VMNativeCallFunction>,
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ComponentArtifacts {
info: CompiledComponentInfo,
types: ComponentTypes,
static_modules: PrimaryMap<StaticModuleIndex, CompiledModuleInfo>,
}
impl Component {
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let bytes = bytes.as_ref();
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Component::from_binary(engine, &bytes)
}
/// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
}
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[cfg_attr(nightlydoc, doc(cfg(any(feature = "cranelift", feature = "winch"))))]
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
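// Illustrative usage (a sketch, not part of the original source): compiling
// a component and instantiating it through the public component `Linker`
// API. The `Store`/`Linker` names are assumed from wasmtime's public API.
//
//     let engine = Engine::default();
//     let component = Component::new(&engine, "(component)")?;
//     let mut store = wasmtime::Store::new(&engine, ());
//     let linker = wasmtime::component::Linker::new(&engine);
//     let _instance = linker.instantiate(&mut store, &component)?;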
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the file referenced here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
pub unsafe fn deserialize_file(engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
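// Illustrative round-trip (sketch): the bytes produced by `serialize` below
// are the expected input to `deserialize`, and loading them on an
// incompatible engine configuration should fail at load time.
//
//     let bytes = component.serialize()?;
//     let copy = unsafe { Component::deserialize(&engine, &bytes)? };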
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(any(feature = "cranelift", feature = "winch"))]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
use crate::compiler::CompileInputs;
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut module_translations) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
let compile_inputs = CompileInputs::for_component(
&types,
&component,
module_translations.iter_mut().map(|(i, translation)| {
let functions = mem::take(&mut translation.function_body_inputs);
(i, &*translation, functions)
}),
);
let unlinked_compile_outputs = compile_inputs.compile(&engine)?;
let (compiled_funcs, function_indices) = unlinked_compile_outputs.pre_link();
let mut object = compiler.object(ObjectKind::Component)?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
let (mut object, compilation_artifacts) = function_indices.link_and_append_code(
object,
&engine.config().tunables,
compiler,
compiled_funcs,
module_translations,
)?;
let info = CompiledComponentInfo {
component: component.component,
trampolines: compilation_artifacts.trampolines,
resource_drop_wasm_to_native_trampoline: compilation_artifacts
.resource_drop_wasm_to_native_trampoline,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules: compilation_artifacts.modules,
};
object.serialize_info(&artifacts);
let mmap = object.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Validate that the component can be used with the current instance
// allocator.
engine.allocator().validate_component(
&info.component,
&VMComponentOffsets::new(HostPtr, &info.component),
&|module_index| &static_modules[module_index].module,
)?;
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures =
SignatureCollection::new_for_module(engine.signatures(), types.module_types());
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
self.inner.component_types()
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn trampoline_ptrs(&self, index: TrampolineIndex) -> AllCallFuncPointers {
let AllCallFunc {
wasm_call,
array_call,
native_call,
} = &self.inner.info.trampolines[index];
AllCallFuncPointers {
wasm_call: self.func(wasm_call).cast(),
array_call: unsafe {
mem::transmute::<NonNull<VMFunctionBody>, VMArrayCallFunction>(
self.func(array_call),
)
},
native_call: self.func(native_call).cast(),
}
}
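/// Resolves a `FunctionLoc` (a start/length pair within this component's
/// text section) to a raw pointer at which the compiled body begins.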
fn func(&self, loc: &FunctionLoc) -> NonNull<VMFunctionBody> {
let text = self.text();
let trampoline = &text[loc.start as usize..][..loc.length as usize];
NonNull::new(trampoline.as_ptr() as *mut VMFunctionBody).unwrap()
}
pub(crate) fn code_object(&self) -> &Arc<CodeObject> {
&self.inner.code
}
/// Same as [`Module::serialize`], except for a component.
///
/// Note that the artifact produced here must be passed to
/// [`Component::deserialize`] and is not compatible for use with
/// [`Module`].
///
/// [`Module::serialize`]: crate::Module::serialize
/// [`Module`]: crate::Module | }
pub(crate) fn runtime_info(&self) -> Arc<dyn ComponentRuntimeInfo> {
self.inner.clone()
}
/// Creates a new `VMFuncRef` with all fields filled out for the destructor
/// specified.
///
/// The `dtor`'s own `VMFuncRef` won't have `wasm_call` filled out, but this
/// component may have a `resource_drop_wasm_to_native_trampoline` compiled;
/// when it does, that trampoline is used to fill in `wasm_call` here.
pub(crate) fn resource_drop_func_ref(&self, dtor: &crate::func::HostFunc) -> VMFuncRef {
// Host functions never have their `wasm_call` filled in at this time.
assert!(dtor.func_ref().wasm_call.is_none());
// Note that if `resource_drop_wasm_to_native_trampoline` is not present
// then this can't be called by the component, so it's ok to leave it
// blank.
let wasm_call = self
.inner
.info
.resource_drop_wasm_to_native_trampoline
.as_ref()
.map(|i| self.func(i).cast());
VMFuncRef {
wasm_call,
..*dtor.func_ref()
}
}
/// Returns a summary of the resources required to instantiate this
/// [`Component`][crate::component::Component].
///
/// Note that when a component imports and instantiates another component or
/// core module, we cannot determine ahead of time how many resources
/// instantiating this component will require, and therefore this method
/// will return `None` in these scenarios.
///
/// Potential uses of the returned information:
///
/// * Determining whether your pooling allocator configuration supports
/// instantiating this component.
///
/// * Deciding how many of which `Component` you want to instantiate within
/// a fixed amount of resources, e.g. determining whether to create 5
/// instances of component X or 10 instances of component Y.
///
/// # Example
///
/// ```
/// # fn main() -> wasmtime::Result<()> {
/// use wasmtime::{Config, Engine, component::Component};
///
/// let mut config = Config::new();
/// config.wasm_multi_memory(true);
/// config.wasm_component_model(true);
/// let engine = Engine::new(&config)?;
///
/// let component = Component::new(&engine, &r#"
/// (component
/// ;; Define a core module that uses two memories.
/// (core module $m
/// (memory 1)
/// (memory 6)
/// )
///
/// ;; Instantiate that core module three times.
/// (core instance $i1 (instantiate (module $m)))
/// (core instance $i2 (instantiate (module $m)))
/// (core instance $i3 (instantiate (module $m)))
/// )
/// "#)?;
///
/// let resources = component.resources_required()
/// .expect("this component does not import any core modules or instances");
///
/// // Instantiating the component will require allocating two memories per
/// // core instance, and there are three instances, so six total memories.
/// assert_eq!(resources.num_memories, 6);
/// assert_eq!(resources.max_initial_memory_size, Some(6));
///
/// // The component doesn't need any tables.
/// assert_eq!(resources.num_tables, 0);
/// assert_eq!(resources.max_initial_table_size, None);
/// # Ok(()) }
/// ```
pub fn resources_required(&self) -> Option<ResourcesRequired> {
let mut resources = ResourcesRequired {
num_memories: 0,
max_initial_memory_size: None,
num_tables: 0,
max_initial_table_size: None,
};
for init in &self.env_component().initializers {
match init {
GlobalInitializer::InstantiateModule(inst) => match inst {
InstantiateModule::Static(index, _) => {
let module = self.static_module(*index);
resources.add(&module.resources_required());
}
InstantiateModule::Import(_, _) => {
// We can't statically determine the resources required
// to instantiate this component.
return None;
}
},
GlobalInitializer::LowerImport { .. }
| GlobalInitializer::ExtractMemory(_)
| GlobalInitializer::ExtractRealloc(_)
| GlobalInitializer::ExtractPostReturn(_)
| GlobalInitializer::Resource(_) => {}
}
}
Some(resources)
}
}
impl ComponentRuntimeInfo for ComponentInner {
fn component(&self) -> &wasmtime_environ::component::Component {
&self.info.component
}
fn component_types(&self) -> &Arc<ComponentTypes> {
match self.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is `Component` itself, which always
// stores the `Types::Component` variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
}
} | pub fn serialize(&self) -> Result<Vec<u8>> {
Ok(self.code_object().code_memory().mmap().to_vec()) | random_line_split |
annotate.rs | extern crate chrono;
extern crate id3;
extern crate mp3_duration;
extern crate regex;
extern crate reqwest;
use std::{
fs::{
read_dir,
rename,
},
io::{
Read,
},
iter::{
repeat_with,
},
path::{
Path,
PathBuf,
},
time::{
Duration,
}
};
use chrono::{
Datelike,
format::{
ParseResult,
},
NaiveDate,
};
use id3::{
frame::{
Picture,
PictureType,
},
Tag,
Timestamp,
Version,
};
use regex::{
Regex,
};
use crate::{
types::{
AlbumFull,
ClientWithToken,
SimpleError,
Track,
},
utils::{
get_with_retry,
},
whitelist::{
add_whitelist,
},
};
#[derive(Debug)]
pub struct TrackData {
album_name: String,
album_artists: String,
release_date: Option<Timestamp>,
image_url: Option<String>,
track_name: String,
track_number: i32,
track_artists: Option<String>,
expected_duration_ms: i32,
}
impl TrackData {
pub fn release_date_from(
album_full: &AlbumFull,
) -> ParseResult<Timestamp> {
let mut year = -1;
let mut month = None;
let mut day = None;
if album_full.release_date_precision == "year" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y",
)?;
year = date.year();
}
if album_full.release_date_precision == "month" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m",
)?;
year = date.year();
month = Some(date.month() as u8);
}
else if album_full.release_date_precision == "day" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m-%d",
)?;
year = date.year();
month = Some(date.month() as u8);
day = Some(date.day() as u8);
}
Ok(Timestamp {
year: year,
month: month,
day: day,
hour: None,
minute: None,
second: None,
})
}
pub fn from(
track: Track,
album_full: &AlbumFull,
) -> Self {
let album_artists = album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
let track_artists = track.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
Self {
album_name: album_full.name.clone(),
album_artists: album_artists.clone(),
release_date: Self::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: track.name,
track_number: track.track_number,
track_artists: Some(track_artists).filter(|artists| {
// need clone?
artists != &album_artists
}),
expected_duration_ms: track.duration_ms,
}
}
}
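/// Lists the directory at `abs_path`, returning the sorted paths of its
/// regular files (subdirectories are filtered out).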
fn get_tracks_files(
abs_path: &Path,
) -> Result<Vec<PathBuf>, SimpleError> {
read_dir(abs_path).map_err(SimpleError::from).and_then(|dir_iter| {
dir_iter.map(|entry| {
entry.map(|entry_ok| {
entry_ok.path()
}).map_err(SimpleError::from)
}).collect::<Result<Vec<PathBuf>, SimpleError>>()
}).map(|mut paths| {
paths.sort();
paths.into_iter().filter(|path| {
path.is_file()
}).collect()
})
}
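/// Follows the paging cursor of the album's track list, fetching further
/// pages from the API until exhausted, and converts each track into
/// `TrackData`.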
pub fn | (
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<Vec<TrackData>, SimpleError> {
let mut tracks = Vec::new();
let mut paging = album_full.tracks.clone();
while let Some(next_url) = paging.next {
tracks.append(&mut paging.items);
paging = get_with_retry(
&next_url[..],
client_with_token,
)?;
}
tracks.append(&mut paging.items);
Ok(tracks.into_iter().map(|track| {
TrackData::from(track, album_full)
}).collect())
}
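/// Zero-pads single-digit track numbers so lexicographic file order matches
/// track order, e.g. `3` becomes `"03"` while `12` stays `"12"`.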
fn norm_track_number(
track_number: i32,
) -> String {
if track_number < 10 {
return format!("0{}", track_number);
}
track_number.to_string()
}
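/// Checks whether the measured MP3 duration is within five seconds of the
/// duration reported by the API for this track.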
fn expected_time(
file: &PathBuf,
track_data: &TrackData,
) -> bool {
let actual_duration = mp3_duration::from_path(file.as_path()).expect(
&format!("error measuring {}", file.display())[..],
);
let expected_duration = Duration::from_millis(
track_data.expected_duration_ms as u64,
);
actual_duration.checked_sub(expected_duration).or(
expected_duration.checked_sub(actual_duration)
).and_then(|res| {
res.checked_sub(Duration::from_secs(5))
}).is_none()
}
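/// Downloads the image at `image_url` and buffers its bytes in memory.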
fn get_image(
image_url: &str,
) -> Result<Vec<u8>, SimpleError> {
reqwest::get(image_url).map_err(SimpleError::from).and_then(|response| {
response.bytes().map(|byte_res| {
byte_res.map_err(SimpleError::from)
}).collect()
})
}
fn add_image(
tags: &mut Tag,
image: &Vec<u8>,
) {
tags.add_picture(Picture {
mime_type: "image/jpeg".to_string(),
picture_type: PictureType::CoverFront,
description: format!(
"Cover for {} by {}",
tags.album().expect("error in writing tags"),
tags.artist().expect("error in writing tags"),
),
data: image.clone(),
});
}
fn annotate_tags(
tags: &mut Tag,
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
) -> String {
lazy_static! {
static ref INVALID_FILE_CHRS: Regex = Regex::new(r"[^\w\s.\(\)]+").unwrap();
}
let mut new_name = format!(
"{} {}.mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
if !expected_time(file, &track_data) {
new_name = format!(
"{} {} (unexpected duration).mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
}
tags.set_album(track_data.album_name);
let album_artists = track_data.album_artists.clone();
track_data.track_artists.map(|artists| {
tags.set_album_artist(album_artists.clone());
tags.set_artist(artists);
}).unwrap_or_else(|| {
tags.set_artist(album_artists);
});
track_data.release_date.map(|date| {
tags.set_date_released(date);
});
tags.set_title(track_data.track_name);
tags.set_track(track_data.track_number as u32);
if !album_image.is_empty() {
add_image(tags, album_image)
}
INVALID_FILE_CHRS.replace_all(&new_name[..], "_").to_string()
}
fn annotate_file(
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
rename_file: bool,
) -> Result<(), SimpleError> {
let mut tags = Tag::new();
let new_name = annotate_tags(&mut tags, file, track_data, album_image);
tags.write_to_path(file, Version::Id3v24).map_err(SimpleError::from)
.and_then(|_| {
if rename_file {
return file.as_path().file_name().ok_or(SimpleError {
msg: format!("{} not file?", file.display()),
}).and_then(|file_name| {
if new_name != file_name.to_string_lossy() {
return rename(
file,
file.with_file_name(new_name),
).map_err(SimpleError::from);
}
Ok(())
});
}
return Ok(());
})
}
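/// Tags every file in `dir` with the album's metadata and cover art, renaming
/// files to the sanitized `NN Title.mp3` form unless the file count does not
/// match the track count; extra files get placeholder `TrackData`.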
pub fn annotate(
dir: &PathBuf,
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let mut rename_files = true;
let files = get_tracks_files(&abs_path)?;
let mut data = get_tracks_data(album_full, client_with_token)?;
if files.len() != data.len() {
println!(
"number of files in {} should be {}, not renaming",
dir.display(),
data.len(),
);
rename_files = false;
}
let album_image = album_full.images.iter().next().map(|image| {
image.url.clone()
}).map(|url| {
get_image(&url[..]).unwrap_or_else(|err| {
println!("error getting image for {}: {}", album_full.name, err.msg);
vec![]
})
}).unwrap_or_else(|| {
println!("no image for {}", album_full.name);
vec![]
});
let mut track_counter = data.len() as i32;
data.extend(repeat_with(|| {
let track_data = TrackData {
album_name: album_full.name.clone(),
album_artists: album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", "),
release_date: TrackData::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: "unknown track name".to_string(),
track_number: track_counter,
track_artists: None,
expected_duration_ms: 0,
};
track_counter += 1;
track_data
}).take(files.len()));
files.iter().zip(
data.into_iter(),
).map(|(track_file, track_data)| {
annotate_file(track_file, track_data, &album_image, rename_files)
.and_then(|_| {
add_whitelist(dir.to_string_lossy().to_string())
})
}).collect()
}
pub fn test_run(
dir: &PathBuf,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let files = get_tracks_files(&abs_path)?;
files.iter().map(|track_file| {
mp3_duration::from_path(track_file.as_path()).map(|_| {
()
}).unwrap_or_else(|err| {
println!("error measuring {}: {}", track_file.display(), err);
});
Ok(())
}).collect()
}
| get_tracks_data | identifier_name |
annotate.rs | extern crate chrono;
extern crate id3;
extern crate mp3_duration;
extern crate regex;
extern crate reqwest;
use std::{
fs::{
read_dir,
rename,
},
io::{
Read,
},
iter::{
repeat_with,
},
path::{
Path,
PathBuf,
},
time::{
Duration,
}
};
use chrono::{
Datelike,
format::{
ParseResult,
},
NaiveDate,
};
use id3::{
frame::{
Picture,
PictureType,
},
Tag,
Timestamp,
Version,
};
use regex::{
Regex,
};
use crate::{
types::{
AlbumFull,
ClientWithToken,
SimpleError,
Track,
},
utils::{
get_with_retry,
},
whitelist::{
add_whitelist,
},
};
#[derive(Debug)]
pub struct TrackData {
album_name: String,
album_artists: String,
release_date: Option<Timestamp>,
image_url: Option<String>,
track_name: String,
track_number: i32,
track_artists: Option<String>,
expected_duration_ms: i32,
}
impl TrackData {
pub fn release_date_from(
album_full: &AlbumFull,
) -> ParseResult<Timestamp> {
let mut year = -1;
let mut month = None;
let mut day = None;
if album_full.release_date_precision == "year" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y",
)?;
year = date.year();
}
if album_full.release_date_precision == "month" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m",
)?;
year = date.year();
month = Some(date.month() as u8);
}
else if album_full.release_date_precision == "day" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m-%d",
)?;
year = date.year();
month = Some(date.month() as u8);
day = Some(date.day() as u8);
}
Ok(Timestamp {
year: year,
month: month,
day: day,
hour: None,
minute: None,
second: None,
})
}
pub fn from(
track: Track,
album_full: &AlbumFull,
) -> Self {
let album_artists = album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
let track_artists = track.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
Self {
album_name: album_full.name.clone(),
album_artists: album_artists.clone(),
release_date: Self::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: track.name,
track_number: track.track_number,
track_artists: Some(track_artists).filter(|artists| {
// need clone?
artists != &album_artists
}),
expected_duration_ms: track.duration_ms,
}
}
}
fn get_tracks_files(
abs_path: &Path,
) -> Result<Vec<PathBuf>, SimpleError> {
read_dir(abs_path).map_err(SimpleError::from).and_then(|dir_iter| {
dir_iter.map(|entry| {
entry.map(|entry_ok| {
entry_ok.path()
}).map_err(SimpleError::from)
}).collect::<Result<Vec<PathBuf>, SimpleError>>()
}).map(|mut paths| {
paths.sort();
paths.into_iter().filter(|path| {
path.is_file()
}).collect()
})
}
pub fn get_tracks_data(
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<Vec<TrackData>, SimpleError> {
let mut tracks = Vec::new();
let mut paging = album_full.tracks.clone();
while let Some(next_url) = paging.next {
tracks.append(&mut paging.items);
paging = get_with_retry(
&next_url[..],
client_with_token,
)?;
}
tracks.append(&mut paging.items);
Ok(tracks.into_iter().map(|track| {
TrackData::from(track, album_full)
}).collect())
}
fn norm_track_number(
track_number: i32,
) -> String {
if track_number < 10 {
return format!("0{}", track_number);
}
track_number.to_string()
}
fn expected_time(
file: &PathBuf,
track_data: &TrackData,
) -> bool {
let actual_duration = mp3_duration::from_path(file.as_path()).expect(
&format!("error measuring {}", file.display())[..],
);
let expected_duration = Duration::from_millis(
track_data.expected_duration_ms as u64,
);
actual_duration.checked_sub(expected_duration).or(
expected_duration.checked_sub(actual_duration)
).and_then(|res| {
res.checked_sub(Duration::from_secs(5))
}).is_none()
}
fn get_image(
image_url: &str,
) -> Result<Vec<u8>, SimpleError> {
reqwest::get(image_url).map_err(SimpleError::from).and_then(|response| {
response.bytes().map(|byte_res| {
byte_res.map_err(SimpleError::from)
}).collect()
})
}
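/// Embeds `image` in the tag as a front-cover JPEG picture frame; the frame
/// description is derived from the already-set album and artist fields.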
fn add_image(
tags: &mut Tag,
image: &Vec<u8>,
) {
tags.add_picture(Picture {
mime_type: "image/jpeg".to_string(),
picture_type: PictureType::CoverFront,
description: format!(
"Cover for {} by {}", | }
fn annotate_tags(
tags: &mut Tag,
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
) -> String {
lazy_static! {
static ref INVALID_FILE_CHRS: Regex = Regex::new(r"[^\w\s.\(\)]+").unwrap();
}
let mut new_name = format!(
"{} {}.mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
if !expected_time(file, &track_data) {
new_name = format!(
"{} {} (unexpected duration).mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
}
tags.set_album(track_data.album_name);
let album_artists = track_data.album_artists.clone();
track_data.track_artists.map(|artists| {
tags.set_album_artist(album_artists.clone());
tags.set_artist(artists);
}).unwrap_or_else(|| {
tags.set_artist(album_artists);
});
track_data.release_date.map(|date| {
tags.set_date_released(date);
});
tags.set_title(track_data.track_name);
tags.set_track(track_data.track_number as u32);
if !album_image.is_empty() {
add_image(tags, album_image)
}
INVALID_FILE_CHRS.replace_all(&new_name[..], "_").to_string()
}
fn annotate_file(
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
rename_file: bool,
) -> Result<(), SimpleError> {
let mut tags = Tag::new();
let new_name = annotate_tags(&mut tags, file, track_data, album_image);
tags.write_to_path(file, Version::Id3v24).map_err(SimpleError::from)
.and_then(|_| {
if rename_file {
return file.as_path().file_name().ok_or(SimpleError {
msg: format!("{} not file?", file.display()),
}).and_then(|file_name| {
if new_name != file_name.to_string_lossy() {
return rename(
file,
file.with_file_name(new_name),
).map_err(SimpleError::from);
}
Ok(())
});
}
return Ok(());
})
}
pub fn annotate(
dir: &PathBuf,
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let mut rename_files = true;
let files = get_tracks_files(&abs_path)?;
let mut data = get_tracks_data(album_full, client_with_token)?;
if files.len() != data.len() {
println!(
"number of files in {} should be {}, not renaming",
dir.display(),
data.len(),
);
rename_files = false;
}
let album_image = album_full.images.iter().next().map(|image| {
image.url.clone()
}).map(|url| {
get_image(&url[..]).unwrap_or_else(|err| {
println!("error getting image for {}: {}", album_full.name, err.msg);
vec![]
})
}).unwrap_or_else(|| {
println!("no image for {}", album_full.name);
vec![]
});
let mut track_counter = data.len() as i32;
data.extend(repeat_with(|| {
let track_data = TrackData {
album_name: album_full.name.clone(),
album_artists: album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", "),
release_date: TrackData::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: "unknown track name".to_string(),
track_number: track_counter,
track_artists: None,
expected_duration_ms: 0,
};
track_counter += 1;
track_data
}).take(files.len()));
files.iter().zip(
data.into_iter(),
).map(|(track_file, track_data)| {
annotate_file(track_file, track_data, &album_image, rename_files)
.and_then(|_| {
add_whitelist(dir.to_string_lossy().to_string())
})
}).collect()
}
pub fn test_run(
dir: &PathBuf,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let files = get_tracks_files(&abs_path)?;
files.iter().map(|track_file| {
mp3_duration::from_path(track_file.as_path()).map(|_| {
()
}).unwrap_or_else(|err| {
println!("error measuring {}: {}", track_file.display(), err);
});
Ok(())
}).collect()
} | tags.album().expect("error in writing tags"),
tags.artist().expect("error in writing tags"),
),
data: image.clone(),
}); | random_line_split |
annotate.rs | extern crate chrono;
extern crate id3;
extern crate mp3_duration;
extern crate regex;
extern crate reqwest;
use std::{
fs::{
read_dir,
rename,
},
io::{
Read,
},
iter::{
repeat_with,
},
path::{
Path,
PathBuf,
},
time::{
Duration,
}
};
use chrono::{
Datelike,
format::{
ParseResult,
},
NaiveDate,
};
use id3::{
frame::{
Picture,
PictureType,
},
Tag,
Timestamp,
Version,
};
use regex::{
Regex,
};
use crate::{
types::{
AlbumFull,
ClientWithToken,
SimpleError,
Track,
},
utils::{
get_with_retry,
},
whitelist::{
add_whitelist,
},
};
#[derive(Debug)]
pub struct TrackData {
album_name: String,
album_artists: String,
release_date: Option<Timestamp>,
image_url: Option<String>,
track_name: String,
track_number: i32,
track_artists: Option<String>,
expected_duration_ms: i32,
}
impl TrackData {
pub fn release_date_from(
album_full: &AlbumFull,
) -> ParseResult<Timestamp> {
let mut year = -1;
let mut month = None;
let mut day = None;
if album_full.release_date_precision == "year" |
if album_full.release_date_precision == "month" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m",
)?;
year = date.year();
month = Some(date.month() as u8);
}
else if album_full.release_date_precision == "day" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m-%d",
)?;
year = date.year();
month = Some(date.month() as u8);
day = Some(date.day() as u8);
}
Ok(Timestamp {
year: year,
month: month,
day: day,
hour: None,
minute: None,
second: None,
})
}
pub fn from(
track: Track,
album_full: &AlbumFull,
) -> Self {
let album_artists = album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
let track_artists = track.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
Self {
album_name: album_full.name.clone(),
album_artists: album_artists.clone(),
release_date: Self::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: track.name,
track_number: track.track_number,
track_artists: Some(track_artists).filter(|artists| {
// need clone?
artists != &album_artists
}),
expected_duration_ms: track.duration_ms,
}
}
}
fn get_tracks_files(
abs_path: &Path,
) -> Result<Vec<PathBuf>, SimpleError> {
read_dir(abs_path).map_err(SimpleError::from).and_then(|dir_iter| {
dir_iter.map(|entry| {
entry.map(|entry_ok| {
entry_ok.path()
}).map_err(SimpleError::from)
}).collect::<Result<Vec<PathBuf>, SimpleError>>()
}).map(|mut paths| {
paths.sort();
paths.into_iter().filter(|path| {
path.is_file()
}).collect()
})
}
pub fn get_tracks_data(
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<Vec<TrackData>, SimpleError> {
let mut tracks = Vec::new();
let mut paging = album_full.tracks.clone();
while let Some(next_url) = paging.next {
tracks.append(&mut paging.items);
paging = get_with_retry(
&next_url[..],
client_with_token,
)?;
}
tracks.append(&mut paging.items);
Ok(tracks.into_iter().map(|track| {
TrackData::from(track, album_full)
}).collect())
}
fn norm_track_number(
track_number: i32,
) -> String {
if track_number < 10 {
return format!("0{}", track_number);
}
track_number.to_string()
}
fn expected_time(
file: &PathBuf,
track_data: &TrackData,
) -> bool {
let actual_duration = mp3_duration::from_path(file.as_path()).expect(
&format!("error measuring {}", file.display())[..],
);
let expected_duration = Duration::from_millis(
track_data.expected_duration_ms as u64,
);
actual_duration.checked_sub(expected_duration).or(
expected_duration.checked_sub(actual_duration)
).and_then(|res| {
res.checked_sub(Duration::from_secs(5))
}).is_none()
}
fn get_image(
image_url: &str,
) -> Result<Vec<u8>, SimpleError> {
reqwest::get(image_url).map_err(SimpleError::from).and_then(|response| {
response.bytes().map(|byte_res| {
byte_res.map_err(SimpleError::from)
}).collect()
})
}
fn add_image(
tags: &mut Tag,
image: &Vec<u8>,
) {
tags.add_picture(Picture {
mime_type: "image/jpeg".to_string(),
picture_type: PictureType::CoverFront,
description: format!(
"Cover for {} by {}",
tags.album().expect("error in writing tags"),
tags.artist().expect("error in writing tags"),
),
data: image.clone(),
});
}
fn annotate_tags(
tags: &mut Tag,
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
) -> String {
lazy_static! {
static ref INVALID_FILE_CHRS: Regex = Regex::new(r"[^\w\s.\(\)]+").unwrap();
}
let mut new_name = format!(
"{} {}.mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
if !expected_time(file, &track_data) {
new_name = format!(
"{} {} (unexpected duration).mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
}
tags.set_album(track_data.album_name);
let album_artists = track_data.album_artists.clone();
track_data.track_artists.map(|artists| {
tags.set_album_artist(album_artists.clone());
tags.set_artist(artists);
}).unwrap_or_else(|| {
tags.set_artist(album_artists);
});
track_data.release_date.map(|date| {
tags.set_date_released(date);
});
tags.set_title(track_data.track_name);
tags.set_track(track_data.track_number as u32);
if !album_image.is_empty() {
add_image(tags, album_image)
}
INVALID_FILE_CHRS.replace_all(&new_name[..], "_").to_string()
}
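/// Writes the assembled ID3v2.4 tag to `file` and, when `rename_file` is
/// set, renames the file to the sanitized name returned by `annotate_tags`.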
fn annotate_file(
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
rename_file: bool,
) -> Result<(), SimpleError> {
let mut tags = Tag::new();
let new_name = annotate_tags(&mut tags, file, track_data, album_image);
tags.write_to_path(file, Version::Id3v24).map_err(SimpleError::from)
.and_then(|_| {
if rename_file {
return file.as_path().file_name().ok_or(SimpleError {
msg: format!("{} not file?", file.display()),
}).and_then(|file_name| {
if new_name != file_name.to_string_lossy() {
return rename(
file,
file.with_file_name(new_name),
).map_err(SimpleError::from);
}
Ok(())
});
}
return Ok(());
})
}
pub fn annotate(
dir: &PathBuf,
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let mut rename_files = true;
let files = get_tracks_files(&abs_path)?;
let mut data = get_tracks_data(album_full, client_with_token)?;
if files.len() != data.len() {
println!(
"number of files in {} should be {}, not renaming",
dir.display(),
data.len(),
);
rename_files = false;
}
let album_image = album_full.images.iter().next().map(|image| {
image.url.clone()
}).map(|url| {
get_image(&url[..]).unwrap_or_else(|err| {
println!("error getting image for {}: {}", album_full.name, err.msg);
vec![]
})
}).unwrap_or_else(|| {
println!("no image for {}", album_full.name);
vec![]
});
let mut track_counter = data.len() as i32;
data.extend(repeat_with(|| {
let track_data = TrackData {
album_name: album_full.name.clone(),
album_artists: album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", "),
release_date: TrackData::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: "unknown track name".to_string(),
track_number: track_counter,
track_artists: None,
expected_duration_ms: 0,
};
track_counter += 1;
track_data
}).take(files.len()));
files.iter().zip(
data.into_iter(),
).map(|(track_file, track_data)| {
annotate_file(track_file, track_data, &album_image, rename_files)
.and_then(|_| {
add_whitelist(dir.to_string_lossy().to_string())
})
}).collect()
}
pub fn test_run(
dir: &PathBuf,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let files = get_tracks_files(&abs_path)?;
files.iter().map(|track_file| {
mp3_duration::from_path(track_file.as_path()).map(|_| {
()
}).unwrap_or_else(|err| {
println!("error measuring {}: {}", track_file.display(), err);
});
Ok(())
}).collect()
}
| {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y",
)?;
year = date.year();
} | conditional_block |
annotate.rs | extern crate chrono;
extern crate id3;
extern crate mp3_duration;
extern crate regex;
extern crate reqwest;
use std::{
fs::{
read_dir,
rename,
},
io::{
Read,
},
iter::{
repeat_with,
},
path::{
Path,
PathBuf,
},
time::{
Duration,
}
};
use chrono::{
Datelike,
format::{
ParseResult,
},
NaiveDate,
};
use id3::{
frame::{
Picture,
PictureType,
},
Tag,
Timestamp,
Version,
};
use regex::{
Regex,
};
use crate::{
types::{
AlbumFull,
ClientWithToken,
SimpleError,
Track,
},
utils::{
get_with_retry,
},
whitelist::{
add_whitelist,
},
};
#[derive(Debug)]
pub struct TrackData {
album_name: String,
album_artists: String,
release_date: Option<Timestamp>,
image_url: Option<String>,
track_name: String,
track_number: i32,
track_artists: Option<String>,
expected_duration_ms: i32,
}
impl TrackData {
pub fn release_date_from(
album_full: &AlbumFull,
) -> ParseResult<Timestamp> {
let mut year = -1;
let mut month = None;
let mut day = None;
if album_full.release_date_precision == "year" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y",
)?;
year = date.year();
}
if album_full.release_date_precision == "month" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m",
)?;
year = date.year();
month = Some(date.month() as u8);
}
else if album_full.release_date_precision == "day" {
let date = NaiveDate::parse_from_str(
&album_full.release_date[..],
"%Y-%m-%d",
)?;
year = date.year();
month = Some(date.month() as u8);
day = Some(date.day() as u8);
}
Ok(Timestamp {
year: year,
month: month,
day: day,
hour: None,
minute: None,
second: None,
})
}
pub fn from(
track: Track,
album_full: &AlbumFull,
) -> Self {
let album_artists = album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
let track_artists = track.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", ");
Self {
album_name: album_full.name.clone(),
album_artists: album_artists.clone(),
release_date: Self::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: track.name,
track_number: track.track_number,
track_artists: Some(track_artists).filter(|artists| {
// need clone?
artists != &album_artists
}),
expected_duration_ms: track.duration_ms,
}
}
}
fn get_tracks_files(
abs_path: &Path,
) -> Result<Vec<PathBuf>, SimpleError> {
read_dir(abs_path).map_err(SimpleError::from).and_then(|dir_iter| {
dir_iter.map(|entry| {
entry.map(|entry_ok| {
entry_ok.path()
}).map_err(SimpleError::from)
}).collect::<Result<Vec<PathBuf>, SimpleError>>()
}).map(|mut paths| {
paths.sort();
paths.into_iter().filter(|path| {
path.is_file()
}).collect()
})
}
pub fn get_tracks_data(
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<Vec<TrackData>, SimpleError> {
let mut tracks = Vec::new();
let mut paging = album_full.tracks.clone();
while let Some(next_url) = paging.next {
tracks.append(&mut paging.items);
paging = get_with_retry(
&next_url[..],
client_with_token,
)?;
}
tracks.append(&mut paging.items);
Ok(tracks.into_iter().map(|track| {
TrackData::from(track, album_full)
}).collect())
}
fn norm_track_number(
track_number: i32,
) -> String |
fn expected_time(
file: &PathBuf,
track_data: &TrackData,
) -> bool {
let actual_duration = mp3_duration::from_path(file.as_path()).expect(
&format!("error measuring {}", file.display())[..],
);
let expected_duration = Duration::from_millis(
track_data.expected_duration_ms as u64,
);
actual_duration.checked_sub(expected_duration).or(
expected_duration.checked_sub(actual_duration)
).and_then(|res| {
res.checked_sub(Duration::from_secs(5))
}).is_none()
}
fn get_image(
image_url: &str,
) -> Result<Vec<u8>, SimpleError> {
reqwest::get(image_url).map_err(SimpleError::from).and_then(|response| {
response.bytes().map(|byte_res| {
byte_res.map_err(SimpleError::from)
}).collect()
})
}
fn add_image(
tags: &mut Tag,
image: &Vec<u8>,
) {
tags.add_picture(Picture {
mime_type: "image/jpeg".to_string(),
picture_type: PictureType::CoverFront,
description: format!(
"Cover for {} by {}",
tags.album().expect("error in writing tags"),
tags.artist().expect("error in writing tags"),
),
data: image.clone(),
});
}
fn annotate_tags(
tags: &mut Tag,
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
) -> String {
lazy_static! {
static ref INVALID_FILE_CHRS: Regex = Regex::new(r"[^\w\s.\(\)]+").unwrap();
}
let mut new_name = format!(
"{} {}.mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
if !expected_time(file, &track_data) {
new_name = format!(
"{} {} (unexpected duration).mp3",
norm_track_number(track_data.track_number),
track_data.track_name,
);
}
tags.set_album(track_data.album_name);
let album_artists = track_data.album_artists.clone();
track_data.track_artists.map(|artists| {
tags.set_album_artist(album_artists.clone());
tags.set_artist(artists);
}).unwrap_or_else(|| {
tags.set_artist(album_artists);
});
track_data.release_date.map(|date| {
tags.set_date_released(date);
});
tags.set_title(track_data.track_name);
tags.set_track(track_data.track_number as u32);
if !album_image.is_empty() {
add_image(tags, album_image)
}
INVALID_FILE_CHRS.replace_all(&new_name[..], "_").to_string()
}
fn annotate_file(
file: &PathBuf,
track_data: TrackData,
album_image: &Vec<u8>,
rename_file: bool,
) -> Result<(), SimpleError> {
let mut tags = Tag::new();
let new_name = annotate_tags(&mut tags, file, track_data, album_image);
tags.write_to_path(file, Version::Id3v24).map_err(SimpleError::from)
.and_then(|_| {
if rename_file {
return file.as_path().file_name().ok_or(SimpleError {
msg: format!("{} not file?", file.display()),
}).and_then(|file_name| {
if new_name != file_name.to_string_lossy() {
return rename(
file,
file.with_file_name(new_name),
).map_err(SimpleError::from);
}
Ok(())
});
}
return Ok(());
})
}
pub fn annotate(
dir: &PathBuf,
album_full: &AlbumFull,
client_with_token: &ClientWithToken,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let mut rename_files = true;
let files = get_tracks_files(&abs_path)?;
let mut data = get_tracks_data(album_full, client_with_token)?;
if files.len() != data.len() {
println!(
"number of files in {} should be {}, not renaming",
dir.display(),
data.len(),
);
rename_files = false;
}
let album_image = album_full.images.iter().next().map(|image| {
image.url.clone()
}).map(|url| {
get_image(&url[..]).unwrap_or_else(|err| {
println!("error getting image for {}: {}", album_full.name, err.msg);
vec![]
})
}).unwrap_or_else(|| {
println!("no image for {}", album_full.name);
vec![]
});
let mut track_counter = data.len() as i32;
data.extend(repeat_with(|| {
let track_data = TrackData {
album_name: album_full.name.clone(),
album_artists: album_full.artists.iter().map(|artist| {
artist.name.clone()
}).collect::<Vec<String>>().join(", "),
release_date: TrackData::release_date_from(album_full).ok(),
image_url: album_full.images.iter().next().map(|image| {
image.url.clone()
}),
track_name: "unknown track name".to_string(),
track_number: track_counter,
track_artists: None,
expected_duration_ms: 0,
};
track_counter += 1;
track_data
}).take(files.len()));
files.iter().zip(
data.into_iter(),
).map(|(track_file, track_data)| {
annotate_file(track_file, track_data, &album_image, rename_files)
.and_then(|_| {
add_whitelist(dir.to_string_lossy().to_string())
})
}).collect()
}
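/// Dry run over the files in `dir`: attempts to measure each MP3's duration
/// and prints an error for any file that cannot be parsed; no tags are
/// written and nothing is renamed.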
pub fn test_run(
dir: &PathBuf,
) -> Result<(), SimpleError> {
let abs_path = Path::new("/home/banana/music/").join(&dir.as_path());
let files = get_tracks_files(&abs_path)?;
files.iter().map(|track_file| {
mp3_duration::from_path(track_file.as_path()).map(|_| {
()
}).unwrap_or_else(|err| {
println!("error measuring {}: {}", track_file.display(), err);
});
Ok(())
}).collect()
}
| {
if track_number < 10 {
return format!("0{}", track_number);
}
track_number.to_string()
} | identifier_body |
terminal.rs | use std::{
io::{self, Write},
ops::Range,
cmp::min,
iter::Peekable
};
use crate::{
iface::{TerminalPlugin, FormatLike},
config
};
use smallvec::{smallvec, SmallVec};
use terminfo::{expand, Database, capability as cap};
// pub const CORNER_SW: char = '╗';
const CORNER_SE: char = '╔';
const CORNER_NSE: char = '╠';
const LINE: char = '═';
const TEXT_START: char = '⟦';
const TEXT_END: char = '⟧';
const CORNER_NS: char = '║';
// pub const ERROR_START: char = '!';
// pub const ERROR_END: char = '!';
// pub const CORNER_NW: char = '╝';
const CORNER_NE: char = '╚';
const ERR_START: &str = "!!";
type Color = u8;
mod color {
#![allow(unused)]
use super::Color;
pub const TEXT_WHITE: Color = 251;
pub const CYAN: Color = 6;
pub const YELLOW: Color = 3;
pub const RED: Color = 1;
pub const BRIGHT_RED: Color = 9;
pub const BRIGHT_GREEN: Color = 10;
pub const LIGHT_GRAY: Color = 243;
pub const LESS_LIGHT_GRAY: Color = 240;
pub const JUNGLE_GREEN: Color = 112;
pub const ORANGE: Color = 208;
pub const SIGNALING_RED: Color = 196;
}
fn fmt_to_color(fmt: FormatLike) -> Color {
use self::FormatLike::*;
match fmt {
Text => color::TEXT_WHITE,
PrimaryText => color::JUNGLE_GREEN,
Lines => color::LIGHT_GRAY,
SoftWarning => color::ORANGE,
HardWarning => color::SIGNALING_RED,
Error => color::RED,
ExplicitOk => color::BRIGHT_GREEN,
Hidden => color::LESS_LIGHT_GRAY
}
}
#[derive(Debug)]
pub struct Terminal {
column_count: usize,
text_segments: SmallVec<[SmallVec<[TextSegment; 2]>; 2]>,
error_segments: Vec<(&'static str, String)>,
terminfo: Database,
}
impl TerminalPlugin for Terminal {
fn new(column_count: usize) -> Self {
let terminfo = Database::from_env().unwrap();
Terminal {
column_count,
text_segments: Default::default(),
error_segments: Default::default(),
terminfo
}
}
fn add_text_segment(&mut self, text: &str, fmt_args: FormatLike) {
self.text_segments.push(smallvec![TextSegment::new(text, fmt_args)]);
}
fn add_error_segment(&mut self, scope: &'static str, msg: &str) {
self.error_segments.push((scope, msg.into()));
}
fn extend_previous_segment(&mut self, text: &str, fmt_args: FormatLike) {
{
if let Some(last) = self.text_segments.last_mut() {
last.push(TextSegment::new(text, fmt_args));
return;
}
}
self.add_text_segment(text, fmt_args);
}
fn flush_to_stdout(&self, prompt_ending: &str) {
//TODO split into multiple functions
// - one for outputting text segments
// - one for outputting error segments
let layout = self.calculate_layout();
let stdout = io::stdout();
let mut term = self.writer(stdout.lock());
self.render_text_segments(&mut term, layout);
self.render_error_segments(&mut term);
term.fmt(FormatLike::Lines);
write!(term, "{}{}", CORNER_NE, prompt_ending).unwrap();
term.reset_fmt();
term.flush().unwrap();
}
}
impl Terminal {
fn render_text_segments<W>(&self, term: &mut TermWriter<W>, layout: Vec<LineLayout>)
where W: Write
{
let mut first = true;
for LineLayout { segments, join_padding, rem_padding } in layout {
term.fmt(FormatLike::Lines);
if first {
first = false;
write!(term, "{}", CORNER_SE).unwrap();
} else {
write!(term, "{}", CORNER_NSE).unwrap();
}
for segment_group in &self.text_segments[segments] {
for segment in segment_group {
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_START).unwrap();
term.fmt(segment.fmt);
write!(term, "{}", &segment.text).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_END).unwrap();
}
for _ in 0..join_padding {
write!(term, "{}", LINE).unwrap();
}
}
for _ in 0..rem_padding {
write!(term, "{}", LINE).unwrap();
}
write!(term, "\n").unwrap();
}
}
fn render_error_segments<W>(&self, term: &mut TermWriter<W>)
where W: Write
{
for (scope, text) in self.error_segments.iter() {
term.fmt(FormatLike::Lines);
write!(term, "{}", CORNER_NSE).unwrap();
term.fmt(FormatLike::Error);
let mut text = text.trim();
write!(term, "{} {}: ", ERR_START, scope).unwrap();
let bulk_len = 1 + ERR_START.len() + 1 + scope.len() + 2;
let mut rem_len = self.column_count.checked_sub(bulk_len).unwrap_or(0);
loop {
if text.len() <= rem_len {
term.fmt(FormatLike::Error);
write!(term, "{}", text).unwrap();
break;
} else {
//find split point and split text
let split_idx = find_viable_split_idx(text, rem_len);
let (line_text, new_text) = text.split_at(split_idx);
text = new_text.trim_start();
rem_len = self.column_count - 3;
term.fmt(FormatLike::Error);
write!(term, "{text}", text=line_text.trim_end()).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "\n{sep}", sep=CORNER_NS).unwrap();
for _ in 0..ERR_START.len()+1 {
write!(term, " ").unwrap();
}
}
}
write!(term, "\n").unwrap();
}
}
}
fn find_viable_split_idx(text: &str, max_len: usize) -> usize {
let mut last_split_idx = 0;
let mut last_char_idx = 0;
for (idx, ch) in text.char_indices() {
if idx + ch.len_utf8() > max_len {
break;
}
last_char_idx = idx;
if !(ch.is_alphanumeric() || ch == '.' || ch == '!' || ch == ':' || ch == '?') {
last | t_split_idx == 0 {
last_char_idx
} else {
last_split_idx
}
}
impl Terminal {
fn writer<W>(&self, out: W) -> TermWriter<W>
where W: Write
{
TermWriter {
terminal: self,
out
}
}
fn calculate_layout(&self) -> Vec<LineLayout> {
// -1 as it starts with a `╠` or similar
let init_rem_space = self.column_count - 1;
let mut lines = Vec::new();
let mut text_segments = self.text_segments.iter().peekable();
let mut idx_offset = 0;
while let Some(line) = calc_next_line_layout(&mut text_segments, init_rem_space, idx_offset) {
idx_offset = line.segments.end;
lines.push(line)
}
lines
}
}
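/// Greedy line packing: always take the next segment group, then keep
/// pulling further groups while their minimum rendered width still fits in
/// the remaining space, and report how the leftover columns are split into
/// per-join padding and trailing padding.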
fn calc_next_line_layout<'a>(
iter: &mut Peekable<impl Iterator<Item=impl IntoIterator<Item=&'a TextSegment>+Copy>>,
init_rem_space: usize,
idx_offset: usize
) -> Option<LineLayout> {
let first_seg =
match iter.next() {
Some(seg) => seg,
None => {return None;}
};
let first_item = idx_offset;
let mut after_last_item = idx_offset + 1;
let first_len = calc_min_segment_group_len(first_seg);
if first_len >= init_rem_space {
let segments = first_item..after_last_item;
return Some(LineLayout {
segments,
join_padding: 0,
rem_padding: 0
});
}
let mut rem_space = init_rem_space - first_len;
while let Some(segment_group_iter) = iter.peek().map(|i| *i) {
let min_len = calc_min_segment_group_len(segment_group_iter);
if rem_space > min_len {
rem_space -= min_len;
after_last_item += 1;
iter.next();
} else {
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
return Some(LineLayout { segments, join_padding, rem_padding })
}
}
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
Some(LineLayout { segments, join_padding, rem_padding })
}
fn calc_padding(
first_item: usize,
after_last_item: usize,
rem_space: usize
) -> (usize, usize) {
let nr_items = after_last_item - first_item;
let join_padding = rem_space / nr_items;
let join_padding = min(join_padding, config::MAX_JOIN_PADDING);
let rem_padding = rem_space - (join_padding * nr_items);
(join_padding, rem_padding)
}
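// Worked example (assuming config::MAX_JOIN_PADDING >= 3): packing 3 items
// with 11 spare columns yields join_padding = min(11 / 3, MAX) = 3 and
// rem_padding = 11 - 3 * 3 = 2 trailing line characters.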
fn calc_min_segment_group_len<'a>(group: impl IntoIterator<Item=&'a TextSegment>) -> usize {
// +2 as in TEXT_START(char) + TEXT_END(char)
group.into_iter().map(|seg| seg.pre_calculated_length + 2).sum()
}
struct LineLayout {
segments: Range<usize>,
join_padding: usize,
rem_padding: usize
}
struct TermWriter<'a, W: Write+'a> {
terminal: &'a Terminal,
out: W
}
impl<'a, W: 'a> TermWriter<'a, W>
where W: Write
{
fn fmt(&mut self, fmt: FormatLike) {
write!(&mut self.out, "\x01").unwrap();
let color = fmt_to_color(fmt);
if let Some(cap) = self.terminal.terminfo.get::<cap::SetAForeground>() {
expand!(&mut self.out, cap.as_ref(); color).unwrap();
}
write!(&mut self.out, "\x02").unwrap();
}
fn reset_fmt(&mut self) {
write!(&mut self.out, "\x01").unwrap();
let terminfo = &self.terminal.terminfo;
if let Some(cap) = terminfo.get::<cap::ExitAttributeMode>() {
expand!(&mut self.out, cap.as_ref();).unwrap();
} else if let Some(cap) = terminfo.get::<cap::SetAttributes>() {
expand!(&mut self.out, cap.as_ref(); 0).unwrap();
} else if let Some(cap) = terminfo.get::<cap::OrigPair>() {
expand!(&mut self.out, cap.as_ref();).unwrap()
}
write!(&mut self.out, "\x02").unwrap();
}
}
impl<'a, W: 'a> Write for TermWriter<'a, W>
where W: Write
{
fn flush(&mut self) -> Result<(), io::Error> {
self.out.flush()
}
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
self.out.write(buf)
}
}
#[derive(Debug)]
struct TextSegment {
text: String,
fmt: FormatLike,
pre_calculated_length: usize,
}
impl TextSegment {
pub fn new(text: impl Into<String>, fmt: FormatLike) -> Self {
let text = text.into();
let len = text.chars().count();
TextSegment {
text,
fmt,
pre_calculated_length: len,
}
}
} | _split_idx = idx;
}
}
if las | conditional_block |
terminal.rs | use std::{
io::{self, Write},
ops::Range,
cmp::min,
iter::Peekable
};
use crate::{
iface::{TerminalPlugin, FormatLike},
config
};
use smallvec::{smallvec, SmallVec};
use terminfo::{expand, Database, capability as cap};
// pub const CORNER_SW: char = '╗';
const CORNER_SE: char = '╔';
const CORNER_NSE: char = '╠';
const LINE: char = '═';
const TEXT_START: char = '⟦';
const TEXT_END: char = '⟧';
const CORNER_NS: char = '║';
// pub const ERROR_START: char = '!';
// pub const ERROR_END: char = '!';
// pub const CORNER_NW: char = '╝';
const CORNER_NE: char = '╚';
const ERR_START: &str = "!!";
type Color = u8;
mod color {
#![allow(unused)]
use super::Color;
pub const TEXT_WHITE: Color = 251;
pub const CYAN: Color = 6;
pub const YELLOW: Color = 3;
pub const RED: Color = 1;
pub const BRIGHT_RED: Color = 9;
pub const BRIGHT_GREEN: Color = 10;
pub const LIGHT_GRAY: Color = 243;
pub const LESS_LIGHT_GRAY: Color = 240;
pub const JUNGLE_GREEN: Color = 112;
pub const ORANGE: Color = 208;
pub const SIGNALING_RED: Color = 196;
}
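// Maps each semantic FormatLike variant to a fixed slot of the 256-color
// terminal palette declared above.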
fn fmt_to_color(fmt: FormatLike) -> Color {
use self::FormatLike::*;
match fmt {
Text => color::TEXT_WHITE,
PrimaryText => color::JUNGLE_GREEN,
Lines => color::LIGHT_GRAY,
SoftWarning => color::ORANGE,
HardWarning => color::SIGNALING_RED,
Error => color::RED,
ExplicitOk => color::BRIGHT_GREEN,
Hidden => color::LESS_LIGHT_GRAY
}
}
#[derive(Debug)]
pub struct Terminal {
column_count: usize,
text_segments: SmallVec<[SmallVec<[TextSegment; 2]>; 2]>,
error_segments: Vec<(&'static str, String)>,
terminfo: Database,
}
impl TerminalPlugin for Terminal {
fn new(column_count: usize) -> Self {
let terminfo = Database::from_env().unwrap();
Terminal {
column_count,
text_segments: Default::default(),
error_segments: Default::default(),
terminfo
}
}
fn add_text_segment(&mut self, text: &str, fmt_args: FormatLike) {
self.text_segments.push(smallvec![TextSegment::new(text, fmt_args)]);
}
fn add_error_segment(&mut self, scope: &'static str, msg: &str) {
self.error_segments.push((scope, msg.into()));
}
fn extend_previous_segment(&mut self, text: &str, fmt_args: FormatLike) {
{
if let Some(last) = self.text_segments.last_mut() {
last.push(TextSegment::new(text, fmt_args));
return;
}
}
self.add_text_segment(text, fmt_args);
}
fn flush_to_stdout(&self, prompt_ending: &str) {
//TODO split into multiple functions
// - one for outputting text segments
// - one for outputting error segments
let layout = self.calculate_layout();
let stdout = io::stdout();
let mut term = self.writer(stdout.lock());
self.render_text_segments(&mut term, layout);
self.render_error_segments(&mut term);
term.fmt(FormatLike::Lines);
write!(term, "{}{}", CORNER_NE, prompt_ending).unwrap();
term.reset_fmt();
term.flush().unwrap();
}
}
impl Terminal {
fn render_text_segments<W>(&self, term: &mut TermWriter<W>, layout: Vec<LineLayout>)
where W: Write
{
let mut first = true;
for LineLayout { segments, join_padding, rem_padding } in layout {
term.fmt(FormatLike::Lines);
if first {
first = false;
write!(term, "{}", CORNER_SE).unwrap();
} else {
write!(term, "{}", CORNER_NSE).unwrap();
}
for segment_group in &self.text_segments[segments] {
for segment in segment_group {
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_START).unwrap();
term.fmt(segment.fmt);
write!(term, "{}", &segment.text).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_END).unwrap();
}
for _ in 0..join_padding {
write!(term, "{}", LINE).unwrap();
}
}
for _ in 0..rem_padding {
write!(term, "{}", LINE).unwrap();
}
write!(term, "\n").unwrap();
}
}
fn render_error_segments<W>(&self, term: &mut TermWriter<W>)
where W: Write
{
for (scope, text) in self.error_segments.iter() {
term.fmt(FormatLike::Lines);
write!(term, "{}", CORNER_NSE).unwrap();
term.fmt(FormatLike::Error);
let mut text = text.trim();
write!(term, "{} {}: ", ERR_START, scope).unwrap();
let bulk_len = 1 + ERR_START.len() + 1 + scope.len() + 2;
let mut rem_len = self.column_count.checked_sub(bulk_len).unwrap_or(0);
loop {
if text.len() <= rem_len {
term.fmt(FormatLike::Error);
write!(term, "{}", text).unwrap();
break;
} else {
//find split point and split text
let split_idx = find_viable_split_idx(text, rem_len);
let (line_text, new_text) = text.split_at(split_idx);
text = new_text.trim_start();
rem_len = self.column_count.saturating_sub(3); // guard very narrow terminals against underflow
term.fmt(FormatLike::Error);
write!(term, "{text}", text=line_text.trim_end()).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "\n{sep}", sep=CORNER_NS).unwrap();
for _ in 0..ERR_START.len()+1 {
write!(term, " ").unwrap();
}
}
}
write!(term, "\n").unwrap();
}
}
}
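// Picks a byte index at which an error message can wrap: the last separator
// (anything but alphanumerics and . ! : ?) that still fits in `max_len`
// bytes, falling back to a hard split after the last full character.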
fn find_viable_split_idx(text: &str, max_len: usize) -> usize {
let mut last_split_idx = 0;
let mut last_char_idx = 0;
for (idx, ch) in text.char_indices() {
if idx + ch.len_utf8() > max_len {
break;
}
last_char_idx = idx;
if !(ch.is_alphanumeric() || ch == '.' || ch=='!' || ch==':' || ch=='?') {
last_split_idx = idx;
}
}
if last_split_idx == 0 {
last_char_idx
} else {
last_split_idx
}
}
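// Illustrative check of the wrapping rule above (a sketch added for clarity;
// it was not part of the original file).
#[cfg(test)]
mod find_split_tests {
use super::find_viable_split_idx;
#[test]
fn prefers_last_separator_that_fits() {
// Bytes 0..=10 of "error: disk full" fit into max_len = 11; the last
// separator among them is the space at byte 6 (':' counts as word-like).
assert_eq!(find_viable_split_idx("error: disk full", 11), 6);
}
}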
impl Terminal {
fn writer<W>(&self, out: W) -> TermWriter<W>
where W: Write
{
TermWriter {
terminal: self,
out
}
}
fn calculate_layout(&self) -> Vec<LineLayout> {
// -1 as it starts with a `╠` or similar
let init_rem_space = self.column_count - 1;
let mut lines = Vec::new();
let mut text_segments = self.text_segments.iter().peekable();
let mut idx_offset = 0;
while let Some(line) = calc_next_line_layout(&mut text_segments, init_rem_space, idx_offset) {
idx_offset = line.segments.end;
lines.push(line)
}
lines
}
}
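// Greedy first-fit packing: append segment groups to the current line while
// their minimum rendered width still fits, then distribute the leftover
// columns as padding.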
fn calc_next_line_layout<'a>(
iter: &mut Peekable<impl Iterator<Item=impl IntoIterator<Item=&'a TextSegment>+Copy>>,
init_rem_space: usize,
idx_offset: usize
) -> Option<LineLayout> {
let first_seg =
match iter.next() {
Some(seg) => seg,
None => {return None;}
};
let first_item = idx_offset;
let mut after_last_item = idx_offset + 1;
let first_len = calc_min_segment_group_len(first_seg);
if first_len >= init_rem_space {
let segments = first_item..after_last_item;
return Some(LineLayout {
segments,
join_padding: 0,
rem_padding: 0
});
}
let mut rem_space = init_rem_space - first_len;
while let Some(segment_group_iter) = iter.peek().map(|i| *i) {
let min_len = calc_min_segment_group_len(segment_group_iter);
if rem_space > min_len {
rem_space -= min_len;
after_last_item += 1;
iter.next();
} else {
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
return Some(LineLayout { segments, join_padding, rem_padding })
}
}
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
Some(LineLayout { segments, join_padding, rem_padding })
}
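// Splits the leftover columns into equal padding between groups (capped at
// config::MAX_JOIN_PADDING) plus a remainder drawn after the last group,
// e.g. rem_space = 7 over 3 groups gives join_padding = 2, rem_padding = 1
// (assuming MAX_JOIN_PADDING >= 2).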
fn calc_padding(
first_item: usize,
after_last_item: usize,
rem_space: usize
) -> (usize, usize) {
let nr_items = after_last_item - first_item;
let join_padding = rem_space / nr_items;
let join_padding = min(join_padding, config::MAX_JOIN_PADDING);
let rem_padding = rem_space - (join_padding * nr_items);
(join_padding, rem_padding)
}
fn calc_min_segment_group_len<'a>(group: impl IntoIterator<Item=&'a TextSegment>) -> usize {
// +2 as in TEXT_START(char) + TEXT_END(char)
group.into_iter().map(|seg| seg.pre_calculated_length + 2).sum()
}
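// One rendered prompt line: a half-open range into Terminal::text_segments
// plus the padding computed by calc_padding.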
struct LineLayout {
segments: Range<usize>,
join_padding: usize,
rem_padding: usize
}
struct TermWriter<'a, W: Write+'a> {
terminal: &'a Terminal,
out: W
}
impl<'a, W: 'a> TermWriter<'a, W>
where W: Write
{
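// The \x01/\x02 bytes bracket non-printing escape sequences (readline's
// RL_PROMPT_START_IGNORE / RL_PROMPT_END_IGNORE), presumably so the shell
// does not count them towards the prompt width.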
fn fmt(&mut self, fmt: FormatLike) {
write!(&mut self.out, "\x01").unwrap();
let color = fmt_to_color(fmt);
if let Some(cap) = self.terminal.terminfo.get::<cap::SetAForeground>() {
expand!(&mut self.out, cap.as_ref(); color).unwrap();
}
write!(&mut self.out, "\x02").unwrap();
}
fn reset_fmt(&mut self) {
write!(&mut self.out, "\x01").unwrap();
let terminfo = &self.terminal.terminfo;
if let Some(cap) = terminfo.get::<cap::ExitAttributeMode>() {
expand!(&mut self.out, cap.as_ref();).unwrap();
} else if let Some(cap) = terminfo.get::<cap::SetAttributes>() {
expand!(&mut self.out, cap.as_ref(); 0).unwrap();
} else if let Some(cap) = terminfo.get::<cap::OrigPair>() {
expand!(&mut self.out, cap.as_ref();).unwrap()
}
write!(&mut self.out, "\x02").unwrap();
}
}
impl<'a, W: 'a> Write for TermWriter<'a, W>
where W: Write
{
fn flush(&mut self) -> | t<(), io::Error> {
self.out.flush()
}
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
self.out.write(buf)
}
}
#[derive(Debug)]
struct TextSegment {
text: String,
fmt: FormatLike,
pre_calculated_length: usize,
}
impl TextSegment {
pub fn new(text: impl Into<String>, fmt: FormatLike) -> Self {
let text = text.into();
let len = text.chars().count();
TextSegment {
text,
fmt,
pre_calculated_length: len,
}
}
} | Resul | identifier_name |
terminal.rs | use std::{
io::{self, Write},
ops::Range,
cmp::min,
iter::Peekable
};
use crate::{
iface::{TerminalPlugin, FormatLike},
config
};
use smallvec::{smallvec, SmallVec};
use terminfo::{expand, Database, capability as cap};
// pub const CORNER_SW: char = '╗';
const CORNER_SE: char = '╔';
const CORNER_NSE: char = '╠';
const LINE: char = '═';
const TEXT_START: char = '⟦';
const TEXT_END: char = '⟧';
const CORNER_NS: char = '║';
// pub const ERROR_START: char = '!';
// pub const ERROR_END: char = '!';
// pub const CORNER_NW: char = '╝';
const CORNER_NE: char = '╚';
const ERR_START: &str = "!!";
type Color = u8;
mod color {
#![allow(unused)]
use super::Color;
pub const TEXT_WHITE: Color = 251;
pub const CYAN: Color = 6;
pub const YELLOW: Color = 3;
pub const RED: Color = 1;
pub const BRIGHT_RED: Color = 9;
pub const BRIGHT_GREEN: Color = 10;
pub const LIGHT_GRAY: Color = 243;
pub const LESS_LIGHT_GRAY: Color = 240;
pub const JUNGLE_GREEN: Color = 112;
pub const ORANGE: Color = 208;
pub const SIGNALING_RED: Color = 196;
}
fn fmt_to_color(fmt: FormatLike) -> Color {
use self::Fo | )]
pub struct Terminal {
column_count: usize,
text_segments: SmallVec<[SmallVec<[TextSegment; 2]>; 2]>,
error_segments: Vec<(&'static str, String)>,
terminfo: Database,
}
impl TerminalPlugin for Terminal {
fn new(column_count: usize) -> Self {
let terminfo = Database::from_env().unwrap();
Terminal {
column_count,
text_segments: Default::default(),
error_segments: Default::default(),
terminfo
}
}
fn add_text_segment(&mut self, text: &str, fmt_args: FormatLike) {
self.text_segments.push(smallvec![TextSegment::new(text, fmt_args)]);
}
fn add_error_segment(&mut self, scope: &'static str, msg: &str) {
self.error_segments.push((scope, msg.into()));
}
fn extend_previous_segment(&mut self, text: &str, fmt_args: FormatLike) {
{
if let Some(last) = self.text_segments.last_mut() {
last.push(TextSegment::new(text, fmt_args));
return;
}
}
self.add_text_segment(text, fmt_args);
}
fn flush_to_stdout(&self, prompt_ending: &str) {
//TODO split into multiple functions
// - one for outputting text segments
// - one for outputting error segments
let layout = self.calculate_layout();
let stdout = io::stdout();
let mut term = self.writer(stdout.lock());
self.render_text_segments(&mut term, layout);
self.render_error_segments(&mut term);
term.fmt(FormatLike::Lines);
write!(term, "{}{}", CORNER_NE, prompt_ending).unwrap();
term.reset_fmt();
term.flush().unwrap();
}
}
impl Terminal {
fn render_text_segments<W>(&self, term: &mut TermWriter<W>, layout: Vec<LineLayout>)
where W: Write
{
let mut first = true;
for LineLayout { segments, join_padding, rem_padding } in layout {
term.fmt(FormatLike::Lines);
if first {
first = false;
write!(term, "{}", CORNER_SE).unwrap();
} else {
write!(term, "{}", CORNER_NSE).unwrap();
}
for segment_group in &self.text_segments[segments] {
for segment in segment_group {
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_START).unwrap();
term.fmt(segment.fmt);
write!(term, "{}", &segment.text).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_END).unwrap();
}
for _ in 0..join_padding {
write!(term, "{}", LINE).unwrap();
}
}
for _ in 0..rem_padding {
write!(term, "{}", LINE).unwrap();
}
write!(term, "\n").unwrap();
}
}
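// Renders each recorded error as `╠ !! scope: message`, wrapping long
// messages onto continuation lines that start under a ║ gutter.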
fn render_error_segments<W>(&self, term: &mut TermWriter<W>)
where W: Write
{
for (scope, text) in self.error_segments.iter() {
term.fmt(FormatLike::Lines);
write!(term, "{}", CORNER_NSE).unwrap();
term.fmt(FormatLike::Error);
let mut text = text.trim();
write!(term, "{} {}: ", ERR_START, scope).unwrap();
let bulk_len = 1 + ERR_START.len() + 1 + scope.len() + 2;
let mut rem_len = self.column_count.checked_sub(bulk_len).unwrap_or(0);
loop {
if text.len() <= rem_len {
term.fmt(FormatLike::Error);
write!(term, "{}", text).unwrap();
break;
} else {
//find split point and split text
let split_idx = find_viable_split_idx(text, rem_len);
let (line_text, new_text) = text.split_at(split_idx);
text = new_text.trim_start();
rem_len = self.column_count.saturating_sub(3); // guard very narrow terminals against underflow
term.fmt(FormatLike::Error);
write!(term, "{text}", text=line_text.trim_end()).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "\n{sep}", sep=CORNER_NS).unwrap();
for _ in 0..ERR_START.len()+1 {
write!(term, " ").unwrap();
}
}
}
write!(term, "\n").unwrap();
}
}
}
fn find_viable_split_idx(text: &str, max_len: usize) -> usize {
let mut last_split_idx = 0;
let mut last_char_idx = 0;
for (idx, ch) in text.char_indices() {
if idx + ch.len_utf8() > max_len {
break;
}
last_char_idx = idx;
if !(ch.is_alphanumeric() || ch == '.' || ch=='!' || ch==':' || ch=='?') {
last_split_idx = idx;
}
}
if last_split_idx == 0 {
last_char_idx
} else {
last_split_idx
}
}
impl Terminal {
fn writer<W>(&self, out: W) -> TermWriter<W>
where W: Write
{
TermWriter {
terminal: self,
out
}
}
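// Walks all segment groups once, emitting one LineLayout per rendered line;
// idx_offset keeps the Range indices aligned with self.text_segments.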
fn calculate_layout(&self) -> Vec<LineLayout> {
// -1 as it starts with a `╠` or similar
let init_rem_space = self.column_count - 1;
let mut lines = Vec::new();
let mut text_segments = self.text_segments.iter().peekable();
let mut idx_offset = 0;
while let Some(line) = calc_next_line_layout(&mut text_segments, init_rem_space, idx_offset) {
idx_offset = line.segments.end;
lines.push(line)
}
lines
}
}
fn calc_next_line_layout<'a>(
iter: &mut Peekable<impl Iterator<Item=impl IntoIterator<Item=&'a TextSegment>+Copy>>,
init_rem_space: usize,
idx_offset: usize
) -> Option<LineLayout> {
let first_seg =
match iter.next() {
Some(seg) => seg,
None => {return None;}
};
let first_item = idx_offset;
let mut after_last_item = idx_offset + 1;
let first_len = calc_min_segment_group_len(first_seg);
if first_len >= init_rem_space {
let segments = first_item..after_last_item;
return Some(LineLayout {
segments,
join_padding: 0,
rem_padding: 0
});
}
let mut rem_space = init_rem_space - first_len;
while let Some(segment_group_iter) = iter.peek().map(|i| *i) {
let min_len = calc_min_segment_group_len(segment_group_iter);
if rem_space > min_len {
rem_space -= min_len;
after_last_item += 1;
iter.next();
} else {
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
return Some(LineLayout { segments, join_padding, rem_padding })
}
}
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
Some(LineLayout { segments, join_padding, rem_padding })
}
fn calc_padding(
first_item: usize,
after_last_item: usize,
rem_space: usize
) -> (usize, usize) {
let nr_items = after_last_item - first_item;
let join_padding = rem_space / nr_items;
let join_padding = min(join_padding, config::MAX_JOIN_PADDING);
let rem_padding = rem_space - (join_padding * nr_items);
(join_padding, rem_padding)
}
fn calc_min_segment_group_len<'a>(group: impl IntoIterator<Item=&'a TextSegment>) -> usize {
// +2 as in TEXT_START(char) + TEXT_END(char)
group.into_iter().map(|seg| seg.pre_calculated_length + 2).sum()
}
struct LineLayout {
segments: Range<usize>,
join_padding: usize,
rem_padding: usize
}
struct TermWriter<'a, W: Write+'a> {
terminal: &'a Terminal,
out: W
}
impl<'a, W: 'a> TermWriter<'a, W>
where W: Write
{
fn fmt(&mut self, fmt: FormatLike) {
write!(&mut self.out, "\x01").unwrap();
let color = fmt_to_color(fmt);
if let Some(cap) = self.terminal.terminfo.get::<cap::SetAForeground>() {
expand!(&mut self.out, cap.as_ref(); color).unwrap();
}
write!(&mut self.out, "\x02").unwrap();
}
fn reset_fmt(&mut self) {
write!(&mut self.out, "\x01").unwrap();
let terminfo = &self.terminal.terminfo;
if let Some(cap) = terminfo.get::<cap::ExitAttributeMode>() {
expand!(&mut self.out, cap.as_ref();).unwrap();
} else if let Some(cap) = terminfo.get::<cap::SetAttributes>() {
expand!(&mut self.out, cap.as_ref(); 0).unwrap();
} else if let Some(cap) = terminfo.get::<cap::OrigPair>() {
expand!(&mut self.out, cap.as_ref();).unwrap()
}
write!(&mut self.out, "\x02").unwrap();
}
}
impl<'a, W: 'a> Write for TermWriter<'a, W>
where W: Write
{
fn flush(&mut self) -> Result<(), io::Error> {
self.out.flush()
}
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
self.out.write(buf)
}
}
#[derive(Debug)]
struct TextSegment {
text: String,
fmt: FormatLike,
pre_calculated_length: usize,
}
impl TextSegment {
pub fn new(text: impl Into<String>, fmt: FormatLike) -> Self {
let text = text.into();
let len = text.chars().count();
TextSegment {
text,
fmt,
pre_calculated_length: len,
}
}
} | rmatLike::*;
match fmt {
Text => color::TEXT_WHITE,
PrimaryText => color::JUNGLE_GREEN,
Lines => color::LIGHT_GRAY,
SoftWarning => color::ORANGE,
HardWarning => color::SIGNALING_RED,
Error => color::RED,
ExplicitOk => color::BRIGHT_GREEN,
Hidden => color::LESS_LIGHT_GRAY
}
}
#[derive(Debug | identifier_body |
terminal.rs | use std::{
io::{self, Write},
ops::Range,
cmp::min,
iter::Peekable
};
use crate::{
iface::{TerminalPlugin, FormatLike},
config
};
use smallvec::{smallvec, SmallVec};
use terminfo::{expand, Database, capability as cap};
// pub const CORNER_SW: char = '╗';
const CORNER_SE: char = '╔';
const CORNER_NSE: char = '╠';
const LINE: char = '═';
const TEXT_START: char = '⟦';
const TEXT_END: char = '⟧';
const CORNER_NS: char = '║';
// pub const ERROR_START: char = '!';
// pub const ERROR_END: char = '!';
// pub const CORNER_NW: char = '╝';
const CORNER_NE: char = '╚';
const ERR_START: &str = "!!";
type Color = u8;
mod color {
#![allow(unused)]
use super::Color;
pub const TEXT_WHITE: Color = 251;
pub const CYAN: Color = 6;
pub const YELLOW: Color = 3;
pub const RED: Color = 1;
pub const BRIGHT_RED: Color = 9;
pub const BRIGHT_GREEN: Color = 10;
pub const LIGHT_GRAY: Color = 243;
pub const LESS_LIGHT_GRAY: Color = 240;
pub const JUNGLE_GREEN: Color = 112;
pub const ORANGE: Color = 208;
pub const SIGNALING_RED: Color = 196;
}
fn fmt_to_color(fmt: FormatLike) -> Color {
use self::FormatLike::*;
match fmt {
Text => color::TEXT_WHITE,
PrimaryText => color::JUNGLE_GREEN,
Lines => color::LIGHT_GRAY,
SoftWarning => color::ORANGE,
HardWarning => color::SIGNALING_RED,
Error => color::RED,
ExplicitOk => color::BRIGHT_GREEN,
Hidden => color::LESS_LIGHT_GRAY
}
}
#[derive(Debug)]
pub struct Terminal {
column_count: usize,
text_segments: SmallVec<[SmallVec<[TextSegment; 2]>; 2]>,
error_segments: Vec<(&'static str, String)>,
terminfo: Database,
}
impl TerminalPlugin for Terminal {
fn new(column_count: usize) -> Self {
let terminfo = Database::from_env().unwrap();
Terminal {
column_count,
text_segments: Default::default(),
error_segments: Default::default(),
terminfo
}
}
fn add_text_segment(&mut self, text: &str, fmt_args: FormatLike) {
self.text_segments.push(smallvec![TextSegment::new(text, fmt_args)]);
}
fn add_error_segment(&mut self, scope: &'static str, msg: &str) {
self.error_segments.push((scope, msg.into()));
}
fn extend_previous_segment(&mut self, text: &str, fmt_args: FormatLike) {
{
if let Some(last) = self.text_segments.last_mut() {
last.push(TextSegment::new(text, fmt_args));
return; | self.add_text_segment(text, fmt_args);
}
fn flush_to_stdout(&self, prompt_ending: &str) {
//TODO split into multiple functions
// - one for outputting text segments
// - one for outputting error segments
let layout = self.calculate_layout();
let stdout = io::stdout();
let mut term = self.writer(stdout.lock());
self.render_text_segments(&mut term, layout);
self.render_error_segments(&mut term);
term.fmt(FormatLike::Lines);
write!(term, "{}{}", CORNER_NE, prompt_ending).unwrap();
term.reset_fmt();
term.flush().unwrap();
}
}
impl Terminal {
fn render_text_segments<W>(&self, term: &mut TermWriter<W>, layout: Vec<LineLayout>)
where W: Write
{
let mut first = true;
for LineLayout { segments, join_padding, rem_padding } in layout {
term.fmt(FormatLike::Lines);
if first {
first = false;
write!(term, "{}", CORNER_SE).unwrap();
} else {
write!(term, "{}", CORNER_NSE).unwrap();
}
for segment_group in &self.text_segments[segments] {
for segment in segment_group {
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_START).unwrap();
term.fmt(segment.fmt);
write!(term, "{}", &segment.text).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "{}", TEXT_END).unwrap();
}
for _ in 0..join_padding {
write!(term, "{}", LINE).unwrap();
}
}
for _ in 0..rem_padding {
write!(term, "{}", LINE).unwrap();
}
write!(term, "\n").unwrap();
}
}
fn render_error_segments<W>(&self, term: &mut TermWriter<W>)
where W: Write
{
for (scope, text) in self.error_segments.iter() {
term.fmt(FormatLike::Lines);
write!(term, "{}", CORNER_NSE).unwrap();
term.fmt(FormatLike::Error);
let mut text = text.trim();
write!(term, "{} {}: ", ERR_START, scope).unwrap();
let bulk_len = 1 + ERR_START.len() + 1 + scope.len() + 2;
let mut rem_len = self.column_count.checked_sub(bulk_len).unwrap_or(0);
loop {
if text.len() <= rem_len {
term.fmt(FormatLike::Error);
write!(term, "{}", text).unwrap();
break;
} else {
//find split point and split text
let split_idx = find_viable_split_idx(text, rem_len);
let (line_text, new_text) = text.split_at(split_idx);
text = new_text.trim_start();
rem_len = self.column_count.saturating_sub(3); // guard very narrow terminals against underflow
term.fmt(FormatLike::Error);
write!(term, "{text}", text=line_text.trim_end()).unwrap();
term.fmt(FormatLike::Lines);
write!(term, "\n{sep}", sep=CORNER_NS).unwrap();
for _ in 0..ERR_START.len()+1 {
write!(term, " ").unwrap();
}
}
}
write!(term, "\n").unwrap();
}
}
}
fn find_viable_split_idx(text: &str, max_len: usize) -> usize {
let mut last_split_idx = 0;
let mut last_char_idx = 0;
for (idx, ch) in text.char_indices() {
if idx + ch.len_utf8() > max_len {
break;
}
last_char_idx = idx;
if !(ch.is_alphanumeric() || ch == '.' || ch=='!' || ch==':' || ch=='?') {
last_split_idx = idx;
}
}
if last_split_idx == 0 {
last_char_idx
} else {
last_split_idx
}
}
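// Layout is computed in full before anything is written, so padding can be
// distributed across each line up front.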
impl Terminal {
fn writer<W>(&self, out: W) -> TermWriter<W>
where W: Write
{
TermWriter {
terminal: self,
out
}
}
fn calculate_layout(&self) -> Vec<LineLayout> {
// -1 as it starts with a `╠` or similar
let init_rem_space = self.column_count - 1;
let mut lines = Vec::new();
let mut text_segments = self.text_segments.iter().peekable();
let mut idx_offset = 0;
while let Some(line) = calc_next_line_layout(&mut text_segments, init_rem_space, idx_offset) {
idx_offset = line.segments.end;
lines.push(line)
}
lines
}
}
fn calc_next_line_layout<'a>(
iter: &mut Peekable<impl Iterator<Item=impl IntoIterator<Item=&'a TextSegment>+Copy>>,
init_rem_space: usize,
idx_offset: usize
) -> Option<LineLayout> {
let first_seg =
match iter.next() {
Some(seg) => seg,
None => {return None;}
};
let first_item = idx_offset;
let mut after_last_item = idx_offset + 1;
let first_len = calc_min_segment_group_len(first_seg);
if first_len >= init_rem_space {
let segments = first_item..after_last_item;
return Some(LineLayout {
segments,
join_padding: 0,
rem_padding: 0
});
}
let mut rem_space = init_rem_space - first_len;
while let Some(segment_group_iter) = iter.peek().map(|i| *i) {
let min_len = calc_min_segment_group_len(segment_group_iter);
if rem_space > min_len {
rem_space -= min_len;
after_last_item += 1;
iter.next();
} else {
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
return Some(LineLayout { segments, join_padding, rem_padding })
}
}
let segments = first_item..after_last_item;
let (join_padding, rem_padding) = calc_padding(first_item, after_last_item, rem_space);
Some(LineLayout { segments, join_padding, rem_padding })
}
fn calc_padding(
first_item: usize,
after_last_item: usize,
rem_space: usize
) -> (usize, usize) {
let nr_items = after_last_item - first_item;
let join_padding = rem_space / nr_items;
let join_padding = min(join_padding, config::MAX_JOIN_PADDING);
let rem_padding = rem_space - (join_padding * nr_items);
(join_padding, rem_padding)
}
fn calc_min_segment_group_len<'a>(group: impl IntoIterator<Item=&'a TextSegment>) -> usize {
// +2 as in TEXT_START(char) + TEXT_END(char)
group.into_iter().map(|seg| seg.pre_calculated_length + 2).sum()
}
struct LineLayout {
segments: Range<usize>,
join_padding: usize,
rem_padding: usize
}
struct TermWriter<'a, W: Write+'a> {
terminal: &'a Terminal,
out: W
}
impl<'a, W: 'a> TermWriter<'a, W>
where W: Write
{
fn fmt(&mut self, fmt: FormatLike) {
write!(&mut self.out, "\x01").unwrap();
let color = fmt_to_color(fmt);
if let Some(cap) = self.terminal.terminfo.get::<cap::SetAForeground>() {
expand!(&mut self.out, cap.as_ref(); color).unwrap();
}
write!(&mut self.out, "\x02").unwrap();
}
fn reset_fmt(&mut self) {
write!(&mut self.out, "\x01").unwrap();
let terminfo = &self.terminal.terminfo;
if let Some(cap) = terminfo.get::<cap::ExitAttributeMode>() {
expand!(&mut self.out, cap.as_ref();).unwrap();
} else if let Some(cap) = terminfo.get::<cap::SetAttributes>() {
expand!(&mut self.out, cap.as_ref(); 0).unwrap();
} else if let Some(cap) = terminfo.get::<cap::OrigPair>() {
expand!(&mut self.out, cap.as_ref();).unwrap()
}
write!(&mut self.out, "\x02").unwrap();
}
}
impl<'a, W: 'a> Write for TermWriter<'a, W>
where W: Write
{
fn flush(&mut self) -> Result<(), io::Error> {
self.out.flush()
}
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
self.out.write(buf)
}
}
#[derive(Debug)]
struct TextSegment {
text: String,
fmt: FormatLike,
pre_calculated_length: usize,
}
impl TextSegment {
pub fn new(text: impl Into<String>, fmt: FormatLike) -> Self {
let text = text.into();
let len = text.chars().count();
TextSegment {
text,
fmt,
pre_calculated_length: len,
}
}
} | }
} | random_line_split |
mock.rs | // This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{self as pswap_distribution, Config};
use common::mock::ExistentialDeposits;
use common::prelude::Balance;
use common::{
balance, fixed, fixed_from_basis_points, AssetName, AssetSymbol, BalancePrecision, Fixed,
FromGenericPair,
};
use currencies::BasicCurrencyAdapter;
use frame_support::traits::GenesisBuild;
use frame_support::weights::Weight;
use frame_support::{construct_runtime, parameter_types};
use frame_system;
use hex_literal::hex;
use permissions::Scope;
use sp_core::H256;
use sp_runtime::testing::Header;
use sp_runtime::traits::{BlakeTwo256, IdentityLookup, Zero};
use sp_runtime::{AccountId32, Perbill};
pub type AccountId = AccountId32;
pub type BlockNumber = u64;
pub type Amount = i128;
pub type AssetId = common::AssetId32<common::PredefinedAssetId>;
pub type TechAccountId = common::TechAccountId<AccountId, TechAssetId, DEXId>;
type TechAssetId = common::TechAssetId<common::PredefinedAssetId>;
type DEXId = common::DEXId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
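// Well-known test accounts, each derived from a fixed 32-byte pattern.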
pub fn alice() -> AccountId {
AccountId32::from([1u8; 32])
}
pub fn fees_account_a() -> AccountId {
AccountId32::from([2u8; 32])
}
pub fn fees_account_b() -> AccountId {
AccountId32::from([3u8; 32])
}
pub fn liquidity_provider_a() -> AccountId {
AccountId32::from([4u8; 32])
}
pub fn liquidity_provider_b() -> AccountId |
pub fn liquidity_provider_c() -> AccountId {
AccountId32::from([6u8; 32])
}
pub const DEX_A_ID: DEXId = common::DEXId::Polkaswap;
parameter_types! {
pub GetBaseAssetId: AssetId = common::XOR.into();
pub GetIncentiveAssetId: AssetId = common::PSWAP.into();
pub const PoolTokenAId: AssetId = common::AssetId32::from_bytes(hex!("0211110000000000000000000000000000000000000000000000000000000000"));
pub const PoolTokenBId: AssetId = common::AssetId32::from_bytes(hex!("0222220000000000000000000000000000000000000000000000000000000000"));
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const GetDefaultFee: u16 = 30;
pub const GetDefaultProtocolFee: u16 = 0;
pub GetPswapDistributionTechAccountId: TechAccountId = {
let tech_account_id = TechAccountId::from_generic_pair(
crate::TECH_ACCOUNT_PREFIX.to_vec(),
crate::TECH_ACCOUNT_MAIN.to_vec(),
);
tech_account_id
};
pub GetPswapDistributionAccountId: AccountId = {
let tech_account_id = GetPswapDistributionTechAccountId::get();
let account_id =
technical::Module::<Runtime>::tech_account_id_to_account_id(&tech_account_id)
.expect("Failed to get ordinary account id for technical account id.");
account_id
};
pub const GetDefaultSubscriptionFrequency: BlockNumber = 10;
pub const GetBurnUpdateFrequency: BlockNumber = 3;
pub const ExistentialDeposit: u128 = 0;
pub const TransferFee: u128 = 0;
pub const CreationFee: u128 = 0;
pub const TransactionByteFee: u128 = 1;
pub GetFee: Fixed = fixed_from_basis_points(30u16);
pub GetParliamentAccountId: AccountId = AccountId32::from([7u8; 32]);
}
construct_runtime! {
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
PswapDistribution: pswap_distribution::{Module, Call, Config<T>, Storage, Event<T>},
Tokens: tokens::{Module, Call, Config<T>, Storage, Event<T>},
Permissions: permissions::{Module, Call, Config<T>, Storage, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Config<T>, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Config<T>, Storage, Event<T>},
Technical: technical::{Module, Call, Storage, Event<T>},
DexManager: dex_manager::{Module, Call, Storage},
}
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl Config for Runtime {
type Event = Event;
type GetIncentiveAssetId = GetIncentiveAssetId;
type LiquidityProxy = ();
type CompatBalance = Balance;
type GetDefaultSubscriptionFrequency = GetDefaultSubscriptionFrequency;
type GetBurnUpdateFrequency = GetBurnUpdateFrequency;
type GetTechnicalAccountId = GetPswapDistributionAccountId;
type EnsureDEXManager = DexManager;
type OnPswapBurnedAggregator = ();
type WeightInfo = ();
type GetParliamentAccountId = GetParliamentAccountId;
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency =
BasicCurrencyAdapter<Runtime, pallet_balances::Module<Runtime>, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = AssetId;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = DEXId;
type LstId = common::LiquiditySourceType;
}
impl pallet_balances::Config for Runtime {
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl technical::Config for Runtime {
type Event = Event;
type TechAssetId = TechAssetId;
type TechAccountId = TechAccountId;
type Trigger = ();
type Condition = ();
type SwapAction = ();
type WeightInfo = ();
}
impl dex_manager::Config for Runtime {}
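// Builder for the genesis state used in tests: endowed balances and assets,
// permission grants, fee-account subscriptions and the PSWAP burn schedule.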
pub struct ExtBuilder {
endowed_accounts: Vec<(AccountId, AssetId, Balance)>,
endowed_assets: Vec<(
AssetId,
AccountId,
AssetSymbol,
AssetName,
BalancePrecision,
Balance,
bool,
)>,
initial_permission_owners: Vec<(u32, Scope, Vec<AccountId>)>,
initial_permissions: Vec<(AccountId, Scope, Vec<u32>)>,
subscribed_accounts: Vec<(AccountId, (DEXId, AssetId, BlockNumber, BlockNumber))>,
burn_info: (Fixed, Fixed, Fixed),
}
impl ExtBuilder {
pub fn uninitialized() -> Self {
Self {
endowed_accounts: Vec::new(),
endowed_assets: vec![(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOL".to_vec()),
AssetName(b"Pool Token".to_vec()),
18,
Balance::from(0u32),
true,
)],
initial_permission_owners: Vec::new(),
initial_permissions: Vec::new(),
subscribed_accounts: Vec::new(),
burn_info: (fixed!(0), fixed!(0.10), fixed!(0.30)),
}
}
}
impl ExtBuilder {
pub fn with_accounts(accounts: Vec<(AccountId, AssetId, Balance)>) -> Self {
let permissioned_account_id = GetPswapDistributionAccountId::get();
Self {
endowed_accounts: accounts,
endowed_assets: vec![
(
common::XOR.into(),
alice(),
AssetSymbol(b"XOR".to_vec()),
AssetName(b"SORA".to_vec()),
18,
Balance::zero(),
true,
),
(
common::PSWAP.into(),
alice(),
AssetSymbol(b"PSWAP".to_vec()),
AssetName(b"Polkaswap".to_vec()),
10,
Balance::zero(),
true,
),
(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOLA".to_vec()),
AssetName(b"Pool A".to_vec()),
18,
Balance::zero(),
true,
),
(
PoolTokenBId::get(),
alice(),
AssetSymbol(b"POOLB".to_vec()),
AssetName(b"Pool B".to_vec()),
18,
Balance::zero(),
true,
),
],
initial_permission_owners: vec![],
initial_permissions: vec![(
permissioned_account_id,
Scope::Unlimited,
vec![permissions::MINT, permissions::BURN],
)],
subscribed_accounts: vec![
(fees_account_a(), (DEX_A_ID, PoolTokenAId::get(), 5, 0)),
(fees_account_b(), (DEX_A_ID, PoolTokenBId::get(), 7, 0)),
],
burn_info: (fixed!(0.1), fixed!(0.10), fixed!(0.40)),
}
}
}
impl Default for ExtBuilder {
fn default() -> Self {
ExtBuilder::with_accounts(vec![
(fees_account_a(), common::XOR.into(), balance!(1)),
(fees_account_a(), common::PSWAP.into(), balance!(6)),
(liquidity_provider_a(), PoolTokenAId::get(), balance!(3)),
(liquidity_provider_b(), PoolTokenAId::get(), balance!(2)),
(liquidity_provider_c(), PoolTokenAId::get(), balance!(1)),
(liquidity_provider_a(), PoolTokenBId::get(), balance!(10)),
(liquidity_provider_b(), PoolTokenBId::get(), balance!(10)),
(liquidity_provider_c(), PoolTokenBId::get(), balance!(10)),
])
}
}
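// Assembles genesis storage for every configured pallet; the balances list
// is sorted and deduplicated first, since duplicate accounts are not
// accepted by the balances genesis config.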
impl ExtBuilder {
pub fn build(self) -> sp_io::TestExternalities {
let mut t = SystemConfig::default().build_storage::<Runtime>().unwrap();
let mut vec = self
.endowed_accounts
.iter()
.map(|(acc, ..)| (acc.clone(), 0))
.chain(vec![
(alice(), 0),
(fees_account_a(), 0),
(fees_account_b(), 0),
(GetPswapDistributionAccountId::get(), 0),
(GetParliamentAccountId::get(), 0),
])
.collect::<Vec<_>>();
vec.sort_by_key(|x| x.0.clone());
vec.dedup_by(|x, y| x.0 == y.0);
BalancesConfig { balances: vec }
.assimilate_storage(&mut t)
.unwrap();
PermissionsConfig {
initial_permissions: self.initial_permissions,
initial_permission_owners: self.initial_permission_owners,
}
.assimilate_storage(&mut t)
.unwrap();
TokensConfig {
endowed_accounts: self.endowed_accounts,
}
.assimilate_storage(&mut t)
.unwrap();
AssetsConfig {
endowed_assets: self.endowed_assets,
}
.assimilate_storage(&mut t)
.unwrap();
PswapDistributionConfig {
subscribed_accounts: self.subscribed_accounts,
burn_info: self.burn_info,
}
.assimilate_storage(&mut t)
.unwrap();
t.into()
}
}
| {
AccountId32::from([5u8; 32])
} | identifier_body |
mock.rs | // This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{self as pswap_distribution, Config};
use common::mock::ExistentialDeposits;
use common::prelude::Balance;
use common::{
balance, fixed, fixed_from_basis_points, AssetName, AssetSymbol, BalancePrecision, Fixed,
FromGenericPair,
};
use currencies::BasicCurrencyAdapter;
use frame_support::traits::GenesisBuild;
use frame_support::weights::Weight;
use frame_support::{construct_runtime, parameter_types};
use frame_system;
use hex_literal::hex;
use permissions::Scope;
use sp_core::H256;
use sp_runtime::testing::Header;
use sp_runtime::traits::{BlakeTwo256, IdentityLookup, Zero};
use sp_runtime::{AccountId32, Perbill};
pub type AccountId = AccountId32;
pub type BlockNumber = u64;
pub type Amount = i128;
pub type AssetId = common::AssetId32<common::PredefinedAssetId>;
pub type TechAccountId = common::TechAccountId<AccountId, TechAssetId, DEXId>;
type TechAssetId = common::TechAssetId<common::PredefinedAssetId>;
type DEXId = common::DEXId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
pub fn alice() -> AccountId {
AccountId32::from([1u8; 32])
}
pub fn fees_account_a() -> AccountId {
AccountId32::from([2u8; 32])
}
pub fn fees_account_b() -> AccountId {
AccountId32::from([3u8; 32])
}
pub fn liquidity_provider_a() -> AccountId {
AccountId32::from([4u8; 32])
}
pub fn liquidity_provider_b() -> AccountId {
AccountId32::from([5u8; 32])
}
pub fn liquidity_provider_c() -> AccountId {
AccountId32::from([6u8; 32])
}
pub const DEX_A_ID: DEXId = common::DEXId::Polkaswap;
parameter_types! {
pub GetBaseAssetId: AssetId = common::XOR.into();
pub GetIncentiveAssetId: AssetId = common::PSWAP.into();
pub const PoolTokenAId: AssetId = common::AssetId32::from_bytes(hex!("0211110000000000000000000000000000000000000000000000000000000000"));
pub const PoolTokenBId: AssetId = common::AssetId32::from_bytes(hex!("0222220000000000000000000000000000000000000000000000000000000000"));
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const GetDefaultFee: u16 = 30;
pub const GetDefaultProtocolFee: u16 = 0;
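// The distribution tech account is assembled from the pallet's prefix/main
// constants, and its ordinary AccountId is then derived from it via the
// technical pallet.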
pub GetPswapDistributionTechAccountId: TechAccountId = {
let tech_account_id = TechAccountId::from_generic_pair(
crate::TECH_ACCOUNT_PREFIX.to_vec(),
crate::TECH_ACCOUNT_MAIN.to_vec(),
);
tech_account_id
};
pub GetPswapDistributionAccountId: AccountId = {
let tech_account_id = GetPswapDistributionTechAccountId::get();
let account_id =
technical::Module::<Runtime>::tech_account_id_to_account_id(&tech_account_id)
.expect("Failed to get ordinary account id for technical account id.");
account_id
};
pub const GetDefaultSubscriptionFrequency: BlockNumber = 10;
pub const GetBurnUpdateFrequency: BlockNumber = 3;
pub const ExistentialDeposit: u128 = 0;
pub const TransferFee: u128 = 0;
pub const CreationFee: u128 = 0;
pub const TransactionByteFee: u128 = 1;
pub GetFee: Fixed = fixed_from_basis_points(30u16);
pub GetParliamentAccountId: AccountId = AccountId32::from([7u8; 32]);
}
construct_runtime! {
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
PswapDistribution: pswap_distribution::{Module, Call, Config<T>, Storage, Event<T>},
Tokens: tokens::{Module, Call, Config<T>, Storage, Event<T>},
Permissions: permissions::{Module, Call, Config<T>, Storage, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Config<T>, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Config<T>, Storage, Event<T>},
Technical: technical::{Module, Call, Storage, Event<T>},
DexManager: dex_manager::{Module, Call, Storage},
}
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl Config for Runtime {
type Event = Event;
type GetIncentiveAssetId = GetIncentiveAssetId;
type LiquidityProxy = ();
type CompatBalance = Balance;
type GetDefaultSubscriptionFrequency = GetDefaultSubscriptionFrequency;
type GetBurnUpdateFrequency = GetBurnUpdateFrequency;
type GetTechnicalAccountId = GetPswapDistributionAccountId;
type EnsureDEXManager = DexManager;
type OnPswapBurnedAggregator = ();
type WeightInfo = ();
type GetParliamentAccountId = GetParliamentAccountId;
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency =
BasicCurrencyAdapter<Runtime, pallet_balances::Module<Runtime>, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = AssetId;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = DEXId;
type LstId = common::LiquiditySourceType;
}
impl pallet_balances::Config for Runtime {
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl technical::Config for Runtime {
type Event = Event;
type TechAssetId = TechAssetId;
type TechAccountId = TechAccountId;
type Trigger = ();
type Condition = ();
type SwapAction = ();
type WeightInfo = ();
}
impl dex_manager::Config for Runtime {}
pub struct ExtBuilder {
endowed_accounts: Vec<(AccountId, AssetId, Balance)>,
endowed_assets: Vec<(
AssetId,
AccountId,
AssetSymbol,
AssetName,
BalancePrecision,
Balance,
bool,
)>,
initial_permission_owners: Vec<(u32, Scope, Vec<AccountId>)>,
initial_permissions: Vec<(AccountId, Scope, Vec<u32>)>,
subscribed_accounts: Vec<(AccountId, (DEXId, AssetId, BlockNumber, BlockNumber))>,
burn_info: (Fixed, Fixed, Fixed),
}
impl ExtBuilder {
pub fn uninitialized() -> Self {
Self {
endowed_accounts: Vec::new(),
endowed_assets: vec![(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOL".to_vec()),
AssetName(b"Pool Token".to_vec()),
18,
Balance::from(0u32),
true,
)],
initial_permission_owners: Vec::new(),
initial_permissions: Vec::new(),
subscribed_accounts: Vec::new(),
burn_info: (fixed!(0), fixed!(0.10), fixed!(0.30)),
}
}
}
impl ExtBuilder {
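// Seeds the four test assets, grants MINT/BURN to the distribution account
// and subscribes both fee accounts, on top of the caller-supplied balances.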
pub fn | (accounts: Vec<(AccountId, AssetId, Balance)>) -> Self {
let permissioned_account_id = GetPswapDistributionAccountId::get();
Self {
endowed_accounts: accounts,
endowed_assets: vec![
(
common::XOR.into(),
alice(),
AssetSymbol(b"XOR".to_vec()),
AssetName(b"SORA".to_vec()),
18,
Balance::zero(),
true,
),
(
common::PSWAP.into(),
alice(),
AssetSymbol(b"PSWAP".to_vec()),
AssetName(b"Polkaswap".to_vec()),
10,
Balance::zero(),
true,
),
(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOLA".to_vec()),
AssetName(b"Pool A".to_vec()),
18,
Balance::zero(),
true,
),
(
PoolTokenBId::get(),
alice(),
AssetSymbol(b"POOLB".to_vec()),
AssetName(b"Pool B".to_vec()),
18,
Balance::zero(),
true,
),
],
initial_permission_owners: vec![],
initial_permissions: vec![(
permissioned_account_id,
Scope::Unlimited,
vec![permissions::MINT, permissions::BURN],
)],
subscribed_accounts: vec![
(fees_account_a(), (DEX_A_ID, PoolTokenAId::get(), 5, 0)),
(fees_account_b(), (DEX_A_ID, PoolTokenBId::get(), 7, 0)),
],
burn_info: (fixed!(0.1), fixed!(0.10), fixed!(0.40)),
}
}
}
impl Default for ExtBuilder {
fn default() -> Self {
ExtBuilder::with_accounts(vec![
(fees_account_a(), common::XOR.into(), balance!(1)),
(fees_account_a(), common::PSWAP.into(), balance!(6)),
(liquidity_provider_a(), PoolTokenAId::get(), balance!(3)),
(liquidity_provider_b(), PoolTokenAId::get(), balance!(2)),
(liquidity_provider_c(), PoolTokenAId::get(), balance!(1)),
(liquidity_provider_a(), PoolTokenBId::get(), balance!(10)),
(liquidity_provider_b(), PoolTokenBId::get(), balance!(10)),
(liquidity_provider_c(), PoolTokenBId::get(), balance!(10)),
])
}
}
impl ExtBuilder {
pub fn build(self) -> sp_io::TestExternalities {
let mut t = SystemConfig::default().build_storage::<Runtime>().unwrap();
let mut vec = self
.endowed_accounts
.iter()
.map(|(acc, ..)| (acc.clone(), 0))
.chain(vec![
(alice(), 0),
(fees_account_a(), 0),
(fees_account_b(), 0),
(GetPswapDistributionAccountId::get(), 0),
(GetParliamentAccountId::get(), 0),
])
.collect::<Vec<_>>();
vec.sort_by_key(|x| x.0.clone());
vec.dedup_by(|x, y| x.0 == y.0);
BalancesConfig { balances: vec }
.assimilate_storage(&mut t)
.unwrap();
PermissionsConfig {
initial_permissions: self.initial_permissions,
initial_permission_owners: self.initial_permission_owners,
}
.assimilate_storage(&mut t)
.unwrap();
TokensConfig {
endowed_accounts: self.endowed_accounts,
}
.assimilate_storage(&mut t)
.unwrap();
AssetsConfig {
endowed_assets: self.endowed_assets,
}
.assimilate_storage(&mut t)
.unwrap();
PswapDistributionConfig {
subscribed_accounts: self.subscribed_accounts,
burn_info: self.burn_info,
}
.assimilate_storage(&mut t)
.unwrap();
t.into()
}
}
| with_accounts | identifier_name |
mock.rs | // This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{self as pswap_distribution, Config};
use common::mock::ExistentialDeposits;
use common::prelude::Balance;
use common::{
balance, fixed, fixed_from_basis_points, AssetName, AssetSymbol, BalancePrecision, Fixed,
FromGenericPair,
};
use currencies::BasicCurrencyAdapter;
use frame_support::traits::GenesisBuild;
use frame_support::weights::Weight;
use frame_support::{construct_runtime, parameter_types};
use frame_system;
use hex_literal::hex;
use permissions::Scope;
use sp_core::H256;
use sp_runtime::testing::Header;
use sp_runtime::traits::{BlakeTwo256, IdentityLookup, Zero};
use sp_runtime::{AccountId32, Perbill};
pub type AccountId = AccountId32;
pub type BlockNumber = u64;
pub type Amount = i128;
pub type AssetId = common::AssetId32<common::PredefinedAssetId>;
pub type TechAccountId = common::TechAccountId<AccountId, TechAssetId, DEXId>;
type TechAssetId = common::TechAssetId<common::PredefinedAssetId>;
type DEXId = common::DEXId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
pub fn alice() -> AccountId {
AccountId32::from([1u8; 32])
}
pub fn fees_account_a() -> AccountId {
AccountId32::from([2u8; 32])
}
pub fn fees_account_b() -> AccountId {
AccountId32::from([3u8; 32])
}
pub fn liquidity_provider_a() -> AccountId {
AccountId32::from([4u8; 32])
}
pub fn liquidity_provider_b() -> AccountId {
AccountId32::from([5u8; 32])
}
pub fn liquidity_provider_c() -> AccountId {
AccountId32::from([6u8; 32])
}
pub const DEX_A_ID: DEXId = common::DEXId::Polkaswap;
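// Static parameters for the mock runtime, including the derived
// distribution accounts and the subscription/burn-update frequencies.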
parameter_types! {
pub GetBaseAssetId: AssetId = common::XOR.into();
pub GetIncentiveAssetId: AssetId = common::PSWAP.into();
pub const PoolTokenAId: AssetId = common::AssetId32::from_bytes(hex!("0211110000000000000000000000000000000000000000000000000000000000"));
pub const PoolTokenBId: AssetId = common::AssetId32::from_bytes(hex!("0222220000000000000000000000000000000000000000000000000000000000"));
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const GetDefaultFee: u16 = 30;
pub const GetDefaultProtocolFee: u16 = 0;
pub GetPswapDistributionTechAccountId: TechAccountId = {
let tech_account_id = TechAccountId::from_generic_pair(
crate::TECH_ACCOUNT_PREFIX.to_vec(),
crate::TECH_ACCOUNT_MAIN.to_vec(),
);
tech_account_id
};
pub GetPswapDistributionAccountId: AccountId = {
let tech_account_id = GetPswapDistributionTechAccountId::get();
let account_id =
technical::Module::<Runtime>::tech_account_id_to_account_id(&tech_account_id)
.expect("Failed to get ordinary account id for technical account id.");
account_id
};
pub const GetDefaultSubscriptionFrequency: BlockNumber = 10;
pub const GetBurnUpdateFrequency: BlockNumber = 3;
pub const ExistentialDeposit: u128 = 0;
pub const TransferFee: u128 = 0;
pub const CreationFee: u128 = 0;
pub const TransactionByteFee: u128 = 1;
pub GetFee: Fixed = fixed_from_basis_points(30u16);
pub GetParliamentAccountId: AccountId = AccountId32::from([7u8; 32]);
}
construct_runtime! {
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
PswapDistribution: pswap_distribution::{Module, Call, Config<T>, Storage, Event<T>},
Tokens: tokens::{Module, Call, Config<T>, Storage, Event<T>},
Permissions: permissions::{Module, Call, Config<T>, Storage, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Config<T>, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Config<T>, Storage, Event<T>},
Technical: technical::{Module, Call, Storage, Event<T>},
DexManager: dex_manager::{Module, Call, Storage},
}
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl Config for Runtime {
type Event = Event;
type GetIncentiveAssetId = GetIncentiveAssetId;
type LiquidityProxy = ();
type CompatBalance = Balance;
type GetDefaultSubscriptionFrequency = GetDefaultSubscriptionFrequency;
type GetBurnUpdateFrequency = GetBurnUpdateFrequency;
type GetTechnicalAccountId = GetPswapDistributionAccountId;
type EnsureDEXManager = DexManager;
type OnPswapBurnedAggregator = ();
type WeightInfo = ();
type GetParliamentAccountId = GetParliamentAccountId;
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency =
BasicCurrencyAdapter<Runtime, pallet_balances::Module<Runtime>, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = AssetId;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = DEXId;
type LstId = common::LiquiditySourceType;
}
impl pallet_balances::Config for Runtime {
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl technical::Config for Runtime {
type Event = Event;
type TechAssetId = TechAssetId;
type TechAccountId = TechAccountId;
type Trigger = ();
type Condition = ();
type SwapAction = ();
type WeightInfo = ();
}
impl dex_manager::Config for Runtime {}
pub struct ExtBuilder {
endowed_accounts: Vec<(AccountId, AssetId, Balance)>,
endowed_assets: Vec<(
AssetId,
AccountId,
AssetSymbol,
AssetName,
BalancePrecision,
Balance,
bool,
)>,
initial_permission_owners: Vec<(u32, Scope, Vec<AccountId>)>,
initial_permissions: Vec<(AccountId, Scope, Vec<u32>)>,
subscribed_accounts: Vec<(AccountId, (DEXId, AssetId, BlockNumber, BlockNumber))>,
burn_info: (Fixed, Fixed, Fixed),
}
impl ExtBuilder {
pub fn uninitialized() -> Self {
Self {
endowed_accounts: Vec::new(),
endowed_assets: vec![(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOL".to_vec()),
AssetName(b"Pool Token".to_vec()),
18,
Balance::from(0u32),
true,
)],
initial_permission_owners: Vec::new(),
initial_permissions: Vec::new(),
subscribed_accounts: Vec::new(),
burn_info: (fixed!(0), fixed!(0.10), fixed!(0.30)),
}
}
}
impl ExtBuilder {
pub fn with_accounts(accounts: Vec<(AccountId, AssetId, Balance)>) -> Self {
let permissioned_account_id = GetPswapDistributionAccountId::get();
Self {
endowed_accounts: accounts,
endowed_assets: vec![
(
common::XOR.into(),
alice(),
AssetSymbol(b"XOR".to_vec()),
AssetName(b"SORA".to_vec()),
18,
Balance::zero(),
true,
),
(
common::PSWAP.into(),
alice(),
AssetSymbol(b"PSWAP".to_vec()),
AssetName(b"Polkaswap".to_vec()),
10,
Balance::zero(),
true,
),
(
PoolTokenAId::get(),
alice(),
AssetSymbol(b"POOLA".to_vec()),
AssetName(b"Pool A".to_vec()),
18,
Balance::zero(),
true,
),
(
PoolTokenBId::get(),
alice(),
AssetSymbol(b"POOLB".to_vec()),
AssetName(b"Pool B".to_vec()),
18,
Balance::zero(),
true,
),
],
initial_permission_owners: vec![],
initial_permissions: vec![(
permissioned_account_id,
Scope::Unlimited,
vec![permissions::MINT, permissions::BURN],
)],
subscribed_accounts: vec![
(fees_account_a(), (DEX_A_ID, PoolTokenAId::get(), 5, 0)),
(fees_account_b(), (DEX_A_ID, PoolTokenBId::get(), 7, 0)),
],
burn_info: (fixed!(0.1), fixed!(0.10), fixed!(0.40)),
}
}
}
impl Default for ExtBuilder {
fn default() -> Self {
ExtBuilder::with_accounts(vec![
(fees_account_a(), common::XOR.into(), balance!(1)),
(fees_account_a(), common::PSWAP.into(), balance!(6)),
(liquidity_provider_a(), PoolTokenAId::get(), balance!(3)),
(liquidity_provider_b(), PoolTokenAId::get(), balance!(2)),
(liquidity_provider_c(), PoolTokenAId::get(), balance!(1)),
(liquidity_provider_a(), PoolTokenBId::get(), balance!(10)),
(liquidity_provider_b(), PoolTokenBId::get(), balance!(10)),
(liquidity_provider_c(), PoolTokenBId::get(), balance!(10)),
])
}
}
impl ExtBuilder {
pub fn build(self) -> sp_io::TestExternalities {
let mut t = SystemConfig::default().build_storage::<Runtime>().unwrap();
let mut vec = self
.endowed_accounts
.iter()
.map(|(acc, ..)| (acc.clone(), 0))
.chain(vec![
(alice(), 0),
(fees_account_a(), 0),
(fees_account_b(), 0),
(GetPswapDistributionAccountId::get(), 0),
(GetParliamentAccountId::get(), 0),
])
.collect::<Vec<_>>();
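// An account may be endowed by several of the sources above; sort and
// dedup so each account gets exactly one balances entry.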
vec.sort_by_key(|x| x.0.clone());
vec.dedup_by(|x, y| x.0 == y.0);
BalancesConfig { balances: vec }
.assimilate_storage(&mut t)
.unwrap();
PermissionsConfig {
initial_permissions: self.initial_permissions,
initial_permission_owners: self.initial_permission_owners,
}
.assimilate_storage(&mut t)
.unwrap();
TokensConfig {
endowed_accounts: self.endowed_accounts,
}
.assimilate_storage(&mut t)
.unwrap();
AssetsConfig {
endowed_assets: self.endowed_assets,
}
.assimilate_storage(&mut t)
.unwrap();
PswapDistributionConfig {
subscribed_accounts: self.subscribed_accounts,
burn_info: self.burn_info,
}
.assimilate_storage(&mut t)
.unwrap();
t.into()
}
}
list_view_items.rs
use std::cell::Cell;
use std::ptr::NonNull;
use crate::aliases::WinResult;
use crate::co;
use crate::handles::HWND;
use crate::msg::lvm;
use crate::structs::{LVFINDINFO, LVHITTESTINFO, LVITEM, RECT};
use crate::various::WString;
/// Exposes item methods of a [`ListView`](crate::gui::ListView) control.
///
/// You cannot directly instantiate this object; it is created internally by the
/// control.
pub struct ListViewItems {
hwnd_ptr: Cell<NonNull<HWND>>,
}
impl ListViewItems {
pub(in crate::gui::native_controls) fn new() -> ListViewItems {
Self {
hwnd_ptr: Cell::new(NonNull::from(&HWND::NULL)), // initially invalid
}
}
pub(in crate::gui::native_controls) fn set_hwnd_ref(&self, hwnd_ref: &HWND) {
self.hwnd_ptr.replace(NonNull::from(hwnd_ref));
}
pub(in crate::gui::native_controls) fn hwnd(&self) -> HWND {
unsafe { *self.hwnd_ptr.get().as_ref() }
}
/// Appends a new item by sending an
/// [`LVM_INSERTITEM`](crate::msg::lvm::InsertItem) message, and returns its
/// index.
///
/// The texts are relative to each column.
///
/// # Examples
///
/// ```rust,ignore
/// use winsafe::gui;
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// my_list.items().add(
/// &[
/// "First column text",
/// "Second column text",
/// ],
/// None, // no icon; requires set_image_list() before
/// ).unwrap();
/// ```
///
/// # Panics
///
/// Panics if `texts` is empty, or if the number of texts is greater than
/// the number of columns.
pub fn add<S: AsRef<str>>(&self,
texts: &[S], icon_index: Option<u32>) -> WinResult<u32>
{
if texts.is_empty() {
panic!("No texts passed when adding a ListView item.");
}
let mut lvi = LVITEM::default();
lvi.mask = co::LVIF::TEXT | co::LVIF::IMAGE;
lvi.iItem = 0x0fff_ffff; // insert as the last one
lvi.iImage = match icon_index {
Some(idx) => idx as _,
None => -1,
};
let mut wtext = WString::from_str(texts[0].as_ref());
lvi.set_pszText(Some(&mut wtext));
let new_idx = self.hwnd().SendMessage(lvm::InsertItem { lvitem: &lvi })?;
for (idx, text) in texts.iter().skip(1).enumerate() {
self.set_text(new_idx, idx as u32 + 1, text.as_ref())?;
}
Ok(new_idx)
}
/// Retrieves the total number of items by sending an
/// [`LVM_GETITEMCOUNT`](crate::msg::lvm::GetItemCount) message.
pub fn count(&self) -> u32 {
self.hwnd().SendMessage(lvm::GetItemCount {})
}
/// Deletes the items at the given indexes by sending an
/// [`LVM_DELETEITEM`](crate::msg::lvm::DeleteItem) message.
///
/// The indexes are iterated backwards, so the last item will be deleted
/// first.
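///
/// # Examples
///
/// A minimal sketch (the indexes are illustrative):
///
/// ```rust,ignore
/// use winsafe::gui;
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// // Deletes the items at indexes 3 and 1, last one first.
/// my_list.items().delete(&[1, 3]).unwrap();
/// ```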
pub fn delete(&self, item_indexes: &[u32]) -> WinResult<()> {
for idx in item_indexes.iter().rev() {
self.hwnd().SendMessage(lvm::DeleteItem {
index: *idx,
})?;
}
Ok(())
}
/// Deletes all items by sending an
/// [`LVM_DELETEALLITEMS`](crate::msg::lvm::DeleteAllItems) message.
pub fn delete_all(&self) -> WinResult<()> {
self.hwnd().SendMessage(lvm::DeleteAllItems {})
}
/// Deletes the selected items by sending
/// [`LVM_DELETEITEM`](crate::msg::lvm::DeleteItem) messages.
pub fn delete_selected(&self) -> WinResult<()> {
loop {
match self.hwnd().SendMessage(lvm::GetNextItem {
initial_index: None,
relationship: co::LVNI::SELECTED,
}) {
Some(index) => self.hwnd().SendMessage(lvm::DeleteItem { index })?,
None => break,
};
}
Ok(())
}
/// Scrolls the list by sending an
/// [`LVM_ENSUREVISIBLE`](crate::msg::lvm::EnsureVisible) message so that an
/// item is visible in the list.
pub fn ensure_visible(&self, item_index: u32) -> WinResult<()> {
self.hwnd().SendMessage(lvm::EnsureVisible {
index: item_index,
entirely_visible: true,
})
}
/// Searches for an item with the given text, case-insensitive, by sending
/// an [`LVM_FINDITEM`](crate::msg::lvm::FindItem) message.
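///
/// # Examples
///
/// A minimal sketch (the searched text is illustrative):
///
/// ```rust,ignore
/// use winsafe::gui;
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// match my_list.items().find("John Doe") {
///     Some(idx) => println!("Found at index {}.", idx),
///     None => println!("Not found."),
/// }
/// ```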
pub fn find(&self, text: &str) -> Option<u32> {
let mut buf = WString::from_str(text);
let mut lvfi = LVFINDINFO::default();
lvfi.flags = co::LVFI::STRING;
lvfi.set_psz(Some(&mut buf));
self.hwnd().SendMessage(lvm::FindItem {
start_index: None,
lvfindinfo: &mut lvfi,
})
}
/// Retrieves the index of the focused item by sending an
/// [`LVM_GETNEXTITEM`](crate::msg::lvm::GetNextItem) message.
pub fn focused(&self) -> Option<u32> {
self.hwnd().SendMessage(lvm::GetNextItem {
initial_index: None,
relationship: co::LVNI::FOCUSED,
})
}
/// Retrieves the item at the specified position by sending an
/// [`LVM_HITTEST`](crate::msg::lvm::HitTest) message.
pub fn hit_test(&self, info: &mut LVHITTESTINFO) -> Option<u32> {
self.hwnd().SendMessage(lvm::HitTest { info })
}
/// Tells if the item is the focused one by sending an
/// [`LVM_GETITEMSTATE`](crate::msg::lvm::GetItemState) message.
pub fn is_focused(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::GetItemState {
index: item_index,
mask: co::LVIS::FOCUSED,
}).has(co::LVIS::FOCUSED)
}
/// Tells if the item is selected by sending an
/// [`LVM_GETITEMSTATE`](crate::msg::lvm::GetItemState) message.
pub fn is_selected(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::GetItemState {
index: item_index,
mask: co::LVIS::SELECTED,
}).has(co::LVIS::SELECTED)
}
/// Tells if the item is currently visible by sending an
/// [`LVM_ISITEMVISIBLE`](crate::msg::lvm::IsItemVisible) message.
pub fn is_visible(&self, item_index: u32) -> bool {
self.hwnd().SendMessage(lvm::IsItemVisible { index: item_index })
}
/// Retrieves the actual index of the unique ID by sending an
/// [`LVM_MAPIDTOINDEX`](crate::msg::lvm::MapIdToIndex) message.
pub fn map_id_to_index(&self, item_id: u32) -> Option<u32> {
self.hwnd().SendMessage(lvm::MapIdToIndex { id: item_id })
}
/// Retrieves a unique ID for the given index by sending an
/// [`LVM_MAPINDEXTOID`](crate::msg::lvm::MapIndexToId) message.
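///
/// # Examples
///
/// A sketch pairing it with
/// [`map_id_to_index`](crate::gui::ListViewItems::map_id_to_index) to track an
/// item whose index may change:
///
/// ```rust,ignore
/// use winsafe::gui;
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// let id = my_list.items().map_index_to_id(0).unwrap();
/// // ...items inserted or deleted; the index may change, the ID won't...
/// let idx = my_list.items().map_id_to_index(id).unwrap();
/// ```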
pub fn map_index_to_id(&self, item_index: u32) -> Option<u32> {
self.hwnd().SendMessage(lvm::MapIndexToId { index: item_index })
}
/// Retrieves the bound rectangle of item by sending an
/// [`LVM_GETITEMRECT`](crate::msg::lvm::GetItemRect) message.
pub fn rect(&self, item_index: u32, portion: co::LVIR) -> WinResult<RECT> {
let mut rc = RECT::default();
self.hwnd().SendMessage(lvm::GetItemRect {
index: item_index,
rect: &mut rc,
portion,
})?;
Ok(rc)
}
/// Retrieves the indexes of the selected items by sending
/// [`LVM_GETNEXTITEM`](crate::msg::lvm::GetNextItem) messages.
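///
/// # Examples
///
/// A minimal sketch printing the current selection:
///
/// ```rust,ignore
/// use winsafe::gui;
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// for idx in my_list.items().selected() {
///     println!("Selected index: {}", idx);
/// }
/// ```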
pub fn selected(&self) -> Vec<u32> {
let mut items = Vec::with_capacity(self.selected_count() as _);
let mut idx = None;
loop {
idx = match self.hwnd().SendMessage(lvm::GetNextItem {
initial_index: idx,
relationship: co::LVNI::SELECTED,
}) {
Some(idx) => {
items.push(idx);
Some(idx)
},
None => break,
};
}
items
}
/// Retrieves the number of selected items by sending an
/// [`LVM_GETSELECTEDCOUNT`](crate::msg::lvm::GetSelectedCount) message.
pub fn selected_count(&self) -> u32 {
self.hwnd().SendMessage(lvm::GetSelectedCount {})
}
/// Sets the focused item by sending an
/// [`LVM_SETITEMSTATE`](crate::msg::lvm::SetItemState) message.
pub fn set_focused(&self, item_index: u32) -> WinResult<()> {
let mut lvi = LVITEM::default();
lvi.stateMask = co::LVIS::FOCUSED;
lvi.state = co::LVIS::FOCUSED;
self.hwnd().SendMessage(lvm::SetItemState {
index: Some(item_index),
lvitem: &lvi,
})
}
/// Sets or removes the selection from the given item indexes by sending
/// [`LVM_SETITEMSTATE`](crate::msg::lvm::SetItemState) messages.
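///
/// # Examples
///
/// A minimal sketch (the indexes are illustrative):
///
/// ```rust,ignore
/// use winsafe::gui;
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// // Selects the first two items.
/// my_list.items().set_selected(true, &[0, 1]).unwrap();
/// ```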
pub fn set_selected(&self,
set: bool, item_indexes: &[u32]) -> WinResult<()>
{
let mut lvi = LVITEM::default();
lvi.stateMask = co::LVIS::SELECTED;
if set { lvi.state = co::LVIS::SELECTED; }
for idx in item_indexes.iter() {
self.hwnd().SendMessage(lvm::SetItemState {
index: Some(*idx),
lvitem: &lvi,
})?;
}
Ok(())
}
/// Sets or removes the selection for all items by sending an
/// [`LVM_SETITEMSTATE`](crate::msg::lvm::SetItemState) message.
pub fn set_selected_all(&self, set: bool) -> WinResult<()> {
let mut lvi = LVITEM::default();
lvi.stateMask = co::LVIS::SELECTED;
if set { lvi.state = co::LVIS::SELECTED; }
self.hwnd().SendMessage(lvm::SetItemState {
index: None,
lvitem: &lvi,
})
}
/// Sets the text of an item under a column by sending an
/// [`LVM_SETITEMTEXT`](crate::msg::lvm::SetItemText) message.
pub fn set_text(&self,
item_index: u32, column_index: u32, text: &str) -> WinResult<()>
{
let mut lvi = LVITEM::default();
lvi.iSubItem = column_index as _;
let mut wtext = WString::from_str(text);
lvi.set_pszText(Some(&mut wtext));
self.hwnd().SendMessage(lvm::SetItemText {
index: item_index,
lvitem: &lvi,
})
}
/// Retrieves the text of an item under a column by sending an
/// [`LVM_GETITEMTEXT`](crate::msg::lvm::GetItemText) message.
///
/// The passed buffer will be automatically allocated.
///
/// This method can be more performant than
/// [`text_str`](crate::gui::ListViewItems::text_str) because the buffer can be
/// reused, avoiding multiple allocations. However, it has the inconvenience
/// of requiring a manual conversion from [`WString`](crate::WString) to `String`.
///
/// # Examples
///
/// ```rust,ignore
/// use winsafe::{gui, WString};
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// let mut buf = WString::default();
/// my_list.items().text(0, 2, &mut buf); // 1st item, 3rd column
///
/// println!("Text: {}", buf.to_string());
/// ```
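///
/// Reusing the buffer across all items of a column (a sketch; column 0 is
/// illustrative):
///
/// ```rust,ignore
/// let mut buf = WString::default();
/// for idx in 0..my_list.items().count() {
///     my_list.items().text(idx, 0, &mut buf); // reallocates only if needed
///     println!("{}", buf.to_string());
/// }
/// ```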
pub fn text(&self, item_index: u32, column_index: u32, buf: &mut WString) {
Self::text_retrieve(self.hwnd(), item_index, column_index, buf)
}
pub(in crate::gui::native_controls) fn text_retrieve(
hwnd: HWND, item_index: u32, column_index: u32, buf: &mut WString)
{
// Static method because it's also used by ListViewColumns.
// https://forums.codeguru.com/showthread.php?351972-Getting-listView-item-text-length
const BLOCK: usize = 64; // arbitrary
let mut buf_sz = BLOCK;
let mut buf = buf;
loop {
let mut lvi = LVITEM::default();
lvi.iSubItem = column_index as _;
buf.realloc_buffer(buf_sz);
lvi.set_pszText(Some(&mut buf));
let nchars = hwnd.SendMessage(lvm::GetItemText { // char count without terminating null
index: item_index,
lvitem: &mut lvi,
});
if (nchars as usize) + 1 < buf_sz { // to break, must have at least 1 char gap
break;
}
buf_sz += BLOCK; // increase buffer size to try again
}
}
/// A more convenient [`text`](crate::gui::ListViewItems::text), which
/// directly returns a `String` instead of requiring an external buffer.
///
/// # Examples
///
/// ```rust,ignore
/// use winsafe::gui;
///
/// let my_list: gui::ListView; // initialized somewhere
///
/// println!("Text: {}", my_list.items().text_str(0, 2)); // 1st item, 3rd column
/// ```
pub fn text_str(&self, item_index: u32, column_index: u32) -> String {
let mut buf = WString::default();
self.text(item_index, column_index, &mut buf);
buf.to_string()
}
}
lib.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//! Library to build `Custom_tag` OCaml values.
use std::ffi::CStr;
use std::ffi::CString;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::os::raw::c_char;
use std::os::raw::c_int;
use std::os::raw::c_void;
use std::rc::Rc;
use ocamlrep::from;
use ocamlrep::Allocator;
use ocamlrep::FromError;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
use ocamlrep::Value;
use ocamlrep::CUSTOM_TAG;
use ocamlrep_ocamlpool::catch_unwind;
extern "C" {
fn caml_register_custom_operations(ops: *const CustomOperations);
fn caml_serialize_block_1(data: *const u8, len: usize);
fn caml_serialize_int_8(x: i64);
fn caml_deserialize_sint_8() -> i64;
fn caml_deserialize_block_1(data: *mut u8, len: usize);
}
/// Struct containing the operations for a custom OCaml block.
///
/// This is the Rust encoding of OCaml's `struct custom_operations`.
///
/// For more information on the fields see
/// [the OCaml guide](https://caml.inria.fr/pub/docs/manual-ocaml/intfc.html#ss:c-custom-ops)
#[repr(C)]
pub struct CustomOperations {
identifier: *const c_char,
finalize: Option<extern "C" fn(usize) -> ()>,
compare: Option<extern "C" fn(usize, usize) -> c_int>,
hash: Option<extern "C" fn(usize) -> isize>,
serialize: Option<extern "C" fn(usize, *mut usize, *mut usize) -> ()>,
deserialize: Option<extern "C" fn(*mut c_void) -> usize>,
compare_ext: Option<extern "C" fn(usize, usize) -> c_int>,
/// Not implemented yet, always set to NULL.
custom_fixed_length: *const c_void,
}
impl CustomOperations {
/// Create a new custom block with the given identifier.
///
/// All function pointers will be set to NULL by default.
fn new(identifier: &'static CStr) -> Self {
Self {
identifier: identifier.as_ptr(),
finalize: None,
compare: None,
hash: None,
serialize: None,
deserialize: None,
compare_ext: None,
custom_fixed_length: std::ptr::null(),
}
}
}
/// A wrapper around a Rust type that allows it
/// to be written into/read from OCaml memory and managed by
/// the OCaml GC.
///
/// The value still lives on the Rust heap in an `Rc`'d pointer,
/// and the `Rc`-pointer itself will be written to OCaml memory.
///
/// # Examples
///
/// Expose Rust type:
///
/// ```rust
/// use ocamlrep_custom::caml_serialize_default_impls;
/// use ocamlrep_custom::{CamlSerialize, Custom};
/// use ocamlrep_ocamlpool::ocaml_ffi;
/// use std::cell::Cell;
///
/// pub struct Counter(Cell<isize>);
///
/// impl CamlSerialize for Counter {
/// caml_serialize_default_impls!();
/// }
///
/// ocaml_ffi! {
/// fn counter_new() -> Custom<Counter> {
/// Custom::from(Counter(Cell::new(0)))
/// }
///
/// fn counter_inc(counter: Custom<Counter>) -> Custom<Counter> {
///         counter.0.set(counter.0.get() + 1);
/// counter
/// }
///
/// fn counter_read(counter: Custom<Counter>) -> isize {
/// counter.0.get()
/// }
/// }
/// ```
///
/// From OCaml:
///
/// ```ocaml
/// type counter; (* abstract type *)
///
/// external counter_new : unit -> counter = "counter_new"
/// external counter_inc: counter -> unit = "counter_inc"
/// external counter_read : counter -> int = "counter_read"
///
/// let () =
/// let cnt = counter_new () in (* will be dropped on GC finalization *)
/// assert (counter_read cnt == 0);
/// counter_inc cnt;
/// assert (counter_read cnt == 1)
/// ```
pub struct Custom<T: CamlSerialize>(Rc<T>);
impl<T: CamlSerialize> Custom<T> {
/// Create a new `ToCustom` wrapper by taking ownership of the value.
pub fn from(x: T) -> Self {
Self::new(Rc::new(x))
}
/// Create a new `ToCustom` directly from an `Rc`'d value.
pub fn new(x: Rc<T>) -> Self {
Self(x)
}
/// Get a reference to the inner `Rc`
pub fn inner(&self) -> &Rc<T> {
&self.0
}
}
impl<T: CamlSerialize> Deref for Custom<T> {
type Target = T;
fn deref(&self) -> &T {
self.0.deref()
}
}
/// A custom block has two words: a pointer to the CustomOperations struct,
/// and a pointer to the value. Our values are ref-counted, but an Rc pointer
/// is just pointer-sized.
#[repr(C)]
struct CustomBlockOcamlRep<T>(&'static CustomOperations, Rc<T>);
const CUSTOM_BLOCK_SIZE_IN_BYTES: usize = std::mem::size_of::<CustomBlockOcamlRep<()>>();
const CUSTOM_BLOCK_SIZE_IN_WORDS: usize =
CUSTOM_BLOCK_SIZE_IN_BYTES / std::mem::size_of::<Value<'_>>();
impl<T: CamlSerialize> ToOcamlRep for Custom<T> {
fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> {
let ops: &'static CustomOperations = <T as CamlSerialize>::operations();
let mut block = alloc.block_with_size_and_tag(CUSTOM_BLOCK_SIZE_IN_WORDS, CUSTOM_TAG);
// Safety: we don't call any method on `alloc` after this method.
let block_ptr: *mut Value<'_> = unsafe { alloc.block_ptr_mut(&mut block) };
// Safety: `alloc` guarantees that the `block_ptr` returned by
// `block_ptr_mut` is aligned to `align_of::<Value>()` and valid
// for reads and writes of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// size_of::<Value>()` bytes. Since `CustomBlockOcamlRep` has size
// `CUSTOM_BLOCK_SIZE_IN_WORDS * size_of::<Value>()`, its
// alignment is equal to `align_of::<Value>()`, and no other
// reference to our newly-allocated block can exist, it's safe for us to
// interpret `block_ptr` as a `&mut CustomBlockOcamlRep`.
let block_ptr = block_ptr as *mut MaybeUninit<CustomBlockOcamlRep<T>>;
let custom_block = unsafe { block_ptr.as_mut().unwrap() };
// Write the address of the operations struct to the first word, and the
// pointer to the value to the second word.
*custom_block = MaybeUninit::new(CustomBlockOcamlRep(ops, Rc::clone(&self.0)));
block.build()
}
}
impl<T: CamlSerialize> FromOcamlRep for Custom<T> {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
let rc = rc_from_value::<T>(value)?;
let rc = Rc::clone(rc);
Ok(Custom::new(rc))
}
}
/// Helper function to fetch a reference to the `Rc` from the OCaml representation
/// of a custom block.
fn rc_from_value<'a, T: CamlSerialize>(value: Value<'a>) -> Result<&'a Rc<T>, FromError> {
let block = from::expect_block(value)?;
from::expect_block_tag(block, CUSTOM_TAG)?;
from::expect_block_size(block, CUSTOM_BLOCK_SIZE_IN_WORDS)?;
// We still don't know whether this block is in fact a
// CustomBlockOcamlRep<T>--it may be a CustomBlockOcamlRep<U>, or some
// other custom block which happens to be the same size. We can verify
// that the block is actually a CustomBlockOcamlRep<T> by checking that
// it points to the correct CustomOperations struct.
let ops = <T as CamlSerialize>::operations();
if !std::ptr::eq(ops, block[0].to_bits() as *const CustomOperations) {
return Err(FromError::UnexpectedCustomOps {
expected: ops as *const _ as usize,
actual: block[0].to_bits(),
});
}
let value_ptr = value.to_bits() as *const CustomBlockOcamlRep<T>;
// Safety: `value_ptr` is guaranteed to be aligned to
// `align_of::<Value>()`, and our use of `expect_block_size` guarantees
// that the pointer is valid for reads of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// `size_of::<Value>()` bytes. Since the first field points to the right
// operations struct, we either have a valid `CustomBlockOCamlRep<T>`
// (i.e., constructed above in our `ToOcamlRep` implementation) or
// someone went out of their way to construct an invalid one. Assume
// it's valid and read in the `CustomBlockOcamlRep<T>`.
let custom_block = unsafe { value_ptr.as_ref().unwrap() };
Ok(&custom_block.1)
}
/// Trait that allows OCaml serialization and deserialization.
///
/// If you want to support serialization/deserialization, you
/// **MUST** call `CamlSerialize::register()` when starting up
/// the program.
///
/// This will register your type in the OCaml runtime, allowing
/// deserialization.
///
/// Rust does not support different instantiations of the default
/// implementation for different implementors of trait types. Therefore,
/// you must implement `type_identifier`, `operations` and `register`
/// manually when implementing this trait for a type. You can use
/// the `caml_serialize_default_impls!()` to do that automatically:
///
/// ```
/// impl CamlSerialize for MyType {
/// caml_serialize_default_impls!();
/// }
/// ```
pub trait CamlSerialize: Sized {
/// Get the type name.
fn type_identifier() -> &'static CStr;
/// Get the type's custom operations struct.
///
/// Always has to return the same reference! If not, the
/// OCaml-to-Rust conversion will fail.
///
/// The returned structure is not intended to be used by
/// a programmer. Using it directly by e.g. injecting it
/// into OCaml custom blocks is dangerous and can cause
/// undefined behavior. Don't do it!
fn operations() -> &'static CustomOperations;
/// Register the type with the OCaml system.
///
/// # Safety
///
/// Must not be called from multiple threads.
///
/// This function interacts with the OCaml runtime, which is not thread-safe.
/// If any other threads are attempting to interact with the OCaml runtime
/// or its custom operations table (e.g., by invoking this function, or by
/// executing OCaml code using custom blocks) when this function is invoked,
/// undefined behavior will result.
///
/// # Examples
///
/// ```
/// use ocamlrep_custom::CamlSerialize;
/// use ocamlrep_ocamlpool::ocaml_ffi;
///
/// struct IntBox(isize);
///
/// impl CamlSerialize for IntBox {
/// caml_serialize_default_impls!();
/// fn serialize(&self) -> Vec<u8> { ... }
/// fn deserialize(buffer: &[u8]) -> Self { ... }
/// }
///
/// ocaml_ffi! {
/// fn register_custom_types() {
/// // Once `register_custom_types` has been invoked from OCaml, IntBox
/// // can be serialized and deserialized from OCaml using the Marshal
/// // module.
/// //
/// // Safety: this will be called from OCaml, as such nothing else will
/// // be interacting with the OCaml runtime.
/// unsafe { IntBox::register() };
/// }
/// }
/// ```
unsafe fn register();
/// Convert a value to an array of bytes.
///
/// The default implementation panics.
fn serialize(&self) -> Vec<u8> {
panic!(
"serialization not implemented for {:?}",
Self::type_identifier()
)
}
/// Deserialize a value from an array of bytes.
///
/// The default implementation panics.
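///
/// A round-trip sketch for a hypothetical `IntBox(isize)` wrapper (written
/// inside its `impl CamlSerialize for IntBox`), using a little-endian byte
/// encoding:
///
/// ```
/// fn serialize(&self) -> Vec<u8> {
///     self.0.to_le_bytes().to_vec()
/// }
///
/// fn deserialize(data: &[u8]) -> Self {
///     IntBox(isize::from_le_bytes(data.try_into().unwrap()))
/// }
/// ```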
fn deserialize(_data: &[u8]) -> Self {
panic!(
"deserialization not implemented for {:?}",
Self::type_identifier()
)
}
}
#[macro_export]
macro_rules! caml_serialize_default_impls {
() => {
fn type_identifier() -> &'static std::ffi::CStr {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut TYPE_NAME: Option<std::ffi::CString> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
TYPE_NAME = Some($crate::type_identifier_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it never changes.
// - Concurrent reads are OK.
unsafe { TYPE_NAME.as_ref().unwrap() }
}
fn operations() -> &'static $crate::CustomOperations {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut OPS_STRUCT: Option<$crate::CustomOperations> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
OPS_STRUCT = Some($crate::operations_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it never changes.
// - Concurrent reads are OK.
unsafe { OPS_STRUCT.as_ref().unwrap() }
}
unsafe fn register() {
static mut IS_REGISTERED: bool = false;
// Safety: Can only be called in a single-threaded context!
if IS_REGISTERED {
return;
}
IS_REGISTERED = true;
let ops = Self::operations();
$crate::register_helper::<Self>(ops)
}
};
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn type_identifier_helper<T>() -> CString {
let name = format!("ocamlrep.custom.{}", std::any::type_name::<T>());
std::ffi::CString::new(name).unwrap()
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn operations_helper<T: CamlSerialize>() -> CustomOperations {
let type_identifier = <T as CamlSerialize>::type_identifier();
let mut ops = CustomOperations::new(type_identifier);
ops.finalize = Some(drop_value::<T>);
ops.serialize = Some(serialize_value::<T>);
ops.deserialize = Some(deserialize_value::<T>);
ops
}
/// Helper used for the `caml_serialize_default_impls` macro
///
/// Should not be used directly. Interacts with the OCaml runtime and is
/// thus unsafe to call in a multi-threaded context.
pub unsafe fn register_helper<T>(ops: &'static CustomOperations) {
// Safety: operations struct has a static lifetime, it will live forever!
caml_register_custom_operations(ops as *const CustomOperations);
}
/// Helper function used by `operations_helper`. Returns a finalizer for custom
/// blocks containing an `Rc<T>`.
extern "C" fn drop_value<T: CamlSerialize>(value: usize) {
let _: usize = catch_unwind(|| {
// Safety: We trust here that CustomOperations structs containing this
// `drop_value` instance will only ever be referenced by custom blocks
// matching the layout of `CustomBlockOcamlRep`. If that's so, then this
// function should only be invoked by the OCaml runtime on a pointer to
// a CustomBlockOcamlRep<T> created by T::to_ocamlrep. Such a pointer
let custom_block_ptr = value as *mut CustomBlockOcamlRep<T>;
let custom_block = unsafe { custom_block_ptr.as_mut().unwrap() };
// The `Rc` will be dropped here, and its reference count will decrease
// by one (possibly freeing the referenced value).
// Safety: Since the OCaml runtime will only invoke the finalizer for a
// value which will never again be used, it is safe to use
// `drop_in_place` (i.e., our finalizer will only be invoked once, so we
// won't cause a double-drop).
unsafe {
std::ptr::drop_in_place(&mut custom_block.1);
}
0
});
}
/// Helper function for serialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when serializing a custom block.
extern "C" fn serialize_value<T: CamlSerialize>(
value: usize,
bsize_32: *mut usize,
bsize_64: *mut usize,
) {
let _: usize = catch_unwind(|| {
// Safety: Only called by the OCaml runtime (we don't expose a means of
// invoking this function from Rust), which provides some OCaml
// CUSTOM_TAG block as the value.
let value = unsafe { Value::from_bits(value) };
// Only called by the OCaml runtime, when serializing
// a Custom-object managed by the OCaml GC.
let rc = rc_from_value::<T>(value).unwrap();
let bytes: Vec<u8> = rc.serialize();
let bytes_ptr = bytes.as_ptr();
// Safety: As above, we don't expose a means of invoking this function
// from Rust--it can only be invoked by the OCaml runtime while
// serializing a value. It is safe to invoke OCaml serialization
// functions in this context.
unsafe {
let len = bytes.len();
caml_serialize_int_8(len.try_into().unwrap());
caml_serialize_block_1(bytes_ptr, len);
// The size taken up in the data-part of the custom block.
*bsize_32 = std::mem::size_of::<u32>();
*bsize_64 = std::mem::size_of::<u64>();
}
0
});
}
/// Helper function for deserialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when deserializing a custom block.
extern "C" fn deserialize_value<T: CamlSerialize>(data_ptr: *mut c_void) -> usize {
catch_unwind(|| {
// Get the serialized bytes from the input channel.
let bytes = unsafe {
// Safety: We don't expose a means of invoking this function from
// Rust--`deserialize_value` can only be invoked by the OCaml
// runtime while deserializing a custom block value. It is safe to
// invoke OCaml deserialization functions in this context.
let len: usize = caml_deserialize_sint_8().try_into().unwrap();
let mut buf: Vec<u8> = Vec::with_capacity(len);
// Safety: len <= capacity. The elements aren't initialized at this
// time, but we trust that caml_deserialize_block_1 will fill `len`
// bytes of the buffer.
#[allow(clippy::uninit_vec)]
buf.set_len(len);
// Safety: As above, `deserialize_value` can only be invoked by the
// OCaml runtime during custom block deserialization.
caml_deserialize_block_1(buf.as_mut_ptr(), len);
buf
};
// Actually deserialize those bytes into a T.
let val: T = CamlSerialize::deserialize(&bytes);
// Safety: The OCaml runtime will give us a data buffer which is
// usize-aligned and valid for reads and writes of bsize_32 or bsize_64
// (as provided by `serialize_value`, above) bytes (depending on system
// architecture). This is sufficient for `Rc<T>` (which has the size and
// alignment of usize).
let data_ptr = data_ptr as *mut MaybeUninit<Rc<T>>;
let data = unsafe { data_ptr.as_mut().unwrap() };
*data = MaybeUninit::new(Rc::new(val));
// Return the size of the value we wrote to our output pointer. The
// OCaml runtime will verify that it matches the expected
// bsize_32/bsize_64 written by the serializer.
std::mem::size_of_val(data)
})
}
#[cfg(test)]
mod test {
use std::mem::*;
use super::*;
#[test]
fn custom_block_ocamlrep_size() {
assert_eq!(
size_of::<CustomBlockOcamlRep<u8>>(),
2 * size_of::<Value<'_>>()
);
}
#[test]
fn custom_block_ocamlrep_align() {
assert_eq!(
align_of::<CustomBlockOcamlRep<u8>>(),
align_of::<Value<'_>>()
);
}
}
lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//! Library to build `Custom_tag` OCaml values.
use std::ffi::CStr;
use std::ffi::CString;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::os::raw::c_char;
use std::os::raw::c_int;
use std::os::raw::c_void;
use std::rc::Rc;
use ocamlrep::from;
use ocamlrep::Allocator;
use ocamlrep::FromError;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
use ocamlrep::Value;
use ocamlrep::CUSTOM_TAG;
use ocamlrep_ocamlpool::catch_unwind;
extern "C" {
fn caml_register_custom_operations(ops: *const CustomOperations);
fn caml_serialize_block_1(data: *const u8, len: usize);
fn caml_serialize_int_8(x: i64);
fn caml_deserialize_sint_8() -> i64;
fn caml_deserialize_block_1(data: *mut u8, len: usize);
}
/// Struct containing the operations for a custom OCaml block.
///
/// This is the Rust encoding of OCaml's `struct custom_operations`.
///
/// For more information on the fields see
/// [the OCaml guide](https://caml.inria.fr/pub/docs/manual-ocaml/intfc.html#ss:c-custom-ops)
#[repr(C)]
pub struct CustomOperations {
identifier: *const c_char,
finalize: Option<extern "C" fn(usize) -> ()>,
compare: Option<extern "C" fn(usize, usize) -> c_int>,
hash: Option<extern "C" fn(usize) -> isize>,
serialize: Option<extern "C" fn(usize, *mut usize, *mut usize) -> ()>,
deserialize: Option<extern "C" fn(*mut c_void) -> usize>,
compare_ext: Option<extern "C" fn(usize, usize) -> c_int>,
/// Not implemented yet, always set to NULL.
custom_fixed_length: *const c_void,
}
impl CustomOperations {
/// Create a new custom block with the given identifier.
///
/// All function pointers will be set to NULL by default.
fn new(identifier: &'static CStr) -> Self {
Self {
identifier: identifier.as_ptr(),
finalize: None,
compare: None,
hash: None,
serialize: None,
deserialize: None,
compare_ext: None,
custom_fixed_length: std::ptr::null(),
}
}
}
/// A wrapper around a Rust type that allows it
/// to be written into/read from OCaml memory and managed by
/// the OCaml GC.
///
/// The value still lives on the Rust heap in an `Rc`'d pointer,
/// and the `Rc`-pointer itself will be written to OCaml memory.
///
/// # Examples
///
/// Expose Rust type:
///
/// ```rust
/// use ocamlrep_custom::caml_serialize_default_impls;
/// use ocamlrep_custom::{CamlSerialize, Custom};
/// use ocamlrep_ocamlpool::ocaml_ffi;
/// use std::cell::Cell;
///
/// pub struct Counter(Cell<isize>);
///
/// impl CamlSerialize for Counter {
/// caml_serialize_default_impls!();
/// }
///
/// ocaml_ffi! {
/// fn counter_new() -> Custom<Counter> {
/// Custom::from(Counter(Cell::new(0)))
/// }
///
/// fn counter_inc(counter: Custom<Counter>) -> Custom<Counter> {
/// counter.0.set(counter.0.get() - 1);
/// counter
/// }
///
/// fn counter_read(counter: Custom<Counter>) -> isize {
/// counter.0.get()
/// }
/// }
/// ```
///
/// From OCaml:
///
/// ```ocaml
/// type counter; (* abstract type *)
///
/// external counter_new : unit -> counter = "counter_new"
/// external counter_inc: counter -> unit = "counter_inc"
/// external counter_read : counter -> isize = "counter_read"
///
/// let () =
/// let cnt = counter_new () in (* will be dropped on GC finalization *)
/// assert (counter_read cnt == 0);
/// counter_inc cnt;
/// assert (counter_read cnt == 1)
/// ```
pub struct Custom<T: CamlSerialize>(Rc<T>);
impl<T: CamlSerialize> Custom<T> {
/// Create a new `ToCustom` wrapper by taking ownership of the value.
pub fn from(x: T) -> Self {
Self::new(Rc::new(x))
}
/// Create a new `ToCustom` directly from an `Rc`'d value.
pub fn new(x: Rc<T>) -> Self {
Self(x)
}
/// Get a reference to the inner `Rc`
pub fn inner(&self) -> &Rc<T> {
&self.0
}
}
impl<T: CamlSerialize> Deref for Custom<T> {
type Target = T;
fn deref(&self) -> &T {
self.0.deref()
}
}
/// A custom block has two words: a pointer to the CustomOperations struct,
/// and a pointer the the value. Our values are ref-counted, but an Rc pointer
/// is just pointer-sized.
#[repr(C)]
struct CustomBlockOcamlRep<T>(&'static CustomOperations, Rc<T>);
const CUSTOM_BLOCK_SIZE_IN_BYTES: usize = std::mem::size_of::<CustomBlockOcamlRep<()>>();
const CUSTOM_BLOCK_SIZE_IN_WORDS: usize =
CUSTOM_BLOCK_SIZE_IN_BYTES / std::mem::size_of::<Value<'_>>();
impl<T: CamlSerialize> ToOcamlRep for Custom<T> {
fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> {
let ops: &'static CustomOperations = <T as CamlSerialize>::operations();
let mut block = alloc.block_with_size_and_tag(CUSTOM_BLOCK_SIZE_IN_WORDS, CUSTOM_TAG);
// Safety: we don't call any method on `alloc` after this method.
let block_ptr: *mut Value<'_> = unsafe { alloc.block_ptr_mut(&mut block) };
// Safety: `alloc` guarantees that the `block_ptr` returned by
// `block_ptr_mut` is aligend to `align_of::<Value>()` and valid
// for reads and writes of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// size_of::<Value>()` bytes. Since `CustomBlockOcamlRep` has size
// `CUSTOM_BLOCK_SIZE_IN_WORDS * size_of::<Value>()`, its
// alignment is equal to `align_of::<Value>()`, and no other
// reference to our newly-allocated block can exist, it's safe for us to
// interpret `block_ptr` as a `&mut CustomBlockOcamlRep`.
let block_ptr = block_ptr as *mut MaybeUninit<CustomBlockOcamlRep<T>>;
let custom_block = unsafe { block_ptr.as_mut().unwrap() };
// Write the address of the operations struct to the first word, and the
// pointer to the value to the second word.
*custom_block = MaybeUninit::new(CustomBlockOcamlRep(ops, Rc::clone(&self.0)));
block.build()
}
}
impl<T: CamlSerialize> FromOcamlRep for Custom<T> {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
let rc = rc_from_value::<T>(value)?;
let rc = Rc::clone(rc);
Ok(Custom::new(rc))
}
}
/// Helper function to fetch a reference to the `Rc` from the OCaml representation
/// of a custom block.
fn rc_from_value<'a, T: CamlSerialize>(value: Value<'a>) -> Result<&'a Rc<T>, FromError> {
let block = from::expect_block(value)?;
from::expect_block_tag(block, CUSTOM_TAG)?;
from::expect_block_size(block, CUSTOM_BLOCK_SIZE_IN_WORDS)?;
// We still don't know whether this block is in fact a
// CustomBlockOcamlRep<T>--it may be a CustomBlockOcamlRep<U>, or some
// other custom block which happens to be the same size. We can verify
// that the block is actually a CustomBlockOcamlRep<T> by checking that
// it points to the correct CustomOperations struct.
let ops = <T as CamlSerialize>::operations();
if!std::ptr::eq(ops, block[0].to_bits() as *const CustomOperations) {
return Err(FromError::UnexpectedCustomOps {
expected: ops as *const _ as usize,
actual: block[0].to_bits(),
});
}
let value_ptr = value.to_bits() as *const CustomBlockOcamlRep<T>;
// Safety: `value_ptr` is guaranteed to be aligned to
// `align_of::<Value>()`, and our use of `expect_block_size` guarantees
// that the pointer is valid for reads of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// `size_of::<Value>()` bytes. Since the first field points to the right
// operations struct, we either have a valid `CustomBlockOCamlRep<T>`
// (i.e., constructed above in our `ToOcamlRep` implementation) or
// someone went out of their way to construct an invalid one. Assume
// it's valid and read in the `CustomBlockOcamlRep<T>`.
let custom_block = unsafe { value_ptr.as_ref().unwrap() };
Ok(&custom_block.1)
}
/// Trait that allows OCaml serialization and deserialization.
///
/// If you want to support serialization/deserialization, you
/// **MUST** call `CamlSerialize::register()` when starting up
/// the program.
///
/// This will register your type in the OCaml runtime, allowing
/// deserialization.
///
/// Rust does not support different instantiations of the default
/// implementation for different implementors of trait types. Therefore,
/// you must implement `type_identifier`, `operations` and `register`
/// manually when implementing this trait for a type. You can use
/// the `caml_serialize_default_impls!()` to do that automatically:
///
/// ```
/// impl CamlSerialize for MyType {
/// caml_serialize_default_impls!();
/// }
/// ```
pub trait CamlSerialize: Sized {
/// Get the type name.
fn type_identifier() -> &'static CStr;
/// Get the type's custom operations struct.
///
/// Always has to return the same reference! If not, the
/// OCaml-to-Rust conversion will fail.
///
/// The returned structure is not intended to be used by
/// a programmer. Using it directly by e.g. injecting it
/// into OCaml custom blocks is dangerous and can cause
/// undefined behavior. Don't do it!
fn operations() -> &'static CustomOperations;
/// Register the type with the OCaml system.
///
/// # Safety
///
/// Must not be called from multiple threads.
///
/// This function interacts with the OCaml runtime, which is not thread-safe.
/// If any other threads are attempting to interact with the OCaml runtime
/// or its custom operations table (e.g., by invoking this function, or by
/// executing OCaml code using custom blocks) when this function is invoked,
/// undefined behavior will result.
///
/// # Examples
///
/// ```
/// use ocamlrep_custom::CamlSerialize;
/// use ocamlrep_ocamlpool::ocaml_ffi;
///
/// struct IntBox(isize);
///
/// impl CamlSerialize for IntBox {
/// caml_serialize_default_impls!();
/// fn serialize(&self) -> Vec<u8> {... }
/// fn deserialize(buffer: &[u8]) -> Self {... }
/// }
///
/// ocaml_ffi! {
/// fn register_custom_types() {
/// // Once `register_custom_types` has been invoked from OCaml, IntBox
/// // can be serialized and deserialized from OCaml using the Marshal
/// // module.
/// //
/// // Safety: this will be called from OCaml, as such nothing else will
/// // be interacting with the OCaml runtime.
/// unsafe { IntBox::register() };
/// }
/// }
/// ```
unsafe fn register();
/// Convert a value to an array of bytes.
///
/// The default implementation panics.
fn serialize(&self) -> Vec<u8> {
panic!(
"serialization not implemented for {:?}",
Self::type_identifier()
)
}
/// Deserialize a value form an array of bytes.
///
/// The default implementation panics.
fn deserialize(_data: &[u8]) -> Self {
panic!(
"deserialization not implemented for {:?}",
Self::type_identifier()
)
}
}
#[macro_export]
macro_rules! caml_serialize_default_impls {
() => {
fn type_identifier() -> &'static std::ffi::CStr {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut TYPE_NAME: Option<std::ffi::CString> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
TYPE_NAME = Some($crate::type_identifier_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it is never changes.
// - Concurrent reads are OK.
unsafe { TYPE_NAME.as_ref().unwrap() }
}
fn operations() -> &'static $crate::CustomOperations {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut OPS_STRUCT: Option<$crate::CustomOperations> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
OPS_STRUCT = Some($crate::operations_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it is never changes.
// - Concurrent reads are OK.
unsafe { OPS_STRUCT.as_ref().unwrap() }
}
unsafe fn register() {
static mut IS_REGISTERED: bool = false;
// Safety: Can only be called in a single-threaded context!
if IS_REGISTERED {
return;
}
IS_REGISTERED = true;
let ops = Self::operations();
$crate::register_helper::<Self>(ops)
}
};
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn type_identifier_helper<T>() -> CString {
let name = format!("ocamlrep.custom.{}", std::any::type_name::<T>());
std::ffi::CString::new(name).unwrap()
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn operations_helper<T: CamlSerialize>() -> CustomOperations {
let type_identifier = <T as CamlSerialize>::type_identifier();
let mut ops = CustomOperations::new(type_identifier);
ops.finalize = Some(drop_value::<T>);
ops.serialize = Some(serialize_value::<T>);
ops.deserialize = Some(deserialize_value::<T>);
ops
}
/// Helper used for the `caml_serialize_default_impls` macro
///
/// Should not be used directly. Interacts with the OCaml runtime and is
/// thus unsafe to call in a multi-threaded context.
pub unsafe fn register_helper<T>(ops: &'static CustomOperations) {
// Safety: operations struct has a static lifetime, it will live forever!
caml_register_custom_operations(ops as *const CustomOperations);
}
/// Helper function used by `operations_helper`. Returns a finalizer for custom
/// blocks containing an `Rc<T>`.
extern "C" fn | <T: CamlSerialize>(value: usize) {
let _: usize = catch_unwind(|| {
// Safety: We trust here that CustomOperations structs containing this
// `drop_value` instance will only ever be referenced by custom blocks
// matching the layout of `CustomBlockOcamlRep`. If that's so, then this
// function should only be invoked by the OCaml runtime on a pointer to
// a CustomBlockOcamlRep<T> created by T::to_ocamlrep. Such a pointer
// would be aligned and valid.
let custom_block_ptr = value as *mut CustomBlockOcamlRep<T>;
let custom_block = unsafe { custom_block_ptr.as_mut().unwrap() };
// The `Rc` will be dropped here, and its reference count will decrease
// by one (possibly freeing the referenced value).
// Safety: Since the OCaml runtime will only invoke the finalizer for a
// value which will never again be used, it is safe to use
// `drop_in_place` (i.e., our finalizer will only be invoked once, so we
// won't cause a double-drop).
unsafe {
std::ptr::drop_in_place(&mut custom_block.1);
}
0
});
}
/// Helper function for serialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when serializing a custom block.
extern "C" fn serialize_value<T: CamlSerialize>(
value: usize,
bsize_32: *mut usize,
bsize_64: *mut usize,
) {
let _: usize = catch_unwind(|| {
// Safety: Only called by the OCaml runtime (we don't expose a means of
// invoking this function from Rust), which provides some OCaml
// CUSTOM_TAG block as the value.
let value = unsafe { Value::from_bits(value) };
// Only called by the OCaml runtime, when serializing
// a Custom-object managed by the OCaml GC.
let rc = rc_from_value::<T>(value).unwrap();
let bytes: Vec<u8> = rc.serialize();
let bytes_ptr = bytes.as_ptr();
// Safety: As above, we don't expose a means of invoking this function
// from Rust--it can only be invoked by the OCaml runtime while
// serializing a value. It is safe to invoke OCaml serialization
// functions in this context.
unsafe {
let len = bytes.len();
caml_serialize_int_8(len.try_into().unwrap());
caml_serialize_block_1(bytes_ptr, len);
// The size taken up in the data-part of the custom block.
*bsize_32 = std::mem::size_of::<u32>();
*bsize_64 = std::mem::size_of::<u64>();
}
0
});
}
/// Helper function for deserialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when deserializing a custom block.
extern "C" fn deserialize_value<T: CamlSerialize>(data_ptr: *mut c_void) -> usize {
catch_unwind(|| {
// Get the serialized bytes from the input channel.
let bytes = unsafe {
// Safety: We don't expose a means of invoking this function from
// Rust--`deserialize_value` can only be invoked by the OCaml
// runtime while deserializing a custom block value. It is safe to
// invoke OCaml deserialization functions in this context.
let len: usize = caml_deserialize_sint_8().try_into().unwrap();
let mut buf: Vec<u8> = Vec::with_capacity(len);
// Safety: len <= capacity. The elements aren't initialized at this
// time, but we trust that caml_deserialize_block_1 will fill `len`
// bytes of the buffer.
#[allow(clippy::uninit_vec)]
buf.set_len(len);
// Safety: As above, `deserialize_value` can only be invoked by the
// OCaml runtime during custom block deserialization.
caml_deserialize_block_1(buf.as_mut_ptr(), len);
buf
};
// Actually deserialize those bytes into a T.
let val: T = CamlSerialize::deserialize(&bytes);
// Safety: The OCaml runtime will give us a data buffer which is
// usize-aligned and valid for reads and writes of bsize_32 or bsize_64
// (as provided by `serialize_value`, above) bytes (depending on system
// architecture). This is sufficient for `Rc<T>` (which has the size and
// alignment of usize).
let data_ptr = data_ptr as *mut MaybeUninit<Rc<T>>;
let data = unsafe { data_ptr.as_mut().unwrap() };
*data = MaybeUninit::new(Rc::new(val));
// Return the size of the value we wrote to our output pointer. The
// OCaml runtime will verify that it matches the expected
// bsize_32/bsize_64 written by the serializer.
std::mem::size_of_val(data)
})
}
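// A minimal pure-Rust sketch of the framing used by `serialize_value` and
// `deserialize_value` above: an 8-byte length, then the payload bytes. The
// concrete byte encoding of the length is owned by OCaml's Marshal module
// (`caml_serialize_int_8`); this helper only illustrates the logical layout
// and is never called by the OCaml runtime.
#[allow(dead_code)]
fn logical_frame(payload: &[u8]) -> (i64, Vec<u8>) {
    // Write order: length first (`caml_serialize_int_8`), then the block
    // (`caml_serialize_block_1`); `deserialize_value` reads them back in
    // the same order.
    (payload.len() as i64, payload.to_vec())
}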
#[cfg(test)]
mod test {
use std::mem::*;
use super::*;
#[test]
fn custom_block_ocamlrep_size() {
assert_eq!(
size_of::<CustomBlockOcamlRep<u8>>(),
2 * size_of::<Value<'_>>()
);
}
#[test]
fn custom_block_ocamlrep_align() {
assert_eq!(
align_of::<CustomBlockOcamlRep<u8>>(),
align_of::<Value<'_>>()
);
}
}
| drop_value | identifier_name |
lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//! Library to build `Custom_tag` OCaml values.
use std::ffi::CStr;
use std::ffi::CString;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::os::raw::c_char;
use std::os::raw::c_int;
use std::os::raw::c_void;
use std::rc::Rc;
use ocamlrep::from;
use ocamlrep::Allocator;
use ocamlrep::FromError;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
use ocamlrep::Value;
use ocamlrep::CUSTOM_TAG;
use ocamlrep_ocamlpool::catch_unwind;
extern "C" {
fn caml_register_custom_operations(ops: *const CustomOperations);
fn caml_serialize_block_1(data: *const u8, len: usize);
fn caml_serialize_int_8(x: i64);
fn caml_deserialize_sint_8() -> i64;
fn caml_deserialize_block_1(data: *mut u8, len: usize);
}
/// Struct containing the operations for a custom OCaml block.
///
/// This is the Rust encoding of OCaml's `struct custom_operations`.
///
/// For more information on the fields see
/// [the OCaml guide](https://caml.inria.fr/pub/docs/manual-ocaml/intfc.html#ss:c-custom-ops)
#[repr(C)]
pub struct CustomOperations {
identifier: *const c_char,
finalize: Option<extern "C" fn(usize) -> ()>,
compare: Option<extern "C" fn(usize, usize) -> c_int>,
hash: Option<extern "C" fn(usize) -> isize>,
serialize: Option<extern "C" fn(usize, *mut usize, *mut usize) -> ()>,
deserialize: Option<extern "C" fn(*mut c_void) -> usize>,
compare_ext: Option<extern "C" fn(usize, usize) -> c_int>,
/// Not implemented yet, always set to NULL.
custom_fixed_length: *const c_void,
}
impl CustomOperations {
/// Create a new `CustomOperations` struct with the given identifier.
///
/// All function pointers will be set to NULL by default.
fn new(identifier: &'static CStr) -> Self {
Self {
identifier: identifier.as_ptr(),
finalize: None,
compare: None,
hash: None,
serialize: None,
deserialize: None,
compare_ext: None,
custom_fixed_length: std::ptr::null(),
}
}
}
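// For reference, the C declaration this mirrors (OCaml's <caml/custom.h>);
// the field order and the #[repr(C)] layout above must match it exactly:
//
//     struct custom_operations {
//         char *identifier;
//         void (*finalize)(value v);
//         int (*compare)(value v1, value v2);
//         intnat (*hash)(value v);
//         void (*serialize)(value v, uintnat *bsize_32, uintnat *bsize_64);
//         uintnat (*deserialize)(void *dst);
//         int (*compare_ext)(value v1, value v2);
//         const struct custom_fixed_length *fixed_length;
//     };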
/// A wrapper around a Rust type that allows it
/// to be written into/read from OCaml memory and managed by
/// the OCaml GC.
///
/// The value still lives on the Rust heap in an `Rc`'d pointer,
/// and the `Rc`-pointer itself will be written to OCaml memory.
///
/// # Examples
///
/// Expose Rust type:
///
/// ```rust
/// use ocamlrep_custom::caml_serialize_default_impls;
/// use ocamlrep_custom::{CamlSerialize, Custom};
/// use ocamlrep_ocamlpool::ocaml_ffi;
/// use std::cell::Cell;
///
/// pub struct Counter(Cell<isize>);
///
/// impl CamlSerialize for Counter {
/// caml_serialize_default_impls!();
/// }
///
/// ocaml_ffi! {
/// fn counter_new() -> Custom<Counter> {
/// Custom::from(Counter(Cell::new(0)))
/// }
///
/// fn counter_inc(counter: Custom<Counter>) -> Custom<Counter> {
/// counter.0.set(counter.0.get() + 1);
/// counter
/// }
///
/// fn counter_read(counter: Custom<Counter>) -> isize {
/// counter.0.get()
/// }
/// }
/// ```
///
/// From OCaml:
///
/// ```ocaml
/// type counter; (* abstract type *)
///
/// external counter_new : unit -> counter = "counter_new"
/// external counter_inc : counter -> unit = "counter_inc"
/// external counter_read : counter -> int = "counter_read"
///
/// let () =
/// let cnt = counter_new () in (* will be dropped on GC finalization *)
/// assert (counter_read cnt == 0);
/// counter_inc cnt;
/// assert (counter_read cnt == 1)
/// ```
pub struct Custom<T: CamlSerialize>(Rc<T>);
impl<T: CamlSerialize> Custom<T> {
/// Create a new `Custom` wrapper by taking ownership of the value.
pub fn from(x: T) -> Self {
Self::new(Rc::new(x))
}
/// Create a new `Custom` directly from an `Rc`'d value.
pub fn new(x: Rc<T>) -> Self {
Self(x)
}
/// Get a reference to the inner `Rc`
pub fn inner(&self) -> &Rc<T> {
&self.0
}
}
impl<T: CamlSerialize> Deref for Custom<T> {
type Target = T;
fn deref(&self) -> &T {
self.0.deref()
}
}
/// A custom block has two words: a pointer to the CustomOperations struct,
/// and a pointer to the value. Our values are ref-counted, but an Rc pointer
/// is just pointer-sized.
#[repr(C)]
struct CustomBlockOcamlRep<T>(&'static CustomOperations, Rc<T>);
const CUSTOM_BLOCK_SIZE_IN_BYTES: usize = std::mem::size_of::<CustomBlockOcamlRep<()>>();
const CUSTOM_BLOCK_SIZE_IN_WORDS: usize =
CUSTOM_BLOCK_SIZE_IN_BYTES / std::mem::size_of::<Value<'_>>();
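// Assuming `Value` is one machine word, this works out to 2 words (16 bytes
// on a 64-bit target): one word for the operations pointer and one for the
// `Rc`. The tests at the bottom of this file assert exactly that.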
impl<T: CamlSerialize> ToOcamlRep for Custom<T> {
fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> {
let ops: &'static CustomOperations = <T as CamlSerialize>::operations();
let mut block = alloc.block_with_size_and_tag(CUSTOM_BLOCK_SIZE_IN_WORDS, CUSTOM_TAG);
// Safety: we don't call any method on `alloc` after this method.
let block_ptr: *mut Value<'_> = unsafe { alloc.block_ptr_mut(&mut block) };
// Safety: `alloc` guarantees that the `block_ptr` returned by
// `block_ptr_mut` is aligned to `align_of::<Value>()` and valid
// for reads and writes of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// size_of::<Value>()` bytes. Since `CustomBlockOcamlRep` has size
// `CUSTOM_BLOCK_SIZE_IN_WORDS * size_of::<Value>()`, its
// alignment is equal to `align_of::<Value>()`, and no other
// reference to our newly-allocated block can exist, it's safe for us to
// interpret `block_ptr` as a `&mut CustomBlockOcamlRep`.
let block_ptr = block_ptr as *mut MaybeUninit<CustomBlockOcamlRep<T>>;
let custom_block = unsafe { block_ptr.as_mut().unwrap() };
// Write the address of the operations struct to the first word, and the
// pointer to the value to the second word.
*custom_block = MaybeUninit::new(CustomBlockOcamlRep(ops, Rc::clone(&self.0)));
block.build()
}
}
impl<T: CamlSerialize> FromOcamlRep for Custom<T> {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
let rc = rc_from_value::<T>(value)?;
let rc = Rc::clone(rc);
Ok(Custom::new(rc))
}
}
/// Helper function to fetch a reference to the `Rc` from the OCaml representation
/// of a custom block.
fn rc_from_value<'a, T: CamlSerialize>(value: Value<'a>) -> Result<&'a Rc<T>, FromError> {
let block = from::expect_block(value)?;
from::expect_block_tag(block, CUSTOM_TAG)?;
from::expect_block_size(block, CUSTOM_BLOCK_SIZE_IN_WORDS)?;
// We still don't know whether this block is in fact a
// CustomBlockOcamlRep<T>--it may be a CustomBlockOcamlRep<U>, or some
// other custom block which happens to be the same size. We can verify
// that the block is actually a CustomBlockOcamlRep<T> by checking that
// it points to the correct CustomOperations struct.
let ops = <T as CamlSerialize>::operations();
if !std::ptr::eq(ops, block[0].to_bits() as *const CustomOperations) {
return Err(FromError::UnexpectedCustomOps {
expected: ops as *const _ as usize,
actual: block[0].to_bits(),
});
}
let value_ptr = value.to_bits() as *const CustomBlockOcamlRep<T>;
// Safety: `value_ptr` is guaranteed to be aligned to
// `align_of::<Value>()`, and our use of `expect_block_size` guarantees
// that the pointer is valid for reads of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// `size_of::<Value>()` bytes. Since the first field points to the right
// operations struct, we either have a valid `CustomBlockOcamlRep<T>`
// (i.e., constructed above in our `ToOcamlRep` implementation) or
// someone went out of their way to construct an invalid one. Assume
// it's valid and read in the `CustomBlockOcamlRep<T>`.
let custom_block = unsafe { value_ptr.as_ref().unwrap() };
Ok(&custom_block.1)
}
/// Trait that allows OCaml serialization and deserialization.
///
/// If you want to support serialization/deserialization, you
/// **MUST** call `CamlSerialize::register()` when starting up
/// the program.
///
/// This will register your type in the OCaml runtime, allowing
/// deserialization.
///
/// Rust does not support different instantiations of the default
/// implementation for different implementors of trait types. Therefore,
/// you must implement `type_identifier`, `operations` and `register`
/// manually when implementing this trait for a type. You can use
/// the `caml_serialize_default_impls!()` to do that automatically:
///
/// ```
/// impl CamlSerialize for MyType {
/// caml_serialize_default_impls!();
/// }
/// ```
pub trait CamlSerialize: Sized {
/// Get the type name.
fn type_identifier() -> &'static CStr;
/// Get the type's custom operations struct.
///
/// Always has to return the same reference! If not, the
/// OCaml-to-Rust conversion will fail.
///
/// The returned structure is not intended to be used by
/// a programmer. Using it directly by e.g. injecting it
/// into OCaml custom blocks is dangerous and can cause
/// undefined behavior. Don't do it!
fn operations() -> &'static CustomOperations;
/// Register the type with the OCaml system.
///
/// # Safety
///
/// Must not be called from multiple threads.
///
/// This function interacts with the OCaml runtime, which is not thread-safe.
/// If any other threads are attempting to interact with the OCaml runtime
/// or its custom operations table (e.g., by invoking this function, or by
/// executing OCaml code using custom blocks) when this function is invoked,
/// undefined behavior will result.
///
/// # Examples
///
/// ```
/// use ocamlrep_custom::CamlSerialize;
/// use ocamlrep_ocamlpool::ocaml_ffi;
///
/// struct IntBox(isize);
///
/// impl CamlSerialize for IntBox {
/// caml_serialize_default_impls!();
/// fn serialize(&self) -> Vec<u8> {... }
/// fn deserialize(buffer: &[u8]) -> Self {... }
/// }
///
/// ocaml_ffi! {
/// fn register_custom_types() {
/// // Once `register_custom_types` has been invoked from OCaml, IntBox
/// // can be serialized and deserialized from OCaml using the Marshal
/// // module.
/// //
/// // Safety: this will be called from OCaml, as such nothing else will
/// // be interacting with the OCaml runtime.
/// unsafe { IntBox::register() };
/// }
/// }
/// ```
unsafe fn register();
/// Convert a value to an array of bytes.
///
/// The default implementation panics.
fn serialize(&self) -> Vec<u8> {
panic!(
"serialization not implemented for {:?}",
Self::type_identifier()
)
}
/// Deserialize a value from an array of bytes.
///
/// The default implementation panics.
fn deserialize(_data: &[u8]) -> Self {
panic!(
"deserialization not implemented for {:?}",
Self::type_identifier()
)
}
}
#[macro_export]
macro_rules! caml_serialize_default_impls {
() => {
fn type_identifier() -> &'static std::ffi::CStr {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut TYPE_NAME: Option<std::ffi::CString> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
TYPE_NAME = Some($crate::type_identifier_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it never changes.
// - Concurrent reads are OK.
unsafe { TYPE_NAME.as_ref().unwrap() }
}
fn operations() -> &'static $crate::CustomOperations {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut OPS_STRUCT: Option<$crate::CustomOperations> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
OPS_STRUCT = Some($crate::operations_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it never changes.
// - Concurrent reads are OK.
unsafe { OPS_STRUCT.as_ref().unwrap() }
}
unsafe fn register() {
static mut IS_REGISTERED: bool = false;
// Safety: Can only be called in a single-threaded context!
if IS_REGISTERED {
return;
}
IS_REGISTERED = true;
let ops = Self::operations();
$crate::register_helper::<Self>(ops)
}
};
}
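// A minimal sketch of overriding the panicking `serialize`/`deserialize`
// defaults. The type `Millis` is hypothetical (not part of this library);
// it uses a fixed-width little-endian encoding, while the macro still
// supplies `type_identifier`, `operations`, and `register`.
#[allow(dead_code)]
struct Millis(u64);

impl CamlSerialize for Millis {
    caml_serialize_default_impls!();

    fn serialize(&self) -> Vec<u8> {
        self.0.to_le_bytes().to_vec()
    }

    fn deserialize(data: &[u8]) -> Self {
        // Panics on a length mismatch, which is acceptable for a sketch;
        // real code should validate the buffer length first.
        let mut buf = [0u8; 8];
        buf.copy_from_slice(data);
        Millis(u64::from_le_bytes(buf))
    }
}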
/// Helper used for the `caml_serialize_default_impls` macro
pub fn type_identifier_helper<T>() -> CString {
let name = format!("ocamlrep.custom.{}", std::any::type_name::<T>());
std::ffi::CString::new(name).unwrap()
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn operations_helper<T: CamlSerialize>() -> CustomOperations {
let type_identifier = <T as CamlSerialize>::type_identifier();
let mut ops = CustomOperations::new(type_identifier);
ops.finalize = Some(drop_value::<T>);
ops.serialize = Some(serialize_value::<T>);
ops.deserialize = Some(deserialize_value::<T>);
ops
}
/// Helper used for the `caml_serialize_default_impls` macro
///
/// Should not be used directly. Interacts with the OCaml runtime and is
/// thus unsafe to call in a multi-threaded context.
pub unsafe fn register_helper<T>(ops: &'static CustomOperations) {
// Safety: operations struct has a static lifetime, it will live forever!
caml_register_custom_operations(ops as *const CustomOperations);
}
/// Helper function used by `operations_helper`. Returns a finalizer for custom
/// blocks containing an `Rc<T>`.
extern "C" fn drop_value<T: CamlSerialize>(value: usize) {
let _: usize = catch_unwind(|| {
// Safety: We trust here that CustomOperations structs containing this
// `drop_value` instance will only ever be referenced by custom blocks
// matching the layout of `CustomBlockOcamlRep`. If that's so, then this
// function should only be invoked by the OCaml runtime on a pointer to
// a CustomBlockOcamlRep<T> created by T::to_ocamlrep. Such a pointer
// would be aligned and valid.
let custom_block_ptr = value as *mut CustomBlockOcamlRep<T>;
let custom_block = unsafe { custom_block_ptr.as_mut().unwrap() };
// The `Rc` will be dropped here, and its reference count will decrease
// by one (possibly freeing the referenced value).
// Safety: Since the OCaml runtime will only invoke the finalizer for a
// value which will never again be used, it is safe to use
// `drop_in_place` (i.e., our finalizer will only be invoked once, so we
// won't cause a double-drop).
unsafe {
std::ptr::drop_in_place(&mut custom_block.1);
}
0
});
}
/// Helper function for serialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when serializing a custom block.
extern "C" fn serialize_value<T: CamlSerialize>(
value: usize,
bsize_32: *mut usize,
bsize_64: *mut usize,
) {
let _: usize = catch_unwind(|| {
// Safety: Only called by the OCaml runtime (we don't expose a means of
// invoking this function from Rust), which provides some OCaml
// CUSTOM_TAG block as the value.
let value = unsafe { Value::from_bits(value) };
// Only called by the OCaml runtime, when serializing
// a Custom-object managed by the OCaml GC.
let rc = rc_from_value::<T>(value).unwrap();
let bytes: Vec<u8> = rc.serialize();
let bytes_ptr = bytes.as_ptr();
// Safety: As above, we don't expose a means of invoking this function
// from Rust--it can only be invoked by the OCaml runtime while
// serializing a value. It is safe to invoke OCaml serialization
// functions in this context.
unsafe {
let len = bytes.len();
caml_serialize_int_8(len.try_into().unwrap());
caml_serialize_block_1(bytes_ptr, len);
// The size taken up in the data-part of the custom block.
*bsize_32 = std::mem::size_of::<u32>();
*bsize_64 = std::mem::size_of::<u64>();
}
0
});
}
/// Helper function for deserialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when deserializing a custom block.
extern "C" fn deserialize_value<T: CamlSerialize>(data_ptr: *mut c_void) -> usize {
catch_unwind(|| {
// Get the serialized bytes from the input channel.
let bytes = unsafe {
// Safety: We don't expose a means of invoking this function from
// Rust--`deserialize_value` can only be invoked by the OCaml
// runtime while deserializing a custom block value. It is safe to
// invoke OCaml deserialization functions in this context.
let len: usize = caml_deserialize_sint_8().try_into().unwrap();
let mut buf: Vec<u8> = Vec::with_capacity(len);
// Safety: len <= capacity. The elements aren't initialized at this
// time, but we trust that caml_deserialize_block_1 will fill `len`
// bytes of the buffer.
#[allow(clippy::uninit_vec)]
buf.set_len(len);
// Safety: As above, `deserialize_value` can only be invoked by the
// OCaml runtime during custom block deserialization.
caml_deserialize_block_1(buf.as_mut_ptr(), len);
buf
};
// Actually deserialize those bytes into a T.
let val: T = CamlSerialize::deserialize(&bytes);
// Safety: The OCaml runtime will give us a data buffer which is
// usize-aligned and valid for reads and writes of bsize_32 or bsize_64
// (as provided by `serialize_value`, above) bytes (depending on system
// architecture). This is sufficient for `Rc<T>` (which has the size and
// alignment of usize).
let data_ptr = data_ptr as *mut MaybeUninit<Rc<T>>;
let data = unsafe { data_ptr.as_mut().unwrap() };
*data = MaybeUninit::new(Rc::new(val));
// Return the size of the value we wrote to our output pointer. The
// OCaml runtime will verify that it matches the expected
// bsize_32/bsize_64 written by the serializer.
std::mem::size_of_val(data)
})
}
#[cfg(test)]
mod test {
use std::mem::*;
use super::*;
#[test]
fn custom_block_ocamlrep_size() {
assert_eq!(
size_of::<CustomBlockOcamlRep<u8>>(),
2 * size_of::<Value<'_>>()
);
}
#[test]
fn custom_block_ocamlrep_align() |
}
| {
assert_eq!(
align_of::<CustomBlockOcamlRep<u8>>(),
align_of::<Value<'_>>()
);
} | identifier_body |
lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//! Library to build `Custom_tag` OCaml values.
use std::ffi::CStr;
use std::ffi::CString;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::os::raw::c_char;
use std::os::raw::c_int;
use std::os::raw::c_void;
use std::rc::Rc;
use ocamlrep::from;
use ocamlrep::Allocator;
use ocamlrep::FromError;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
use ocamlrep::Value;
use ocamlrep::CUSTOM_TAG;
use ocamlrep_ocamlpool::catch_unwind;
extern "C" {
fn caml_register_custom_operations(ops: *const CustomOperations);
fn caml_serialize_block_1(data: *const u8, len: usize);
fn caml_serialize_int_8(x: i64);
fn caml_deserialize_sint_8() -> i64;
fn caml_deserialize_block_1(data: *mut u8, len: usize);
}
/// Struct containing the operations for a custom OCaml block.
///
/// This is the Rust encoding of OCaml's `struct custom_operations`.
///
/// For more information on the fields see
/// [the OCaml guide](https://caml.inria.fr/pub/docs/manual-ocaml/intfc.html#ss:c-custom-ops)
#[repr(C)]
pub struct CustomOperations {
identifier: *const c_char,
finalize: Option<extern "C" fn(usize) -> ()>,
compare: Option<extern "C" fn(usize, usize) -> c_int>,
hash: Option<extern "C" fn(usize) -> isize>,
serialize: Option<extern "C" fn(usize, *mut usize, *mut usize) -> ()>,
deserialize: Option<extern "C" fn(*mut c_void) -> usize>,
compare_ext: Option<extern "C" fn(usize, usize) -> c_int>,
/// Not implemented yet, always set to NULL.
custom_fixed_length: *const c_void,
}
impl CustomOperations {
/// Create a new `CustomOperations` struct with the given identifier.
///
/// All function pointers will be set to NULL by default.
fn new(identifier: &'static CStr) -> Self {
Self {
identifier: identifier.as_ptr(),
finalize: None,
compare: None,
hash: None,
serialize: None,
deserialize: None,
compare_ext: None,
custom_fixed_length: std::ptr::null(),
}
}
}
/// A wrapper around a Rust type that allows it
/// to be written into/read from OCaml memory and managed by
/// the OCaml GC.
///
/// The value still lives on the Rust heap in an `Rc`'d pointer,
/// and the `Rc`-pointer itself will be written to OCaml memory.
///
/// # Examples
///
/// Expose Rust type:
///
/// ```rust
/// use ocamlrep_custom::caml_serialize_default_impls;
/// use ocamlrep_custom::{CamlSerialize, Custom};
/// use ocamlrep_ocamlpool::ocaml_ffi;
/// use std::cell::Cell;
///
/// pub struct Counter(Cell<isize>);
///
/// impl CamlSerialize for Counter {
/// caml_serialize_default_impls!();
/// }
///
/// ocaml_ffi! {
/// fn counter_new() -> Custom<Counter> {
/// Custom::from(Counter(Cell::new(0)))
/// }
///
/// fn counter_inc(counter: Custom<Counter>) -> Custom<Counter> {
/// counter.0.set(counter.0.get() + 1);
/// counter
/// }
///
/// fn counter_read(counter: Custom<Counter>) -> isize {
/// counter.0.get()
/// }
/// }
/// ```
///
/// From OCaml:
///
/// ```ocaml
/// type counter; (* abstract type *)
///
/// external counter_new : unit -> counter = "counter_new"
/// external counter_inc : counter -> unit = "counter_inc"
/// external counter_read : counter -> int = "counter_read"
///
/// let () =
/// let cnt = counter_new () in (* will be dropped on GC finalization *)
/// assert (counter_read cnt == 0);
/// counter_inc cnt;
/// assert (counter_read cnt == 1)
/// ```
pub struct Custom<T: CamlSerialize>(Rc<T>);
impl<T: CamlSerialize> Custom<T> {
/// Create a new `Custom` wrapper by taking ownership of the value.
pub fn from(x: T) -> Self {
Self::new(Rc::new(x))
}
/// Create a new `Custom` directly from an `Rc`'d value.
pub fn new(x: Rc<T>) -> Self {
Self(x)
}
/// Get a reference to the inner `Rc`
pub fn inner(&self) -> &Rc<T> {
&self.0
}
}
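// Usage sketch (assuming a hypothetical `Widget: CamlSerialize`): both
// constructors store the value behind an `Rc`, so converting to the OCaml
// representation later only clones a reference-counted pointer.
//
//     let owned = Custom::from(Widget::default());
//     let shared = Custom::new(Rc::new(Widget::default()));
//     let rc: &Rc<Widget> = shared.inner();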
impl<T: CamlSerialize> Deref for Custom<T> {
type Target = T;
fn deref(&self) -> &T {
self.0.deref()
}
}
/// A custom block has two words: a pointer to the CustomOperations struct,
/// and a pointer to the value. Our values are ref-counted, but an Rc pointer
/// is just pointer-sized.
#[repr(C)]
struct CustomBlockOcamlRep<T>(&'static CustomOperations, Rc<T>);
const CUSTOM_BLOCK_SIZE_IN_BYTES: usize = std::mem::size_of::<CustomBlockOcamlRep<()>>();
const CUSTOM_BLOCK_SIZE_IN_WORDS: usize =
CUSTOM_BLOCK_SIZE_IN_BYTES / std::mem::size_of::<Value<'_>>();
impl<T: CamlSerialize> ToOcamlRep for Custom<T> {
fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> {
let ops: &'static CustomOperations = <T as CamlSerialize>::operations();
let mut block = alloc.block_with_size_and_tag(CUSTOM_BLOCK_SIZE_IN_WORDS, CUSTOM_TAG);
// Safety: we don't call any method on `alloc` after this method.
let block_ptr: *mut Value<'_> = unsafe { alloc.block_ptr_mut(&mut block) };
// Safety: `alloc` guarantees that the `block_ptr` returned by
// `block_ptr_mut` is aligned to `align_of::<Value>()` and valid
// for reads and writes of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// size_of::<Value>()` bytes. Since `CustomBlockOcamlRep` has size
// `CUSTOM_BLOCK_SIZE_IN_WORDS * size_of::<Value>()`, its
// alignment is equal to `align_of::<Value>()`, and no other
// reference to our newly-allocated block can exist, it's safe for us to
// interpret `block_ptr` as a `&mut CustomBlockOcamlRep`.
let block_ptr = block_ptr as *mut MaybeUninit<CustomBlockOcamlRep<T>>;
let custom_block = unsafe { block_ptr.as_mut().unwrap() };
// Write the address of the operations struct to the first word, and the
// pointer to the value to the second word.
*custom_block = MaybeUninit::new(CustomBlockOcamlRep(ops, Rc::clone(&self.0)));
block.build()
}
}
impl<T: CamlSerialize> FromOcamlRep for Custom<T> {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
let rc = rc_from_value::<T>(value)?;
let rc = Rc::clone(rc);
Ok(Custom::new(rc))
}
}
/// Helper function to fetch a reference to the `Rc` from the OCaml representation
/// of a custom block.
fn rc_from_value<'a, T: CamlSerialize>(value: Value<'a>) -> Result<&'a Rc<T>, FromError> {
let block = from::expect_block(value)?;
from::expect_block_tag(block, CUSTOM_TAG)?;
from::expect_block_size(block, CUSTOM_BLOCK_SIZE_IN_WORDS)?;
// We still don't know whether this block is in fact a
// CustomBlockOcamlRep<T>--it may be a CustomBlockOcamlRep<U>, or some
// other custom block which happens to be the same size. We can verify
// that the block is actually a CustomBlockOcamlRep<T> by checking that
// it points to the correct CustomOperations struct.
let ops = <T as CamlSerialize>::operations();
if !std::ptr::eq(ops, block[0].to_bits() as *const CustomOperations) |
let value_ptr = value.to_bits() as *const CustomBlockOcamlRep<T>;
// Safety: `value_ptr` is guaranteed to be aligned to
// `align_of::<Value>()`, and our use of `expect_block_size` guarantees
// that the pointer is valid for reads of `CUSTOM_BLOCK_SIZE_IN_WORDS *
// `size_of::<Value>()` bytes. Since the first field points to the right
// operations struct, we either have a valid `CustomBlockOcamlRep<T>`
// (i.e., constructed above in our `ToOcamlRep` implementation) or
// someone went out of their way to construct an invalid one. Assume
// it's valid and read in the `CustomBlockOcamlRep<T>`.
let custom_block = unsafe { value_ptr.as_ref().unwrap() };
Ok(&custom_block.1)
}
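// Why the pointer comparison above is a sound type check: each
// `CamlSerialize` type gets its own `operations()` static (see
// `caml_serialize_default_impls!` below), so distinct types never share an
// operations address. Sketch, with hypothetical types `Foo` and `Bar`:
//
//     assert!(!std::ptr::eq(Foo::operations(), Bar::operations()));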
/// Trait that allows OCaml serialization and deserialization.
///
/// If you want to support serialization/deserialization, you
/// **MUST** call `CamlSerialize::register()` when starting up
/// the program.
///
/// This will register your type in the OCaml runtime, allowing
/// deserialization.
///
/// Rust does not support different instantiations of the default
/// implementation for different implementors of trait types. Therefore,
/// you must implement `type_identifier`, `operations` and `register`
/// manually when implementing this trait for a type. You can use
/// the `caml_serialize_default_impls!()` to do that automatically:
///
/// ```
/// impl CamlSerialize for MyType {
/// caml_serialize_default_impls!();
/// }
/// ```
pub trait CamlSerialize: Sized {
/// Get the type name.
fn type_identifier() -> &'static CStr;
/// Get the type's custom operations struct.
///
/// Always has to return the same reference! If not, the
/// OCaml-to-Rust conversion will fail.
///
/// The returned structure is not intended to be used by
/// a programmer. Using it directly by e.g. injecting it
/// into OCaml custom blocks is dangerous and can cause
/// undefined behavior. Don't do it!
fn operations() -> &'static CustomOperations;
/// Register the type with the OCaml system.
///
/// # Safety
///
/// Must not be called from multiple threads.
///
/// This function interacts with the OCaml runtime, which is not thread-safe.
/// If any other threads are attempting to interact with the OCaml runtime
/// or its custom operations table (e.g., by invoking this function, or by
/// executing OCaml code using custom blocks) when this function is invoked,
/// undefined behavior will result.
///
/// # Examples
///
/// ```
/// use ocamlrep_custom::CamlSerialize;
/// use ocamlrep_ocamlpool::ocaml_ffi;
///
/// struct IntBox(isize);
///
/// impl CamlSerialize for IntBox {
/// caml_serialize_default_impls!();
/// fn serialize(&self) -> Vec<u8> {... }
/// fn deserialize(buffer: &[u8]) -> Self {... }
/// }
///
/// ocaml_ffi! {
/// fn register_custom_types() {
/// // Once `register_custom_types` has been invoked from OCaml, IntBox
/// // can be serialized and deserialized from OCaml using the Marshal
/// // module.
/// //
/// // Safety: this will be called from OCaml, as such nothing else will
/// // be interacting with the OCaml runtime.
/// unsafe { IntBox::register() };
/// }
/// }
/// ```
unsafe fn register();
/// Convert a value to an array of bytes.
///
/// The default implementation panics.
fn serialize(&self) -> Vec<u8> {
panic!(
"serialization not implemented for {:?}",
Self::type_identifier()
)
}
/// Deserialize a value from an array of bytes.
///
/// The default implementation panics.
fn deserialize(_data: &[u8]) -> Self {
panic!(
"deserialization not implemented for {:?}",
Self::type_identifier()
)
}
}
#[macro_export]
macro_rules! caml_serialize_default_impls {
() => {
fn type_identifier() -> &'static std::ffi::CStr {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut TYPE_NAME: Option<std::ffi::CString> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
TYPE_NAME = Some($crate::type_identifier_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it never changes.
// - Concurrent reads are OK.
unsafe { TYPE_NAME.as_ref().unwrap() }
}
fn operations() -> &'static $crate::CustomOperations {
static ONCE: std::sync::Once = std::sync::Once::new();
static mut OPS_STRUCT: Option<$crate::CustomOperations> = None;
ONCE.call_once(|| {
// Safety:
// - We've gated initialization, so it's thread safe.
// - We only set the constant once.
unsafe {
OPS_STRUCT = Some($crate::operations_helper::<Self>());
}
});
// Safety:
// - By now the constant has been initialized, and once initialized
// it never changes.
// - Concurrent reads are OK.
unsafe { OPS_STRUCT.as_ref().unwrap() }
}
unsafe fn register() {
static mut IS_REGISTERED: bool = false;
// Safety: Can only be called in a single-threaded context!
if IS_REGISTERED {
return;
}
IS_REGISTERED = true;
let ops = Self::operations();
$crate::register_helper::<Self>(ops)
}
};
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn type_identifier_helper<T>() -> CString {
let name = format!("ocamlrep.custom.{}", std::any::type_name::<T>());
std::ffi::CString::new(name).unwrap()
}
/// Helper used for the `caml_serialize_default_impls` macro
pub fn operations_helper<T: CamlSerialize>() -> CustomOperations {
let type_identifier = <T as CamlSerialize>::type_identifier();
let mut ops = CustomOperations::new(type_identifier);
ops.finalize = Some(drop_value::<T>);
ops.serialize = Some(serialize_value::<T>);
ops.deserialize = Some(deserialize_value::<T>);
ops
}
/// Helper used for the `caml_serialize_default_impls` macro
///
/// Should not be used directly. Interacts with the OCaml runtime and is
/// thus unsafe to call in a multi-threaded context.
pub unsafe fn register_helper<T>(ops: &'static CustomOperations) {
// Safety: operations struct has a static lifetime, it will live forever!
caml_register_custom_operations(ops as *const CustomOperations);
}
/// Helper function used by `operations_helper`. Returns a finalizer for custom
/// blocks containing an `Rc<T>`.
extern "C" fn drop_value<T: CamlSerialize>(value: usize) {
let _: usize = catch_unwind(|| {
// Safety: We trust here that CustomOperations structs containing this
// `drop_value` instance will only ever be referenced by custom blocks
// matching the layout of `CustomBlockOcamlRep`. If that's so, then this
// function should only be invoked by the OCaml runtime on a pointer to
// a CustomBlockOcamlRep<T> created by T::to_ocamlrep. Such a pointer
// would be aligned and valid.
let custom_block_ptr = value as *mut CustomBlockOcamlRep<T>;
let custom_block = unsafe { custom_block_ptr.as_mut().unwrap() };
// The `Rc` will be dropped here, and its reference count will decrease
// by one (possibly freeing the referenced value).
// Safety: Since the OCaml runtime will only invoke the finalizer for a
// value which will never again be used, it is safe to use
// `drop_in_place` (i.e., our finalizer will only be invoked once, so we
// won't cause a double-drop).
unsafe {
std::ptr::drop_in_place(&mut custom_block.1);
}
0
});
}
/// Helper function for serialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when serializing a custom block.
extern "C" fn serialize_value<T: CamlSerialize>(
value: usize,
bsize_32: *mut usize,
bsize_64: *mut usize,
) {
let _: usize = catch_unwind(|| {
// Safety: Only called by the OCaml runtime (we don't expose a means of
// invoking this function from Rust), which provides some OCaml
// CUSTOM_TAG block as the value.
let value = unsafe { Value::from_bits(value) };
// Only called by the OCaml runtime, when serializing
// a Custom-object managed by the OCaml GC.
let rc = rc_from_value::<T>(value).unwrap();
let bytes: Vec<u8> = rc.serialize();
let bytes_ptr = bytes.as_ptr();
// Safety: As above, we don't expose a means of invoking this function
// from Rust--it can only be invoked by the OCaml runtime while
// serializing a value. It is safe to invoke OCaml serialization
// functions in this context.
unsafe {
let len = bytes.len();
caml_serialize_int_8(len.try_into().unwrap());
caml_serialize_block_1(bytes_ptr, len);
// The size taken up in the data-part of the custom block.
*bsize_32 = std::mem::size_of::<u32>();
*bsize_64 = std::mem::size_of::<u64>();
}
0
});
}
/// Helper function for deserialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when deserializing a custom block.
extern "C" fn deserialize_value<T: CamlSerialize>(data_ptr: *mut c_void) -> usize {
catch_unwind(|| {
// Get the serialized bytes from the input channel.
let bytes = unsafe {
// Safety: We don't expose a means of invoking this function from
// Rust--`deserialize_value` can only be invoked by the OCaml
// runtime while deserializing a custom block value. It is safe to
// invoke OCaml deserialization functions in this context.
let len: usize = caml_deserialize_sint_8().try_into().unwrap();
let mut buf: Vec<u8> = Vec::with_capacity(len);
// Safety: len <= capacity. The elements aren't initialized at this
// time, but we trust that caml_deserialize_block_1 will fill `len`
// bytes of the buffer.
#[allow(clippy::uninit_vec)]
buf.set_len(len);
// Safety: As above, `deserialize_value` can only be invoked by the
// OCaml runtime during custom block deserialization.
caml_deserialize_block_1(buf.as_mut_ptr(), len);
buf
};
// Actually deserialize those bytes into a T.
let val: T = CamlSerialize::deserialize(&bytes);
// Safety: The OCaml runtime will give us a data buffer which is
// usize-aligned and valid for reads and writes of bsize_32 or bsize_64
// (as provided by `serialize_value`, above) bytes (depending on system
// architecture). This is sufficient for `Rc<T>` (which has the size and
// alignment of usize).
let data_ptr = data_ptr as *mut MaybeUninit<Rc<T>>;
let data = unsafe { data_ptr.as_mut().unwrap() };
*data = MaybeUninit::new(Rc::new(val));
// Return the size of the value we wrote to our output pointer. The
// OCaml runtime will verify that it matches the expected
// bsize_32/bsize_64 written by the serializer.
std::mem::size_of_val(data)
})
}
#[cfg(test)]
mod test {
use std::mem::*;
use super::*;
#[test]
fn custom_block_ocamlrep_size() {
assert_eq!(
size_of::<CustomBlockOcamlRep<u8>>(),
2 * size_of::<Value<'_>>()
);
}
#[test]
fn custom_block_ocamlrep_align() {
assert_eq!(
align_of::<CustomBlockOcamlRep<u8>>(),
align_of::<Value<'_>>()
);
}
}
| {
return Err(FromError::UnexpectedCustomOps {
expected: ops as *const _ as usize,
actual: block[0].to_bits(),
});
} | conditional_block |
test.rs | // Code that generates a test runner to run all the tests in a crate
#![allow(dead_code)]
#![allow(unused_imports)]
use HasTestSignature::*;
use std::iter;
use std::slice;
use std::mem;
use std::vec;
use log::debug;
use smallvec::{smallvec, SmallVec};
use syntax_pos::{DUMMY_SP, NO_EXPANSION, Span, SourceFile, BytePos};
use crate::attr::{self, HasAttrs};
use crate::source_map::{self, SourceMap, ExpnInfo, MacroAttribute, dummy_spanned, respan};
use crate::config;
use crate::entry::{self, EntryPointType};
use crate::ext::base::{ExtCtxt, Resolver};
use crate::ext::build::AstBuilder;
use crate::ext::expand::ExpansionConfig;
use crate::ext::hygiene::{self, Mark, SyntaxContext};
use crate::mut_visit::{*, ExpectOne};
use crate::feature_gate::Features;
use crate::util::map_in_place::MapInPlace;
use crate::parse::{token, ParseSess};
use crate::print::pprust;
use crate::ast::{self, Ident};
use crate::ptr::P;
use crate::symbol::{self, Symbol, keywords};
use crate::ThinVec;
struct Test {
span: Span,
path: Vec<Ident>,
}
struct TestCtxt<'a> {
span_diagnostic: &'a errors::Handler,
path: Vec<Ident>,
ext_cx: ExtCtxt<'a>,
test_cases: Vec<Test>,
reexport_test_harness_main: Option<Symbol>,
is_libtest: bool,
ctxt: SyntaxContext,
features: &'a Features,
test_runner: Option<ast::Path>,
// top-level re-export submodule, filled out after folding is finished
toplevel_reexport: Option<Ident>,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: &ParseSess,
resolver: &mut dyn Resolver,
should_test: bool,
krate: &mut ast::Crate,
span_diagnostic: &errors::Handler,
features: &Features) {
// Check for #[reexport_test_harness_main = "some_name"] which
// creates a `use __test::main as some_name;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
// Do this here so that the test_runner crate attribute gets marked as used
// even in non-test builds
let test_runner = get_test_runner(span_diagnostic, &krate);
if should_test {
generate_test_harness(sess, resolver, reexport_test_harness_main,
krate, span_diagnostic, features, test_runner)
}
}
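// Usage sketch of the attribute handled above, in the crate root of the
// crate under test:
//
//     #![reexport_test_harness_main = "test_main"]
//
// When built with --test, the synthesized harness entry point is then
// exported under the name `test_main`, so non-test code can invoke it.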
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(Ident, Ident)>,
}
impl<'a> MutVisitor for TestHarnessGenerator<'a> {
fn visit_crate(&mut self, c: &mut ast::Crate) {
noop_visit_crate(c, self);
// Create a main function to run our tests
let test_main = {
let unresolved = mk_main(&mut self.cx);
self.cx.ext_cx.monotonic_expander().flat_map_item(unresolved).pop().unwrap()
};
c.module.items.push(test_main);
}
fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
let ident = i.ident;
if ident.name != keywords::Invalid.name() {
self.cx.path.push(ident);
}
debug!("current path: {}", path_name_i(&self.cx.path));
let mut item = i.into_inner();
if is_test_case(&item) {
debug!("this is a test item");
let test = Test {
span: item.span,
path: self.cx.path.clone(),
};
self.cx.test_cases.push(test);
self.tests.push(item.ident);
}
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
if let ast::ItemKind::Mod(mut module) = item.node {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
noop_visit_mod(&mut module, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
if !tests.is_empty() || !tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, item.id, tests, tested_submods);
module.items.push(it);
if !self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
item.node = ast::ItemKind::Mod(module);
}
if ident.name != keywords::Invalid.name() {
self.cx.path.pop();
}
smallvec![P(item)]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// A folder used to remove any entry points (like fn main) because the harness
/// generator will provide its own
struct EntryPointCleaner {
// Current depth in the ast
depth: usize,
}
impl MutVisitor for EntryPointCleaner {
fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
self.depth += 1;
let item = noop_flat_map_item(i, self).expect_one("noop did something");
self.depth -= 1;
// Remove any #[main] or #[start] from the AST so it doesn't
// clash with the one we're going to add, but mark it as
// #[allow(dead_code)] to avoid printing warnings.
let item = match entry::entry_point_type(&item, self.depth) {
EntryPointType::MainNamed |
EntryPointType::MainAttr |
EntryPointType::Start =>
item.map(|ast::Item {id, ident, attrs, node, vis, span, tokens}| {
let allow_ident = Ident::from_str("allow");
let dc_nested = attr::mk_nested_word_item(Ident::from_str("dead_code"));
let allow_dead_code_item = attr::mk_list_item(DUMMY_SP, allow_ident,
vec![dc_nested]);
let allow_dead_code = attr::mk_attr_outer(DUMMY_SP,
attr::mk_attr_id(),
allow_dead_code_item);
ast::Item {
id,
ident,
attrs: attrs.into_iter()
.filter(|attr| {
!attr.check_name("main") &&!attr.check_name("start")
})
.chain(iter::once(allow_dead_code))
.collect(),
node,
vis,
span,
tokens,
}
}),
EntryPointType::None |
EntryPointType::OtherMain => item,
};
smallvec![item]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// Creates an item (specifically a module) that "pub use"s the tests passed in.
/// Each tested submodule will contain a similar reexport module that we will export
/// under the name of the original module. That is, `submod::__test_reexports` is
/// reexported like so `pub use submod::__test_reexports as submod`.
fn mk_reexport_mod(cx: &mut TestCtxt<'_>,
parent: ast::NodeId,
tests: Vec<Ident>,
tested_submods: Vec<(Ident, Ident)>)
-> (P<ast::Item>, Ident) {
let super_ = Ident::from_str("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
Some(r), path)
})).collect();
let reexport_mod = ast::Mod {
inline: true,
inner: DUMMY_SP,
items,
};
let sym = Ident::with_empty_ctxt(Symbol::gensym("__test_reexports"));
let parent = if parent == ast::DUMMY_NODE_ID { ast::CRATE_NODE_ID } else { parent };
cx.ext_cx.current_expansion.mark = cx.ext_cx.resolver.get_module_scope(parent);
let it = cx.ext_cx.monotonic_expander().flat_map_item(P(ast::Item {
ident: sym,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemKind::Mod(reexport_mod),
vis: dummy_spanned(ast::VisibilityKind::Public),
span: DUMMY_SP,
tokens: None,
})).pop().unwrap();
(it, sym)
}
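// Sketch of the item this function builds (real identifiers are gensym'd;
// the names here are illustrative):
//
//     pub mod __test_reexports {
//         pub use super::my_test;
//         pub use super::child::__test_reexports as child;
//     }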
/// Crawl over the crate, inserting test reexports and the test main function
fn generate_test_harness(sess: &ParseSess,
resolver: &mut dyn Resolver,
reexport_test_harness_main: Option<Symbol>,
krate: &mut ast::Crate,
sd: &errors::Handler,
features: &Features,
test_runner: Option<ast::Path>) {
// Remove the entry points
let mut cleaner = EntryPointCleaner { depth: 0 };
cleaner.visit_crate(krate);
let mark = Mark::fresh(Mark::root());
let mut econfig = ExpansionConfig::default("test".to_string());
econfig.features = Some(features);
let cx = TestCtxt {
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, econfig, resolver),
path: Vec::new(),
test_cases: Vec::new(),
reexport_test_harness_main,
// N.B., doesn't consider the value of `--crate-name` passed on the command line.
is_libtest: attr::find_crate_name(&krate.attrs).map(|s| s == "test").unwrap_or(false),
toplevel_reexport: None,
ctxt: SyntaxContext::empty().apply_mark(mark),
features,
test_runner
};
mark.set_expn_info(ExpnInfo {
call_site: DUMMY_SP,
def_site: None,
format: MacroAttribute(Symbol::intern("test_case")),
allow_internal_unstable: Some(vec![
Symbol::intern("main"),
Symbol::intern("test"),
Symbol::intern("rustc_attrs"),
].into()),
allow_internal_unsafe: false,
local_inner_macros: false,
edition: hygiene::default_edition(),
});
TestHarnessGenerator {
cx,
tests: Vec::new(),
tested_submods: Vec::new(),
}.visit_crate(krate);
}
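// Usage sketch for the `test_runner` path threaded through `TestCtxt` (it is
// parsed by `get_test_runner` at the bottom of this file); in the crate
// under test:
//
//     #![feature(custom_test_frameworks)]
//     #![test_runner(my_crate::run_tests)]
//
// `mk_main` below then calls `my_crate::run_tests(&[..tests])` in place of
// the default `test::test_main_static`.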
/// Craft a span that will be ignored by the stability lint's
/// call to source_map's `is_internal` check.
/// The expanded code calls some unstable functions in the test crate.
fn ignored_span(cx: &TestCtxt<'_>, sp: Span) -> Span {
sp.with_ctxt(cx.ctxt)
}
enum HasTestSignature {
Yes,
No(BadTestSignature),
}
#[derive(PartialEq)]
enum BadTestSignature {
NotEvenAFunction,
WrongTypeSignature,
NoArgumentsAllowed,
ShouldPanicOnlyWithNoArgs,
}
/// Creates a function item for use as the main function of a test build.
/// This function will call the `test_runner` as specified by the crate attribute
fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
// Writing this out by hand with 'ignored_span':
// pub fn main() {
// #![main]
// test::test_main_static(::std::os::args().as_slice(), &[..tests]);
// }
let sp = ignored_span(cx, DUMMY_SP);
let ecx = &cx.ext_cx;
let test_id = ecx.ident_of("test").gensym();
// test::test_main_static(...)
let mut test_runner = cx.test_runner.clone().unwrap_or(
ecx.path(sp, vec![
test_id, ecx.ident_of("test_main_static")
]));
test_runner.span = sp;
let test_main_path_expr = ecx.expr_path(test_runner);
let call_test_main = ecx.expr_call(sp, test_main_path_expr,
vec![mk_tests_slice(cx)]);
let call_test_main = ecx.stmt_expr(call_test_main);
// #![main]
let main_meta = ecx.meta_word(sp, Symbol::intern("main"));
let main_attr = ecx.attribute(sp, main_meta);
// extern crate test as test_gensym
let test_extern_stmt = ecx.stmt_item(sp, ecx.item(sp,
test_id,
vec![],
ast::ItemKind::ExternCrate(Some(Symbol::intern("test")))
));
// pub fn main() {... }
let main_ret_ty = ecx.ty(sp, ast::TyKind::Tup(vec![]));
// If no test runner is provided we need to import the test crate
let main_body = if cx.test_runner.is_none() {
ecx.block(sp, vec![test_extern_stmt, call_test_main])
} else {
ecx.block(sp, vec![call_test_main])
};
let main = ast::ItemKind::Fn(ecx.fn_decl(vec![], ast::FunctionRetTy::Ty(main_ret_ty)),
ast::FnHeader::default(),
ast::Generics::default(),
main_body);
// Honor the reexport_test_harness_main attribute
let main_id = Ident::new(
cx.reexport_test_harness_main.unwrap_or(Symbol::gensym("main")),
sp);
P(ast::Item {
ident: main_id,
attrs: vec![main_attr],
id: ast::DUMMY_NODE_ID,
node: main,
vis: dummy_spanned(ast::VisibilityKind::Public),
span: sp,
tokens: None,
})
}
fn path_name_i(idents: &[Ident]) -> String {
let mut path_name = "".to_string();
let mut idents_iter = idents.iter().peekable();
while let Some(ident) = idents_iter.next() {
path_name.push_str(&ident.as_str());
if idents_iter.peek().is_some() {
path_name.push_str("::")
}
}
path_name
}
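// For example, idents spelling `a`, `b`, `c` yield "a::b::c"; the `peek()`
// check above is what keeps a trailing "::" off the result.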
/// Creates a slice containing every test like so:
/// &[path::to::test1, path::to::test2]
fn mk_tests_slice(cx: &TestCtxt<'_>) -> P<ast::Expr> {
debug!("building test vector from {} tests", cx.test_cases.len());
let ref ecx = cx.ext_cx;
ecx.expr_vec_slice(DUMMY_SP,
cx.test_cases.iter().map(|test| {
ecx.expr_addr_of(test.span,
ecx.expr_path(ecx.path(test.span, visible_path(cx, &test.path))))
}).collect())
}
/// Creates a path from the top-level __test module to the test via __test_reexports
fn visible_path(cx: &TestCtxt<'_>, path: &[Ident]) -> Vec<Ident> {
let mut visible_path = vec![];
match cx.toplevel_reexport {
Some(id) => visible_path.push(id),
None => {
cx.span_diagnostic.bug("expected to find top-level re-export name, but found None");
}
}
visible_path.extend_from_slice(path);
visible_path
}
fn is_test_case(i: &ast::Item) -> bool |
fn get_test_runner(sd: &errors::Handler, krate: &ast::Crate) -> Option<ast::Path> {
let test_attr = attr::find_by_name(&krate.attrs, "test_runner")?;
test_attr.meta_item_list().map(|meta_list| {
if meta_list.len() != 1 {
sd.span_fatal(test_attr.span,
"#![test_runner(..)] accepts exactly 1 argument").raise()
}
match meta_list[0].meta_item() {
Some(meta_item) if meta_item.is_word() => meta_item.path.clone(),
_ => sd.span_fatal(test_attr.span, "`test_runner` argument must be a path").raise()
}
})
}
| {
attr::contains_name(&i.attrs, "rustc_test_marker")
} | identifier_body |
test.rs | // Code that generates a test runner to run all the tests in a crate
#![allow(dead_code)]
#![allow(unused_imports)]
use HasTestSignature::*;
use std::iter;
use std::slice;
use std::mem;
use std::vec;
use log::debug;
use smallvec::{smallvec, SmallVec};
use syntax_pos::{DUMMY_SP, NO_EXPANSION, Span, SourceFile, BytePos};
use crate::attr::{self, HasAttrs};
use crate::source_map::{self, SourceMap, ExpnInfo, MacroAttribute, dummy_spanned, respan};
use crate::config;
use crate::entry::{self, EntryPointType};
use crate::ext::base::{ExtCtxt, Resolver};
use crate::ext::build::AstBuilder;
use crate::ext::expand::ExpansionConfig;
use crate::ext::hygiene::{self, Mark, SyntaxContext};
use crate::mut_visit::{*, ExpectOne};
use crate::feature_gate::Features;
use crate::util::map_in_place::MapInPlace;
use crate::parse::{token, ParseSess};
use crate::print::pprust;
use crate::ast::{self, Ident};
use crate::ptr::P;
use crate::symbol::{self, Symbol, keywords};
use crate::ThinVec;
struct Test {
span: Span,
path: Vec<Ident>,
}
struct TestCtxt<'a> {
span_diagnostic: &'a errors::Handler,
path: Vec<Ident>,
ext_cx: ExtCtxt<'a>,
test_cases: Vec<Test>,
reexport_test_harness_main: Option<Symbol>,
is_libtest: bool,
ctxt: SyntaxContext,
features: &'a Features,
test_runner: Option<ast::Path>,
// top-level re-export submodule, filled out after folding is finished
toplevel_reexport: Option<Ident>,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: &ParseSess,
resolver: &mut dyn Resolver,
should_test: bool,
krate: &mut ast::Crate,
span_diagnostic: &errors::Handler,
features: &Features) {
// Check for #[reexport_test_harness_main = "some_name"] which
// creates a `use __test::main as some_name;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
// Do this here so that the test_runner crate attribute gets marked as used
// even in non-test builds
let test_runner = get_test_runner(span_diagnostic, &krate);
if should_test {
generate_test_harness(sess, resolver, reexport_test_harness_main,
krate, span_diagnostic, features, test_runner)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(Ident, Ident)>,
}
impl<'a> MutVisitor for TestHarnessGenerator<'a> {
fn visit_crate(&mut self, c: &mut ast::Crate) {
noop_visit_crate(c, self);
// Create a main function to run our tests
let test_main = {
let unresolved = mk_main(&mut self.cx);
self.cx.ext_cx.monotonic_expander().flat_map_item(unresolved).pop().unwrap()
};
c.module.items.push(test_main);
}
fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
let ident = i.ident;
if ident.name != keywords::Invalid.name() {
self.cx.path.push(ident);
}
debug!("current path: {}", path_name_i(&self.cx.path));
let mut item = i.into_inner();
if is_test_case(&item) {
debug!("this is a test item");
let test = Test {
span: item.span,
path: self.cx.path.clone(),
};
self.cx.test_cases.push(test);
self.tests.push(item.ident);
}
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
if let ast::ItemKind::Mod(mut module) = item.node {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
noop_visit_mod(&mut module, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
if !tests.is_empty() || !tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, item.id, tests, tested_submods);
module.items.push(it);
if !self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
item.node = ast::ItemKind::Mod(module);
}
if ident.name != keywords::Invalid.name() {
self.cx.path.pop();
}
smallvec![P(item)]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// A folder used to remove any entry points (like fn main) because the harness
/// generator will provide its own
struct EntryPointCleaner {
// Current depth in the ast
depth: usize,
} | fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
self.depth += 1;
let item = noop_flat_map_item(i, self).expect_one("noop did something");
self.depth -= 1;
// Remove any #[main] or #[start] from the AST so it doesn't
// clash with the one we're going to add, but mark it as
// #[allow(dead_code)] to avoid printing warnings.
let item = match entry::entry_point_type(&item, self.depth) {
EntryPointType::MainNamed |
EntryPointType::MainAttr |
EntryPointType::Start =>
item.map(|ast::Item {id, ident, attrs, node, vis, span, tokens}| {
let allow_ident = Ident::from_str("allow");
let dc_nested = attr::mk_nested_word_item(Ident::from_str("dead_code"));
let allow_dead_code_item = attr::mk_list_item(DUMMY_SP, allow_ident,
vec![dc_nested]);
let allow_dead_code = attr::mk_attr_outer(DUMMY_SP,
attr::mk_attr_id(),
allow_dead_code_item);
ast::Item {
id,
ident,
attrs: attrs.into_iter()
.filter(|attr| {
!attr.check_name("main") &&!attr.check_name("start")
})
.chain(iter::once(allow_dead_code))
.collect(),
node,
vis,
span,
tokens,
}
}),
EntryPointType::None |
EntryPointType::OtherMain => item,
};
smallvec![item]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
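// Sketch of the rewrite performed by `flat_map_item` above:
//
//     before:  #[main] fn main() { ... }
//     after:   #[allow(dead_code)] fn main() { ... }
//
// The `main`/`start` attributes are filtered out and `allow(dead_code)` is
// appended so the displaced entry point compiles without warnings.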
/// Creates an item (specifically a module) that "pub use"s the tests passed in.
/// Each tested submodule will contain a similar reexport module that we will export
/// under the name of the original module. That is, `submod::__test_reexports` is
/// reexported like so `pub use submod::__test_reexports as submod`.
fn mk_reexport_mod(cx: &mut TestCtxt<'_>,
parent: ast::NodeId,
tests: Vec<Ident>,
tested_submods: Vec<(Ident, Ident)>)
-> (P<ast::Item>, Ident) {
let super_ = Ident::from_str("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
Some(r), path)
})).collect();
let reexport_mod = ast::Mod {
inline: true,
inner: DUMMY_SP,
items,
};
let sym = Ident::with_empty_ctxt(Symbol::gensym("__test_reexports"));
let parent = if parent == ast::DUMMY_NODE_ID { ast::CRATE_NODE_ID } else { parent };
cx.ext_cx.current_expansion.mark = cx.ext_cx.resolver.get_module_scope(parent);
let it = cx.ext_cx.monotonic_expander().flat_map_item(P(ast::Item {
ident: sym,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemKind::Mod(reexport_mod),
vis: dummy_spanned(ast::VisibilityKind::Public),
span: DUMMY_SP,
tokens: None,
})).pop().unwrap();
(it, sym)
}
/// Crawl over the crate, inserting test reexports and the test main function
fn generate_test_harness(sess: &ParseSess,
resolver: &mut dyn Resolver,
reexport_test_harness_main: Option<Symbol>,
krate: &mut ast::Crate,
sd: &errors::Handler,
features: &Features,
test_runner: Option<ast::Path>) {
// Remove the entry points
let mut cleaner = EntryPointCleaner { depth: 0 };
cleaner.visit_crate(krate);
let mark = Mark::fresh(Mark::root());
let mut econfig = ExpansionConfig::default("test".to_string());
econfig.features = Some(features);
let cx = TestCtxt {
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, econfig, resolver),
path: Vec::new(),
test_cases: Vec::new(),
reexport_test_harness_main,
// N.B., doesn't consider the value of `--crate-name` passed on the command line.
is_libtest: attr::find_crate_name(&krate.attrs).map(|s| s == "test").unwrap_or(false),
toplevel_reexport: None,
ctxt: SyntaxContext::empty().apply_mark(mark),
features,
test_runner
};
mark.set_expn_info(ExpnInfo {
call_site: DUMMY_SP,
def_site: None,
format: MacroAttribute(Symbol::intern("test_case")),
allow_internal_unstable: Some(vec![
Symbol::intern("main"),
Symbol::intern("test"),
Symbol::intern("rustc_attrs"),
].into()),
allow_internal_unsafe: false,
local_inner_macros: false,
edition: hygiene::default_edition(),
});
TestHarnessGenerator {
cx,
tests: Vec::new(),
tested_submods: Vec::new(),
}.visit_crate(krate);
}
/// Craft a span that will be ignored by the stability lint's
/// call to source_map's `is_internal` check.
/// The expanded code calls some unstable functions in the test crate.
fn ignored_span(cx: &TestCtxt<'_>, sp: Span) -> Span {
sp.with_ctxt(cx.ctxt)
}
enum HasTestSignature {
Yes,
No(BadTestSignature),
}
#[derive(PartialEq)]
enum BadTestSignature {
NotEvenAFunction,
WrongTypeSignature,
NoArgumentsAllowed,
ShouldPanicOnlyWithNoArgs,
}
/// Creates a function item for use as the main function of a test build.
/// This function will call the `test_runner` as specified by the crate attribute
fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
// Writing this out by hand with 'ignored_span':
// pub fn main() {
// #![main]
// test::test_main_static(::std::os::args().as_slice(), &[..tests]);
// }
let sp = ignored_span(cx, DUMMY_SP);
let ecx = &cx.ext_cx;
let test_id = ecx.ident_of("test").gensym();
// test::test_main_static(...)
let mut test_runner = cx.test_runner.clone().unwrap_or(
ecx.path(sp, vec![
test_id, ecx.ident_of("test_main_static")
]));
test_runner.span = sp;
let test_main_path_expr = ecx.expr_path(test_runner);
let call_test_main = ecx.expr_call(sp, test_main_path_expr,
vec![mk_tests_slice(cx)]);
let call_test_main = ecx.stmt_expr(call_test_main);
// #![main]
let main_meta = ecx.meta_word(sp, Symbol::intern("main"));
let main_attr = ecx.attribute(sp, main_meta);
// extern crate test as test_gensym
let test_extern_stmt = ecx.stmt_item(sp, ecx.item(sp,
test_id,
vec![],
ast::ItemKind::ExternCrate(Some(Symbol::intern("test")))
));
// pub fn main() { ... }
let main_ret_ty = ecx.ty(sp, ast::TyKind::Tup(vec![]));
// If no test runner is provided we need to import the test crate
let main_body = if cx.test_runner.is_none() {
ecx.block(sp, vec![test_extern_stmt, call_test_main])
} else {
ecx.block(sp, vec![call_test_main])
};
let main = ast::ItemKind::Fn(ecx.fn_decl(vec![], ast::FunctionRetTy::Ty(main_ret_ty)),
ast::FnHeader::default(),
ast::Generics::default(),
main_body);
// Honor the reexport_test_harness_main attribute
let main_id = Ident::new(
cx.reexport_test_harness_main.unwrap_or(Symbol::gensym("main")),
sp);
P(ast::Item {
ident: main_id,
attrs: vec![main_attr],
id: ast::DUMMY_NODE_ID,
node: main,
vis: dummy_spanned(ast::VisibilityKind::Public),
span: sp,
tokens: None,
})
}
fn path_name_i(idents: &[Ident]) -> String {
let mut path_name = "".to_string();
let mut idents_iter = idents.iter().peekable();
while let Some(ident) = idents_iter.next() {
path_name.push_str(&ident.as_str());
if idents_iter.peek().is_some() {
path_name.push_str("::")
}
}
path_name
}
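// Illustrative note (not in the original source): for idents `a`, `b`, `c`,
// `path_name_i(&[a, b, c])` produces the string "a::b::c".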
/// Creates a slice containing every test like so:
/// &[path::to::test1, path::to::test2]
fn mk_tests_slice(cx: &TestCtxt<'_>) -> P<ast::Expr> {
debug!("building test vector from {} tests", cx.test_cases.len());
let ref ecx = cx.ext_cx;
ecx.expr_vec_slice(DUMMY_SP,
cx.test_cases.iter().map(|test| {
ecx.expr_addr_of(test.span,
ecx.expr_path(ecx.path(test.span, visible_path(cx, &test.path))))
}).collect())
}
/// Creates a path from the top-level __test module to the test via __test_reexports
fn visible_path(cx: &TestCtxt<'_>, path: &[Ident]) -> Vec<Ident> {
let mut visible_path = vec![];
match cx.toplevel_reexport {
Some(id) => visible_path.push(id),
None => {
cx.span_diagnostic.bug("expected to find top-level re-export name, but found None");
}
}
visible_path.extend_from_slice(path);
visible_path
}
fn is_test_case(i: &ast::Item) -> bool {
attr::contains_name(&i.attrs, "rustc_test_marker")
}
fn get_test_runner(sd: &errors::Handler, krate: &ast::Crate) -> Option<ast::Path> {
let test_attr = attr::find_by_name(&krate.attrs, "test_runner")?;
test_attr.meta_item_list().map(|meta_list| {
if meta_list.len() != 1 {
sd.span_fatal(test_attr.span,
"#![test_runner(..)] accepts exactly 1 argument").raise()
}
match meta_list[0].meta_item() {
Some(meta_item) if meta_item.is_word() => meta_item.path.clone(),
_ => sd.span_fatal(test_attr.span, "`test_runner` argument must be a path").raise()
}
})
} |
impl MutVisitor for EntryPointCleaner { | random_line_split |
test.rs | // Code that generates a test runner to run all the tests in a crate
#![allow(dead_code)]
#![allow(unused_imports)]
use HasTestSignature::*;
use std::iter;
use std::slice;
use std::mem;
use std::vec;
use log::debug;
use smallvec::{smallvec, SmallVec};
use syntax_pos::{DUMMY_SP, NO_EXPANSION, Span, SourceFile, BytePos};
use crate::attr::{self, HasAttrs};
use crate::source_map::{self, SourceMap, ExpnInfo, MacroAttribute, dummy_spanned, respan};
use crate::config;
use crate::entry::{self, EntryPointType};
use crate::ext::base::{ExtCtxt, Resolver};
use crate::ext::build::AstBuilder;
use crate::ext::expand::ExpansionConfig;
use crate::ext::hygiene::{self, Mark, SyntaxContext};
use crate::mut_visit::{*, ExpectOne};
use crate::feature_gate::Features;
use crate::util::map_in_place::MapInPlace;
use crate::parse::{token, ParseSess};
use crate::print::pprust;
use crate::ast::{self, Ident};
use crate::ptr::P;
use crate::symbol::{self, Symbol, keywords};
use crate::ThinVec;
struct Test {
span: Span,
path: Vec<Ident>,
}
struct TestCtxt<'a> {
span_diagnostic: &'a errors::Handler,
path: Vec<Ident>,
ext_cx: ExtCtxt<'a>,
test_cases: Vec<Test>,
reexport_test_harness_main: Option<Symbol>,
is_libtest: bool,
ctxt: SyntaxContext,
features: &'a Features,
test_runner: Option<ast::Path>,
// top-level re-export submodule, filled out after folding is finished
toplevel_reexport: Option<Ident>,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: &ParseSess,
resolver: &mut dyn Resolver,
should_test: bool,
krate: &mut ast::Crate,
span_diagnostic: &errors::Handler,
features: &Features) {
// Check for #[reexport_test_harness_main = "some_name"] which
// creates a `use __test::main as some_name;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
// Do this here so that the test_runner crate attribute gets marked as used
// even in non-test builds
let test_runner = get_test_runner(span_diagnostic, &krate);
if should_test {
generate_test_harness(sess, resolver, reexport_test_harness_main,
krate, span_diagnostic, features, test_runner)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(Ident, Ident)>,
}
impl<'a> MutVisitor for TestHarnessGenerator<'a> {
fn visit_crate(&mut self, c: &mut ast::Crate) {
noop_visit_crate(c, self);
// Create a main function to run our tests
let test_main = {
let unresolved = mk_main(&mut self.cx);
self.cx.ext_cx.monotonic_expander().flat_map_item(unresolved).pop().unwrap()
};
c.module.items.push(test_main);
}
fn | (&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
let ident = i.ident;
if ident.name != keywords::Invalid.name() {
self.cx.path.push(ident);
}
debug!("current path: {}", path_name_i(&self.cx.path));
let mut item = i.into_inner();
if is_test_case(&item) {
debug!("this is a test item");
let test = Test {
span: item.span,
path: self.cx.path.clone(),
};
self.cx.test_cases.push(test);
self.tests.push(item.ident);
}
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
if let ast::ItemKind::Mod(mut module) = item.node {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
noop_visit_mod(&mut module, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
if !tests.is_empty() || !tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, item.id, tests, tested_submods);
module.items.push(it);
if !self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
item.node = ast::ItemKind::Mod(module);
}
if ident.name != keywords::Invalid.name() {
self.cx.path.pop();
}
smallvec![P(item)]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// A folder used to remove any entry points (like fn main) because the harness
/// generator will provide its own
struct EntryPointCleaner {
// Current depth in the ast
depth: usize,
}
impl MutVisitor for EntryPointCleaner {
fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
self.depth += 1;
let item = noop_flat_map_item(i, self).expect_one("noop did something");
self.depth -= 1;
// Remove any #[main] or #[start] from the AST so it doesn't
// clash with the one we're going to add, but mark it as
// #[allow(dead_code)] to avoid printing warnings.
let item = match entry::entry_point_type(&item, self.depth) {
EntryPointType::MainNamed |
EntryPointType::MainAttr |
EntryPointType::Start =>
item.map(|ast::Item {id, ident, attrs, node, vis, span, tokens}| {
let allow_ident = Ident::from_str("allow");
let dc_nested = attr::mk_nested_word_item(Ident::from_str("dead_code"));
let allow_dead_code_item = attr::mk_list_item(DUMMY_SP, allow_ident,
vec![dc_nested]);
let allow_dead_code = attr::mk_attr_outer(DUMMY_SP,
attr::mk_attr_id(),
allow_dead_code_item);
ast::Item {
id,
ident,
attrs: attrs.into_iter()
.filter(|attr| {
!attr.check_name("main") && !attr.check_name("start")
})
.chain(iter::once(allow_dead_code))
.collect(),
node,
vis,
span,
tokens,
}
}),
EntryPointType::None |
EntryPointType::OtherMain => item,
};
smallvec![item]
}
fn visit_mac(&mut self, _mac: &mut ast::Mac) {
// Do nothing.
}
}
/// Creates an item (specifically a module) that "pub use"s the tests passed in.
/// Each tested submodule will contain a similar reexport module that we will export
/// under the name of the original module. That is, `submod::__test_reexports` is
/// reexported like so `pub use submod::__test_reexports as submod`.
fn mk_reexport_mod(cx: &mut TestCtxt<'_>,
parent: ast::NodeId,
tests: Vec<Ident>,
tested_submods: Vec<(Ident, Ident)>)
-> (P<ast::Item>, Ident) {
let super_ = Ident::from_str("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, dummy_spanned(ast::VisibilityKind::Public),
Some(r), path)
})).collect();
let reexport_mod = ast::Mod {
inline: true,
inner: DUMMY_SP,
items,
};
let sym = Ident::with_empty_ctxt(Symbol::gensym("__test_reexports"));
let parent = if parent == ast::DUMMY_NODE_ID { ast::CRATE_NODE_ID } else { parent };
cx.ext_cx.current_expansion.mark = cx.ext_cx.resolver.get_module_scope(parent);
let it = cx.ext_cx.monotonic_expander().flat_map_item(P(ast::Item {
ident: sym,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemKind::Mod(reexport_mod),
vis: dummy_spanned(ast::VisibilityKind::Public),
span: DUMMY_SP,
tokens: None,
})).pop().unwrap();
(it, sym)
}
/// Crawl over the crate, inserting test reexports and the test main function
fn generate_test_harness(sess: &ParseSess,
resolver: &mut dyn Resolver,
reexport_test_harness_main: Option<Symbol>,
krate: &mut ast::Crate,
sd: &errors::Handler,
features: &Features,
test_runner: Option<ast::Path>) {
// Remove the entry points
let mut cleaner = EntryPointCleaner { depth: 0 };
cleaner.visit_crate(krate);
let mark = Mark::fresh(Mark::root());
let mut econfig = ExpansionConfig::default("test".to_string());
econfig.features = Some(features);
let cx = TestCtxt {
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, econfig, resolver),
path: Vec::new(),
test_cases: Vec::new(),
reexport_test_harness_main,
// N.B., doesn't consider the value of `--crate-name` passed on the command line.
is_libtest: attr::find_crate_name(&krate.attrs).map(|s| s == "test").unwrap_or(false),
toplevel_reexport: None,
ctxt: SyntaxContext::empty().apply_mark(mark),
features,
test_runner
};
mark.set_expn_info(ExpnInfo {
call_site: DUMMY_SP,
def_site: None,
format: MacroAttribute(Symbol::intern("test_case")),
allow_internal_unstable: Some(vec![
Symbol::intern("main"),
Symbol::intern("test"),
Symbol::intern("rustc_attrs"),
].into()),
allow_internal_unsafe: false,
local_inner_macros: false,
edition: hygiene::default_edition(),
});
TestHarnessGenerator {
cx,
tests: Vec::new(),
tested_submods: Vec::new(),
}.visit_crate(krate);
}
/// Craft a span that will be ignored by the stability lint's
/// call to source_map's `is_internal` check.
/// The expanded code calls some unstable functions in the test crate.
fn ignored_span(cx: &TestCtxt<'_>, sp: Span) -> Span {
sp.with_ctxt(cx.ctxt)
}
enum HasTestSignature {
Yes,
No(BadTestSignature),
}
#[derive(PartialEq)]
enum BadTestSignature {
NotEvenAFunction,
WrongTypeSignature,
NoArgumentsAllowed,
ShouldPanicOnlyWithNoArgs,
}
/// Creates a function item for use as the main function of a test build.
/// This function will call the `test_runner` as specified by the crate attribute
fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
// Writing this out by hand with 'ignored_span':
// pub fn main() {
// #![main]
// test::test_main_static(::std::os::args().as_slice(), &[..tests]);
// }
let sp = ignored_span(cx, DUMMY_SP);
let ecx = &cx.ext_cx;
let test_id = ecx.ident_of("test").gensym();
// test::test_main_static(...)
let mut test_runner = cx.test_runner.clone().unwrap_or(
ecx.path(sp, vec![
test_id, ecx.ident_of("test_main_static")
]));
test_runner.span = sp;
let test_main_path_expr = ecx.expr_path(test_runner);
let call_test_main = ecx.expr_call(sp, test_main_path_expr,
vec![mk_tests_slice(cx)]);
let call_test_main = ecx.stmt_expr(call_test_main);
// #![main]
let main_meta = ecx.meta_word(sp, Symbol::intern("main"));
let main_attr = ecx.attribute(sp, main_meta);
// extern crate test as test_gensym
let test_extern_stmt = ecx.stmt_item(sp, ecx.item(sp,
test_id,
vec![],
ast::ItemKind::ExternCrate(Some(Symbol::intern("test")))
));
// pub fn main() { ... }
let main_ret_ty = ecx.ty(sp, ast::TyKind::Tup(vec![]));
// If no test runner is provided we need to import the test crate
let main_body = if cx.test_runner.is_none() {
ecx.block(sp, vec![test_extern_stmt, call_test_main])
} else {
ecx.block(sp, vec![call_test_main])
};
let main = ast::ItemKind::Fn(ecx.fn_decl(vec![], ast::FunctionRetTy::Ty(main_ret_ty)),
ast::FnHeader::default(),
ast::Generics::default(),
main_body);
// Honor the reexport_test_harness_main attribute
let main_id = Ident::new(
cx.reexport_test_harness_main.unwrap_or(Symbol::gensym("main")),
sp);
P(ast::Item {
ident: main_id,
attrs: vec![main_attr],
id: ast::DUMMY_NODE_ID,
node: main,
vis: dummy_spanned(ast::VisibilityKind::Public),
span: sp,
tokens: None,
})
}
fn path_name_i(idents: &[Ident]) -> String {
let mut path_name = "".to_string();
let mut idents_iter = idents.iter().peekable();
while let Some(ident) = idents_iter.next() {
path_name.push_str(&ident.as_str());
if idents_iter.peek().is_some() {
path_name.push_str("::")
}
}
path_name
}
/// Creates a slice containing every test like so:
/// &[path::to::test1, path::to::test2]
fn mk_tests_slice(cx: &TestCtxt<'_>) -> P<ast::Expr> {
debug!("building test vector from {} tests", cx.test_cases.len());
let ref ecx = cx.ext_cx;
ecx.expr_vec_slice(DUMMY_SP,
cx.test_cases.iter().map(|test| {
ecx.expr_addr_of(test.span,
ecx.expr_path(ecx.path(test.span, visible_path(cx, &test.path))))
}).collect())
}
/// Creates a path from the top-level __test module to the test via __test_reexports
fn visible_path(cx: &TestCtxt<'_>, path: &[Ident]) -> Vec<Ident> {
let mut visible_path = vec![];
match cx.toplevel_reexport {
Some(id) => visible_path.push(id),
None => {
cx.span_diagnostic.bug("expected to find top-level re-export name, but found None");
}
}
visible_path.extend_from_slice(path);
visible_path
}
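// Illustrative sketch (assumed names, not from the original source): with
// `toplevel_reexport` set to the gensym'd `__test_reexports` ident and a test
// path `[mymod, my_test]`, this returns `[__test_reexports, mymod, my_test]`,
// i.e. the path through which the generated harness can actually reach the test.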
fn is_test_case(i: &ast::Item) -> bool {
attr::contains_name(&i.attrs, "rustc_test_marker")
}
fn get_test_runner(sd: &errors::Handler, krate: &ast::Crate) -> Option<ast::Path> {
let test_attr = attr::find_by_name(&krate.attrs, "test_runner")?;
test_attr.meta_item_list().map(|meta_list| {
if meta_list.len() != 1 {
sd.span_fatal(test_attr.span,
"#![test_runner(..)] accepts exactly 1 argument").raise()
}
match meta_list[0].meta_item() {
Some(meta_item) if meta_item.is_word() => meta_item.path.clone(),
_ => sd.span_fatal(test_attr.span, "`test_runner` argument must be a path").raise()
}
})
}
| flat_map_item | identifier_name |
dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output and output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding with the number of input samples that were used to generate it.
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier than the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
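// Timing sketch (illustrative, not from the original source): with the fixed
// 3-batch latency described above, the batch sampled at slot n is emitted at
// slot n + 3 while the intermediate batches are still in flight:
//
//   slot:  0    1    2    3    4    5
//   ADC:  [0]  [1]  [2]  [3]  [4]  [5]
//   DAC:                 [0]  [1]  [2]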
use stm32h7xx_hal as hal;
use mutex_trait::Mutex;
use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE};
use super::timers;
use core::convert::TryFrom;
use hal::{
dma::{
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
DMAError, MemoryToPeripheral, Transfer,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`.
#[link_section = ".axisram.buffers"]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
[[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else {
Ok(DacCode::from(code as i16))
}
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
impl From<u16> for DacCode {
/// Create a dac code from the provided DAC output code.
fn | (value: u16) -> Self {
Self(value)
}
}
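// Minimal host-side sketch (not part of the original source): a round-trip
// check of the voltage <-> code conversions above. Any voltage inside
// +/- FULL_SCALE should survive encoding to a code and decoding back to within
// one LSB; out-of-range voltages must be rejected.
#[cfg(test)]
mod dac_code_sketch {
    use super::DacCode;
    use core::convert::TryFrom;

    #[test]
    fn voltage_round_trip() {
        let code = DacCode::try_from(1.0f32).unwrap();
        let volts = f32::from(code);
        // Encoding truncates toward zero, so the error stays below one LSB.
        assert!((volts - 1.0).abs() < DacCode::VOLT_PER_LSB);
        // Voltages outside the bipolar range are rejected.
        assert!(DacCode::try_from(2.0 * DacCode::FULL_SCALE).is_err());
    }
}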
macro_rules! dac_output {
($name:ident, $index:literal, $data_stream:ident,
$spi:ident, $trigger_channel:ident, $dma_req:ident) => {
/// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
struct $spi {
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
_channel: timers::tim2::$trigger_channel,
}
impl $spi {
pub fn new(
_channel: timers::tim2::$trigger_channel,
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
) -> Self {
Self { spi, _channel }
}
/// Start the SPI and begin operating in a DMA-driven transfer mode.
pub fn start_dma(&mut self) {
// Allow the SPI FIFOs to operate using only DMA data channels.
self.spi.enable_dma_tx();
// Enable SPI and start it in infinite transaction mode.
self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
self.spi.inner().cr1.modify(|_, w| w.cstart().started());
}
}
// Note(unsafe): This is safe because the DMA request line is logically owned by this module.
// Additionally, the SPI is owned by this structure and is known to be configured for u16 word
// sizes.
unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
/// SPI is configured to operate using 16-bit transfer words.
type MemSize = u16;
/// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);
/// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
fn address(&self) -> usize {
&self.spi.inner().txdr as *const _ as usize
}
}
/// Represents data associated with DAC.
pub struct $name {
// Note: SPI TX functionality may not be used from this structure to ensure safety with DMA.
transfer: Transfer<
hal::dma::dma::$data_stream<hal::stm32::DMA1>,
$spi,
MemoryToPeripheral,
&'static mut [u16],
hal::dma::DBTransfer,
>,
}
impl $name {
/// Construct the DAC output channel.
///
/// # Args
/// * `spi` - The SPI interface used to communicate with the DAC.
/// * `stream` - The DMA stream used to write DAC codes over SPI.
/// * `trigger_channel` - The sampling timer output compare channel for update triggers.
pub fn new(
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
trigger_channel: timers::tim2::$trigger_channel,
batch_size: usize,
) -> Self {
// Generate DMA events when an output compare of the timer hitting zero (timer roll over)
// occurs.
trigger_channel.listen_dma();
trigger_channel.to_output_compare(4 + $index);
// The stream constantly writes to the TX FIFO to write new update codes.
let trigger_config = DmaConfig::default()
.memory_increment(true)
.double_buffer(true)
.peripheral_increment(false);
// Listen for any potential SPI error signals, which may indicate that we are not generating
// update codes.
let mut spi = spi.disable();
spi.listen(hal::spi::Event::Error);
// AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output
// here before starting the transfer.
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them
// elsewhere, so it is safe to access them here.
for buf in unsafe { DAC_BUF[$index].iter_mut() } {
for byte in buf.iter_mut() {
*byte = DacCode::try_from(0.0f32).unwrap().0;
}
}
// Construct the trigger stream to write from memory to the peripheral.
let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
Transfer::init(
stream,
$spi::new(trigger_channel, spi),
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { &mut DAC_BUF[$index][0][..batch_size] },
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
trigger_config,
);
Self { transfer }
}
pub fn start(&mut self) {
self.transfer.start(|spi| spi.start_dma());
}
/// Wait for the transfer of the currently active buffer to complete,
/// then call a function on the now inactive buffer and acknowledge the
/// transfer complete flag.
///
/// NOTE(unsafe): Memory safety and access ordering is not guaranteed
/// (see the HAL DMA docs).
pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
where
F: FnOnce(&mut &'static mut [u16]) -> R,
{
unsafe {
self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
}
}
}
// This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
// to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
impl Mutex for $name {
type Data = &'static mut [u16];
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
self.with_buffer(f).unwrap()
}
}
};
}
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4);
| from | identifier_name |
dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output and output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding with the number of input samples that were used to generate it.
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier than the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
use stm32h7xx_hal as hal;
use mutex_trait::Mutex;
use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE};
use super::timers;
use core::convert::TryFrom;
use hal::{
dma::{
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
DMAError, MemoryToPeripheral, Transfer,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`.
#[link_section = ".axisram.buffers"]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
[[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else {
Ok(DacCode::from(code as i16))
}
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
impl From<u16> for DacCode {
/// Create a dac code from the provided DAC output code.
fn from(value: u16) -> Self |
}
macro_rules! dac_output {
($name:ident, $index:literal, $data_stream:ident,
$spi:ident, $trigger_channel:ident, $dma_req:ident) => {
/// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
struct $spi {
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
_channel: timers::tim2::$trigger_channel,
}
impl $spi {
pub fn new(
_channel: timers::tim2::$trigger_channel,
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
) -> Self {
Self { spi, _channel }
}
/// Start the SPI and begin operating in a DMA-driven transfer mode.
pub fn start_dma(&mut self) {
// Allow the SPI FIFOs to operate using only DMA data channels.
self.spi.enable_dma_tx();
// Enable SPI and start it in infinite transaction mode.
self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
self.spi.inner().cr1.modify(|_, w| w.cstart().started());
}
}
// Note(unsafe): This is safe because the DMA request line is logically owned by this module.
// Additionally, the SPI is owned by this structure and is known to be configured for u16 word
// sizes.
unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
/// SPI is configured to operate using 16-bit transfer words.
type MemSize = u16;
/// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);
/// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
fn address(&self) -> usize {
&self.spi.inner().txdr as *const _ as usize
}
}
/// Represents data associated with DAC.
pub struct $name {
// Note: SPI TX functionality may not be used from this structure to ensure safety with DMA.
transfer: Transfer<
hal::dma::dma::$data_stream<hal::stm32::DMA1>,
$spi,
MemoryToPeripheral,
&'static mut [u16],
hal::dma::DBTransfer,
>,
}
impl $name {
/// Construct the DAC output channel.
///
/// # Args
/// * `spi` - The SPI interface used to communicate with the DAC.
/// * `stream` - The DMA stream used to write DAC codes over SPI.
/// * `trigger_channel` - The sampling timer output compare channel for update triggers.
pub fn new(
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
trigger_channel: timers::tim2::$trigger_channel,
batch_size: usize,
) -> Self {
// Generate DMA events when an output compare of the timer hitting zero (timer roll over)
// occurs.
trigger_channel.listen_dma();
trigger_channel.to_output_compare(4 + $index);
// The stream constantly writes to the TX FIFO to write new update codes.
let trigger_config = DmaConfig::default()
.memory_increment(true)
.double_buffer(true)
.peripheral_increment(false);
// Listen for any potential SPI error signals, which may indicate that we are not generating
// update codes.
let mut spi = spi.disable();
spi.listen(hal::spi::Event::Error);
// AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output
// here before starting the transfer.
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them
// elsewhere, so it is safe to access them here.
for buf in unsafe { DAC_BUF[$index].iter_mut() } {
for byte in buf.iter_mut() {
*byte = DacCode::try_from(0.0f32).unwrap().0;
}
}
// Construct the trigger stream to write from memory to the peripheral.
let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
Transfer::init(
stream,
$spi::new(trigger_channel, spi),
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { &mut DAC_BUF[$index][0][..batch_size] },
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
trigger_config,
);
Self { transfer }
}
pub fn start(&mut self) {
self.transfer.start(|spi| spi.start_dma());
}
/// Wait for the transfer of the currently active buffer to complete,
/// then call a function on the now inactive buffer and acknowledge the
/// transfer complete flag.
///
/// NOTE(unsafe): Memory safety and access ordering is not guaranteed
/// (see the HAL DMA docs).
pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
where
F: FnOnce(&mut &'static mut [u16]) -> R,
{
unsafe {
self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
}
}
}
// This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
// to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
impl Mutex for $name {
type Data = &'static mut [u16];
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
self.with_buffer(f).unwrap()
}
}
};
}
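// Usage sketch (assumed call site, not from the original source): once per
// batch, the processing loop borrows the inactive half of the double buffer
// and fills it with the next output codes, e.g.:
//
// dac0.with_buffer(|buf| {
//     for code in buf.iter_mut() {
//         *code = DacCode::from(0i16).0;
//     }
// }).unwrap();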
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4);
| {
Self(value)
} | identifier_body |
dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output and output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding with the number of input samples that were used to generate it.
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier than the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
use stm32h7xx_hal as hal;
use mutex_trait::Mutex;
use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE};
use super::timers;
use core::convert::TryFrom;
use hal::{
dma::{
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
DMAError, MemoryToPeripheral, Transfer,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`.
#[link_section = ".axisram.buffers"]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
[[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else |
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
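// Spot-check sketch (not in the original source): the bipolar encoding above
// is plain offset binary, i.e. the sign bit of the two's-complement code is
// flipped.
#[cfg(test)]
mod offset_binary_sketch {
    use super::DacCode;

    #[test]
    fn offset_binary_mapping() {
        assert_eq!(DacCode::from(0i16).0, 0x8000);
        assert_eq!(DacCode::from(i16::MIN).0, 0x0000);
        assert_eq!(DacCode::from(i16::MAX).0, 0xFFFF);
    }
}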
impl From<u16> for DacCode {
/// Create a dac code from the provided DAC output code.
fn from(value: u16) -> Self {
Self(value)
}
}
macro_rules! dac_output {
($name:ident, $index:literal, $data_stream:ident,
$spi:ident, $trigger_channel:ident, $dma_req:ident) => {
/// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
struct $spi {
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
_channel: timers::tim2::$trigger_channel,
}
impl $spi {
pub fn new(
_channel: timers::tim2::$trigger_channel,
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
) -> Self {
Self { spi, _channel }
}
/// Start the SPI and begin operating in a DMA-driven transfer mode.
pub fn start_dma(&mut self) {
// Allow the SPI FIFOs to operate using only DMA data channels.
self.spi.enable_dma_tx();
// Enable SPI and start it in infinite transaction mode.
self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
self.spi.inner().cr1.modify(|_, w| w.cstart().started());
}
}
// Note(unsafe): This is safe because the DMA request line is logically owned by this module.
// Additionally, the SPI is owned by this structure and is known to be configured for u16 word
// sizes.
unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
/// SPI is configured to operate using 16-bit transfer words.
type MemSize = u16;
/// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);
/// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
fn address(&self) -> usize {
&self.spi.inner().txdr as *const _ as usize
}
}
/// Represents data associated with DAC.
pub struct $name {
// Note: SPI TX functionality may not be used from this structure to ensure safety with DMA.
transfer: Transfer<
hal::dma::dma::$data_stream<hal::stm32::DMA1>,
$spi,
MemoryToPeripheral,
&'static mut [u16],
hal::dma::DBTransfer,
>,
}
impl $name {
/// Construct the DAC output channel.
///
/// # Args
/// * `spi` - The SPI interface used to communicate with the DAC.
/// * `stream` - The DMA stream used to write DAC codes over SPI.
/// * `trigger_channel` - The sampling timer output compare channel for update triggers.
pub fn new(
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
trigger_channel: timers::tim2::$trigger_channel,
batch_size: usize,
) -> Self {
// Generate DMA events when an output compare of the timer hitting zero (timer roll over)
// occurs.
trigger_channel.listen_dma();
trigger_channel.to_output_compare(4 + $index);
// The stream constantly writes to the TX FIFO to write new update codes.
let trigger_config = DmaConfig::default()
.memory_increment(true)
.double_buffer(true)
.peripheral_increment(false);
// Listen for any potential SPI error signals, which may indicate that we are not generating
// update codes.
let mut spi = spi.disable();
spi.listen(hal::spi::Event::Error);
// AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output
// here before starting the transfer.
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them
// elsewhere, so it is safe to access them here.
for buf in unsafe { DAC_BUF[$index].iter_mut() } {
for byte in buf.iter_mut() {
*byte = DacCode::try_from(0.0f32).unwrap().0;
}
}
// Construct the trigger stream to write from memory to the peripheral.
let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
Transfer::init(
stream,
$spi::new(trigger_channel, spi),
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { &mut DAC_BUF[$index][0][..batch_size] },
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
trigger_config,
);
Self { transfer }
}
pub fn start(&mut self) {
self.transfer.start(|spi| spi.start_dma());
}
/// Wait for the transfer of the currently active buffer to complete,
/// then call a function on the now inactive buffer and acknowledge the
/// transfer complete flag.
///
/// NOTE(unsafe): Memory safety and access ordering is not guaranteed
/// (see the HAL DMA docs).
pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
where
F: FnOnce(&mut &'static mut [u16]) -> R,
{
unsafe {
self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
}
}
}
// This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
// to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
impl Mutex for $name {
type Data = &'static mut [u16];
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
self.with_buffer(f).unwrap()
}
}
};
}
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4);
| {
Ok(DacCode::from(code as i16))
} | conditional_block |
dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output and output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding with the number of input samples that were used to generate it.
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier than the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
use stm32h7xx_hal as hal;
use mutex_trait::Mutex;
use super::design_parameters::{SampleBuffer, MAX_SAMPLE_BUFFER_SIZE};
use super::timers;
use core::convert::TryFrom;
use hal::{
dma::{
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
DMAError, MemoryToPeripheral, Transfer,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`.
#[link_section = ".axisram.buffers"]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
[[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
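// Worked numbers (illustrative): FULL_SCALE = 4.096 V * 2.5 = 10.24 V, so
// VOLT_PER_LSB = 10.24 V / 32768 = 312.5 uV and LSB_PER_VOLT = 3200 codes per
// volt.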
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else {
Ok(DacCode::from(code as i16))
}
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
impl From<u16> for DacCode {
/// Create a dac code from the provided DAC output code.
fn from(value: u16) -> Self {
Self(value)
}
}
macro_rules! dac_output {
($name:ident, $index:literal, $data_stream:ident,
$spi:ident, $trigger_channel:ident, $dma_req:ident) => {
/// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
struct $spi {
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
_channel: timers::tim2::$trigger_channel,
}
impl $spi {
pub fn new(
_channel: timers::tim2::$trigger_channel,
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
) -> Self {
Self { spi, _channel }
}
/// Start the SPI and begin operating in a DMA-driven transfer mode.
pub fn start_dma(&mut self) {
// Allow the SPI FIFOs to operate using only DMA data channels.
self.spi.enable_dma_tx();
// Enable SPI and start it in infinite transaction mode.
self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
self.spi.inner().cr1.modify(|_, w| w.cstart().started());
}
}
// Note(unsafe): This is safe because the DMA request line is logically owned by this module.
// Additionally, the SPI is owned by this structure and is known to be configured for u16 word
// sizes.
unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
/// SPI is configured to operate using 16-bit transfer words.
type MemSize = u16;
/// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);
/// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
fn address(&self) -> usize {
&self.spi.inner().txdr as *const _ as usize
}
}
/// Represents data associated with DAC.
pub struct $name {
// Note: SPI TX functionality may not be used from this structure to ensure safety with DMA.
transfer: Transfer<
hal::dma::dma::$data_stream<hal::stm32::DMA1>,
$spi,
MemoryToPeripheral,
&'static mut [u16],
hal::dma::DBTransfer, | ///
/// # Args
/// * `spi` - The SPI interface used to communicate with the DAC.
/// * `stream` - The DMA stream used to write DAC codes over SPI.
/// * `trigger_channel` - The sampling timer output compare channel for update triggers.
pub fn new(
spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
trigger_channel: timers::tim2::$trigger_channel,
batch_size: usize,
) -> Self {
// Generate DMA events when an output compare of the timer hitting zero (timer roll over)
// occurs.
trigger_channel.listen_dma();
trigger_channel.to_output_compare(4 + $index);
// The stream constantly writes to the TX FIFO to write new update codes.
let trigger_config = DmaConfig::default()
.memory_increment(true)
.double_buffer(true)
.peripheral_increment(false);
// Listen for any potential SPI error signals, which may indicate that we are not generating
// update codes.
let mut spi = spi.disable();
spi.listen(hal::spi::Event::Error);
// AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC output
// here before starting the transfer.
// Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using them
// elsewhere, so it is safe to access them here.
for buf in unsafe { DAC_BUF[$index].iter_mut() } {
for byte in buf.iter_mut() {
*byte = DacCode::try_from(0.0f32).unwrap().0;
}
}
// Construct the trigger stream to write from memory to the peripheral.
let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
Transfer::init(
stream,
$spi::new(trigger_channel, spi),
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { &mut DAC_BUF[$index][0][..batch_size] },
// Note(unsafe): This buffer is only used once and provided for the DMA transfer.
unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
trigger_config,
);
Self { transfer }
}
pub fn start(&mut self) {
self.transfer.start(|spi| spi.start_dma());
}
/// Wait for the transfer of the currently active buffer to complete,
/// then call a function on the now inactive buffer and acknowledge the
/// transfer complete flag.
///
/// NOTE(unsafe): Memory safety and access ordering are not guaranteed
/// (see the HAL DMA docs).
pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
where
F: FnOnce(&mut &'static mut [u16]) -> R,
{
unsafe {
self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
}
}
}
// This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
// to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
impl Mutex for $name {
type Data = &'static mut [u16];
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
self.with_buffer(f).unwrap()
}
}
};
}
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4); | >,
}
impl $name {
/// Construct the DAC output channel. | random_line_split |
storage.rs | use regex::Regex;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Mutex;
use log::{error, info};
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite_migration::{Migrations, M};
use super::errors::Error;
pub type DatabaseConnection = r2d2::PooledConnection<SqliteConnectionManager>;
pub type DatabaseConnectionPool = r2d2::Pool<SqliteConnectionManager>;
#[derive(PartialEq, Eq, Hash)]
pub struct RoomId {
id: String,
}
lazy_static::lazy_static! {
// Alphanumeric characters, "-" and "_" only; must be 1-64 characters long
static ref REGULAR_CHARACTERS_ONLY: Regex = Regex::new(r"^[\w-]{1,64}$").unwrap();
}
impl RoomId {
pub fn new(room_id: &str) -> Option<RoomId> {
if REGULAR_CHARACTERS_ONLY.is_match(room_id) {
return Some(RoomId { id: room_id.to_string() });
} else {
return None;
}
}
pub fn get_id(&self) -> &str {
&self.id
}
}
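// Illustrative sketch (added for this document, not part of the original
// service): exercises `RoomId::new` against the regex above. The sample IDs
// are hypothetical.
#[cfg(test)]
mod room_id_examples {
    use super::RoomId;

    #[test]
    fn validates_room_ids() {
        // Alphanumerics, '-' and '_' of length 1-64 are accepted.
        assert!(RoomId::new("main-room_01").is_some());
        // Characters outside [\w-] are rejected.
        assert!(RoomId::new("bad/room").is_none());
        // Length bounds: empty and over-long IDs are rejected.
        assert!(RoomId::new("").is_none());
        assert!(RoomId::new(&"a".repeat(65)).is_none());
    }
}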
// Main
lazy_static::lazy_static! {
pub static ref MAIN_POOL: DatabaseConnectionPool = {
let file_name = "database.db";
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name);
return r2d2::Pool::new(db_manager).unwrap();
};
}
pub fn create_main_database_if_needed() {
let pool = &MAIN_POOL;
let conn = pool.get().unwrap();
create_main_tables_if_needed(&conn);
}
fn create_main_tables_if_needed(conn: &DatabaseConnection) |
// Rooms
pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60; // 10 minutes, in seconds
pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60; // 7 days, in seconds
pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60; // 15 days, in seconds
lazy_static::lazy_static! {
static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new());
}
pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool {
let mut pools = POOLS.lock().unwrap();
if let Some(pool) = pools.get(room_id.get_id()) {
return pool.clone();
} else {
let raw_path = format!("rooms/{}.db", room_id.get_id());
let path = Path::new(&raw_path);
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path);
let pool = r2d2::Pool::new(db_manager).unwrap();
pools.insert(room_id.get_id().to_string(), pool);
return pools[room_id.get_id()].clone();
}
}
pub fn create_database_if_needed(room_id: &RoomId) {
let pool = pool_by_room_id(room_id);
let conn = pool.get().unwrap();
create_room_tables_if_needed(&conn);
}
pub fn create_room_tables_if_needed(conn: &DatabaseConnection) {
// Messages
// The `id` field is needed to make `rowid` stable, which is important because otherwise
// the `id`s in this table won't correspond to those in the deleted messages table
let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY,
public_key TEXT,
timestamp INTEGER,
data TEXT,
signature TEXT,
is_deleted INTEGER
)";
conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table.");
// Deleted messages
let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages (
id INTEGER PRIMARY KEY,
deleted_message_id INTEGER
)";
conn.execute(&deleted_messages_table_cmd, params![])
.expect("Couldn't create deleted messages table.");
// Moderators
let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators (
public_key TEXT
)";
conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table.");
// Block list
let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list (
public_key TEXT
)";
conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table.");
// Pending tokens
// Note that a given public key can have multiple pending tokens
let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens (
public_key TEXT,
timestamp INTEGER,
token BLOB
)";
conn.execute(&pending_tokens_table_cmd, params![])
.expect("Couldn't create pending tokens table.");
// Tokens
// The token is stored as hex here (rather than as bytes) because it's more convenient for lookup
let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table.");
// Files
let files_table_cmd = "CREATE TABLE IF NOT EXISTS files (
id TEXT PRIMARY KEY,
timestamp INTEGER
)";
conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table.");
// User activity table
let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity (
public_key TEXT PRIMARY KEY,
last_active INTEGER NOT NULL
)";
conn.execute(&user_activity_table_cmd, params![])
.expect("Couldn't create user activity table.");
}
// Pruning
pub async fn prune_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_tokens().await;
});
}
}
pub async fn prune_pending_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_pending_tokens().await;
});
}
}
pub async fn prune_files_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_files(FILE_EXPIRATION).await;
});
}
}
async fn prune_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
let stmt = "DELETE FROM tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
}
info!("Pruned tokens.");
}
async fn prune_pending_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - PENDING_TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
}
info!("Pruned pending tokens.");
}
fn get_expired_file_ids(
pool: &DatabaseConnectionPool, file_expiration: i64,
) -> Result<Vec<String>, ()> {
let now = chrono::Utc::now().timestamp();
let expiration = now - file_expiration;
// Get a database connection
let conn = pool.get().map_err(|e| {
error!("Couldn't get database connection to prune files due to error: {}.", e);
})?;
// Get the IDs of the files to delete
let raw_query = "SELECT id FROM files WHERE timestamp < (?1)";
let mut query = conn.prepare(&raw_query).map_err(|e| {
error!("Couldn't prepare query to prune files due to error: {}.", e);
})?;
let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| {
error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration);
})?;
Ok(rows.filter_map(|result| result.ok()).collect())
}
pub async fn prune_files_for_room(
pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64,
) {
let ids = get_expired_file_ids(&pool, file_expiration);
match ids {
Ok(ids) if !ids.is_empty() => {
// Delete the files
let futs = ids.iter().map(|id| async move {
(
tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await,
id.to_owned(),
)
});
let results = futures::future::join_all(futs).await;
for (res, id) in results {
if let Err(err) = res {
error!(
"Couldn't delete file: {} from room: {} due to error: {}.",
id,
room.get_id(),
err
);
}
}
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return error!(
"Couldn't get database connection to prune files due to error: {}.",
e
)
}
};
// Measure the time it takes to delete all files sequentially
// (this might become a problem since we're not using an async interface)
let now = std::time::Instant::now();
// Remove the file records from the database
// FIXME: It'd be great to do this in a single statement, but apparently this is not supported very well
for id in ids {
let stmt = "DELETE FROM files WHERE id = (?1)";
match conn.execute(&stmt, params![id]) {
Ok(_) => (),
Err(e) => {
return error!("Couldn't prune file with ID: {} due to error: {}.", id, e)
}
};
}
// Log the result
info!("Pruned files for room: {}. Took: {:?}", room.get_id(), now.elapsed());
}
Ok(_) => {
// empty
}
Err(_) => {
// It's not catastrophic if we fail to prune the database for a given room
}
}
}
pub async fn prune_files(file_expiration: i64) {
// The expiration setting is passed in for testing purposes
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
let futs = rooms.into_iter().map(|room| async move {
let pool = pool_by_room_id(&room);
prune_files_for_room(&pool, &room, file_expiration).await;
});
futures::future::join_all(futs).await;
}
// Migration
pub fn perform_migration() {
let rooms = match get_all_room_ids() {
Ok(ids) => ids,
Err(_e) => {
return error!("Couldn't get all room IDs.");
}
};
let create_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
let migrations =
Migrations::new(vec![M::up("DROP TABLE tokens"), M::up(&create_tokens_table_cmd)]);
for room in rooms {
create_database_if_needed(&room);
let pool = pool_by_room_id(&room);
let mut conn = pool.get().unwrap();
migrations.to_latest(&mut conn).unwrap();
}
}
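// Hedged illustration (added): the same `rusqlite_migration` flow run against
// an in-memory database, isolated from the room pools above. The table
// definition here is hypothetical.
#[cfg(test)]
mod migration_example {
    use rusqlite::Connection;
    use rusqlite_migration::{Migrations, M};

    #[test]
    fn migrate_in_memory() {
        let mut conn = Connection::open_in_memory().unwrap();
        // Each `M::up` is one migration step; `to_latest` applies any steps
        // that have not yet run, tracked via SQLite's `user_version` pragma.
        let migrations = Migrations::new(vec![M::up(
            "CREATE TABLE tokens (token TEXT PRIMARY KEY)",
        )]);
        migrations.to_latest(&mut conn).unwrap();
    }
}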
// Utilities
fn get_all_room_ids() -> Result<Vec<RoomId>, Error> {
// Get a database connection
let conn = MAIN_POOL.get().map_err(|_| Error::DatabaseFailedInternally)?;
// Query the database
let raw_query = "SELECT id FROM main";
let mut query = conn.prepare(&raw_query).map_err(|_| Error::DatabaseFailedInternally)?;
let rows = match query.query_map(params![], |row| row.get(0)) {
Ok(rows) => rows,
Err(e) => {
error!("Couldn't query database due to error: {}.", e);
return Err(Error::DatabaseFailedInternally);
}
};
let room_ids: Vec<_> = rows
.filter_map(|result: Result<String, _>| result.ok())
.filter_map(|id| RoomId::new(&id))
.collect();
// Return
return Ok(room_ids);
}
| {
let main_table_cmd = "CREATE TABLE IF NOT EXISTS main (
id TEXT PRIMARY KEY,
name TEXT,
image_id TEXT
)";
conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table.");
} | identifier_body |
storage.rs | use regex::Regex;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Mutex;
use log::{error, info};
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite_migration::{Migrations, M};
use super::errors::Error;
pub type DatabaseConnection = r2d2::PooledConnection<SqliteConnectionManager>;
pub type DatabaseConnectionPool = r2d2::Pool<SqliteConnectionManager>;
#[derive(PartialEq, Eq, Hash)]
pub struct RoomId {
id: String,
}
lazy_static::lazy_static! {
// Alphanumeric characters, "-" and "_" only; must be 1-64 characters long
static ref REGULAR_CHARACTERS_ONLY: Regex = Regex::new(r"^[\w-]{1,64}$").unwrap();
}
impl RoomId {
pub fn new(room_id: &str) -> Option<RoomId> {
if REGULAR_CHARACTERS_ONLY.is_match(room_id) {
return Some(RoomId { id: room_id.to_string() });
} else {
return None;
}
}
pub fn get_id(&self) -> &str {
&self.id
}
}
// Main
lazy_static::lazy_static! {
pub static ref MAIN_POOL: DatabaseConnectionPool = {
let file_name = "database.db";
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name);
return r2d2::Pool::new(db_manager).unwrap();
};
}
pub fn create_main_database_if_needed() {
let pool = &MAIN_POOL;
let conn = pool.get().unwrap();
create_main_tables_if_needed(&conn);
}
fn create_main_tables_if_needed(conn: &DatabaseConnection) {
let main_table_cmd = "CREATE TABLE IF NOT EXISTS main (
id TEXT PRIMARY KEY,
name TEXT,
image_id TEXT
)";
conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table.");
}
// Rooms
pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60;
pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60;
pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60;
lazy_static::lazy_static! {
static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new());
}
pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool {
let mut pools = POOLS.lock().unwrap();
if let Some(pool) = pools.get(room_id.get_id()) {
return pool.clone();
} else {
let raw_path = format!("rooms/{}.db", room_id.get_id());
let path = Path::new(&raw_path);
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path);
let pool = r2d2::Pool::new(db_manager).unwrap();
pools.insert(room_id.get_id().to_string(), pool);
return pools[room_id.get_id()].clone();
}
}
pub fn create_database_if_needed(room_id: &RoomId) {
let pool = pool_by_room_id(room_id);
let conn = pool.get().unwrap();
create_room_tables_if_needed(&conn);
}
pub fn create_room_tables_if_needed(conn: &DatabaseConnection) {
// Messages
// The `id` field is needed to make `rowid` stable, which is important because otherwise
// the `id`s in this table won't correspond to those in the deleted messages table
let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY,
public_key TEXT,
timestamp INTEGER,
data TEXT,
signature TEXT,
is_deleted INTEGER
)";
conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table.");
// Deleted messages
let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages (
id INTEGER PRIMARY KEY,
deleted_message_id INTEGER
)";
conn.execute(&deleted_messages_table_cmd, params![])
.expect("Couldn't create deleted messages table.");
// Moderators
let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators (
public_key TEXT
)";
conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table.");
// Block list
let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list (
public_key TEXT
)";
conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table.");
// Pending tokens
// Note that a given public key can have multiple pending tokens
let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens (
public_key TEXT,
timestamp INTEGER,
token BLOB
)";
conn.execute(&pending_tokens_table_cmd, params![])
.expect("Couldn't create pending tokens table.");
// Tokens
// The token is stored as hex here (rather than as bytes) because it's more convenient for lookup
let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table.");
// Files
let files_table_cmd = "CREATE TABLE IF NOT EXISTS files (
id TEXT PRIMARY KEY,
timestamp INTEGER
)";
conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table.");
// User activity table
let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity (
public_key TEXT PRIMARY KEY,
last_active INTEGER NOT NULL
)";
conn.execute(&user_activity_table_cmd, params![])
.expect("Couldn't create user activity table.");
}
// Pruning
pub async fn prune_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_tokens().await;
});
}
}
pub async fn prune_pending_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_pending_tokens().await;
});
}
}
pub async fn prune_files_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_files(FILE_EXPIRATION).await;
});
}
}
async fn prune_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
let stmt = "DELETE FROM tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
}
info!("Pruned tokens.");
}
async fn prune_pending_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e), | Ok(_) => (),
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
}
info!("Pruned pending tokens.");
}
fn get_expired_file_ids(
pool: &DatabaseConnectionPool, file_expiration: i64,
) -> Result<Vec<String>, ()> {
let now = chrono::Utc::now().timestamp();
let expiration = now - file_expiration;
// Get a database connection
let conn = pool.get().map_err(|e| {
error!("Couldn't get database connection to prune files due to error: {}.", e);
})?;
// Get the IDs of the files to delete
let raw_query = "SELECT id FROM files WHERE timestamp < (?1)";
let mut query = conn.prepare(&raw_query).map_err(|e| {
error!("Couldn't prepare query to prune files due to error: {}.", e);
})?;
let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| {
error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration);
})?;
Ok(rows.filter_map(|result| result.ok()).collect())
}
pub async fn prune_files_for_room(
pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64,
) {
let ids = get_expired_file_ids(&pool, file_expiration);
match ids {
Ok(ids) if !ids.is_empty() => {
// Delete the files
let futs = ids.iter().map(|id| async move {
(
tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await,
id.to_owned(),
)
});
let results = futures::future::join_all(futs).await;
for (res, id) in results {
if let Err(err) = res {
error!(
"Couldn't delete file: {} from room: {} due to error: {}.",
id,
room.get_id(),
err
);
}
}
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return error!(
"Couldn't get database connection to prune files due to error: {}.",
e
)
}
};
// Measure the time it takes to delete all files sequentially
// (this might become a problem since we're not using an async interface)
let now = std::time::Instant::now();
// Remove the file records from the database
// FIXME: It'd be great to do this in a single statement, but apparently this is not supported very well
for id in ids {
let stmt = "DELETE FROM files WHERE id = (?1)";
match conn.execute(&stmt, params![id]) {
Ok(_) => (),
Err(e) => {
return error!("Couldn't prune file with ID: {} due to error: {}.", id, e)
}
};
}
// Log the result
info!("Pruned files for room: {}. Took: {:?}", room.get_id(), now.elapsed());
}
Ok(_) => {
// empty
}
Err(_) => {
// It's not catastrophic if we fail to prune the database for a given room
}
}
}
pub async fn prune_files(file_expiration: i64) {
// The expiration setting is passed in for testing purposes
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
let futs = rooms.into_iter().map(|room| async move {
let pool = pool_by_room_id(&room);
prune_files_for_room(&pool, &room, file_expiration).await;
});
futures::future::join_all(futs).await;
}
// Migration
pub fn perform_migration() {
let rooms = match get_all_room_ids() {
Ok(ids) => ids,
Err(_e) => {
return error!("Couldn't get all room IDs.");
}
};
let create_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
let migrations =
Migrations::new(vec![M::up("DROP TABLE tokens"), M::up(&create_tokens_table_cmd)]);
for room in rooms {
create_database_if_needed(&room);
let pool = pool_by_room_id(&room);
let mut conn = pool.get().unwrap();
migrations.to_latest(&mut conn).unwrap();
}
}
// Utilities
fn get_all_room_ids() -> Result<Vec<RoomId>, Error> {
// Get a database connection
let conn = MAIN_POOL.get().map_err(|_| Error::DatabaseFailedInternally)?;
// Query the database
let raw_query = "SELECT id FROM main";
let mut query = conn.prepare(&raw_query).map_err(|_| Error::DatabaseFailedInternally)?;
let rows = match query.query_map(params![], |row| row.get(0)) {
Ok(rows) => rows,
Err(e) => {
error!("Couldn't query database due to error: {}.", e);
return Err(Error::DatabaseFailedInternally);
}
};
let room_ids: Vec<_> = rows
.filter_map(|result: Result<String, _>| result.ok())
.filter_map(|id| RoomId::new(&id))
.collect();
// Return
return Ok(room_ids);
} | };
let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - PENDING_TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) { | random_line_split |
storage.rs | use regex::Regex;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Mutex;
use log::{error, info};
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::params;
use rusqlite_migration::{Migrations, M};
use super::errors::Error;
pub type DatabaseConnection = r2d2::PooledConnection<SqliteConnectionManager>;
pub type DatabaseConnectionPool = r2d2::Pool<SqliteConnectionManager>;
#[derive(PartialEq, Eq, Hash)]
pub struct RoomId {
id: String,
}
lazy_static::lazy_static! {
// Alphanumeric characters, "-" and "_" only; must be 1-64 characters long
static ref REGULAR_CHARACTERS_ONLY: Regex = Regex::new(r"^[\w-]{1,64}$").unwrap();
}
impl RoomId {
pub fn new(room_id: &str) -> Option<RoomId> {
if REGULAR_CHARACTERS_ONLY.is_match(room_id) {
return Some(RoomId { id: room_id.to_string() });
} else {
return None;
}
}
pub fn get_id(&self) -> &str {
&self.id
}
}
// Main
lazy_static::lazy_static! {
pub static ref MAIN_POOL: DatabaseConnectionPool = {
let file_name = "database.db";
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(file_name);
return r2d2::Pool::new(db_manager).unwrap();
};
}
pub fn create_main_database_if_needed() {
let pool = &MAIN_POOL;
let conn = pool.get().unwrap();
create_main_tables_if_needed(&conn);
}
fn | (conn: &DatabaseConnection) {
let main_table_cmd = "CREATE TABLE IF NOT EXISTS main (
id TEXT PRIMARY KEY,
name TEXT,
image_id TEXT
)";
conn.execute(&main_table_cmd, params![]).expect("Couldn't create main table.");
}
// Rooms
pub const PENDING_TOKEN_EXPIRATION: i64 = 10 * 60;
pub const TOKEN_EXPIRATION: i64 = 7 * 24 * 60 * 60;
pub const FILE_EXPIRATION: i64 = 15 * 24 * 60 * 60;
lazy_static::lazy_static! {
static ref POOLS: Mutex<HashMap<String, DatabaseConnectionPool>> = Mutex::new(HashMap::new());
}
pub fn pool_by_room_id(room_id: &RoomId) -> DatabaseConnectionPool {
let mut pools = POOLS.lock().unwrap();
if let Some(pool) = pools.get(room_id.get_id()) {
return pool.clone();
} else {
let raw_path = format!("rooms/{}.db", room_id.get_id());
let path = Path::new(&raw_path);
let db_manager = r2d2_sqlite::SqliteConnectionManager::file(path);
let pool = r2d2::Pool::new(db_manager).unwrap();
pools.insert(room_id.get_id().to_string(), pool);
return pools[room_id.get_id()].clone();
}
}
pub fn create_database_if_needed(room_id: &RoomId) {
let pool = pool_by_room_id(room_id);
let conn = pool.get().unwrap();
create_room_tables_if_needed(&conn);
}
pub fn create_room_tables_if_needed(conn: &DatabaseConnection) {
// Messages
// The `id` field is needed to make `rowid` stable, which is important because otherwise
// the `id`s in this table won't correspond to those in the deleted messages table
let messages_table_cmd = "CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY,
public_key TEXT,
timestamp INTEGER,
data TEXT,
signature TEXT,
is_deleted INTEGER
)";
conn.execute(&messages_table_cmd, params![]).expect("Couldn't create messages table.");
// Deleted messages
let deleted_messages_table_cmd = "CREATE TABLE IF NOT EXISTS deleted_messages (
id INTEGER PRIMARY KEY,
deleted_message_id INTEGER
)";
conn.execute(&deleted_messages_table_cmd, params![])
.expect("Couldn't create deleted messages table.");
// Moderators
let moderators_table_cmd = "CREATE TABLE IF NOT EXISTS moderators (
public_key TEXT
)";
conn.execute(&moderators_table_cmd, params![]).expect("Couldn't create moderators table.");
// Block list
let block_list_table_cmd = "CREATE TABLE IF NOT EXISTS block_list (
public_key TEXT
)";
conn.execute(&block_list_table_cmd, params![]).expect("Couldn't create block list table.");
// Pending tokens
// Note that a given public key can have multiple pending tokens
let pending_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS pending_tokens (
public_key TEXT,
timestamp INTEGER,
token BLOB
)";
conn.execute(&pending_tokens_table_cmd, params![])
.expect("Couldn't create pending tokens table.");
// Tokens
// The token is stored as hex here (rather than as bytes) because it's more convenient for lookup
let tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
conn.execute(&tokens_table_cmd, params![]).expect("Couldn't create tokens table.");
// Files
let files_table_cmd = "CREATE TABLE IF NOT EXISTS files (
id TEXT PRIMARY KEY,
timestamp INTEGER
)";
conn.execute(&files_table_cmd, params![]).expect("Couldn't create files table.");
// User activity table
let user_activity_table_cmd = "CREATE TABLE IF NOT EXISTS user_activity (
public_key TEXT PRIMARY KEY,
last_active INTEGER NOT NULL
)";
conn.execute(&user_activity_table_cmd, params![])
.expect("Couldn't create user activity table.");
}
// Pruning
pub async fn prune_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_tokens().await;
});
}
}
pub async fn prune_pending_tokens_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::minutes(10).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_pending_tokens().await;
});
}
}
pub async fn prune_files_periodically() {
let mut timer = tokio::time::interval(chrono::Duration::days(1).to_std().unwrap());
loop {
timer.tick().await;
tokio::spawn(async {
prune_files(FILE_EXPIRATION).await;
});
}
}
async fn prune_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
let stmt = "DELETE FROM tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune tokens due to error: {}.", e),
};
}
info!("Pruned tokens.");
}
async fn prune_pending_tokens() {
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
for room in rooms {
let pool = pool_by_room_id(&room);
// It's not catastrophic if we fail to prune the database for a given room
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
let stmt = "DELETE FROM pending_tokens WHERE timestamp < (?1)";
let now = chrono::Utc::now().timestamp();
let expiration = now - PENDING_TOKEN_EXPIRATION;
match conn.execute(&stmt, params![expiration]) {
Ok(_) => (),
Err(e) => return error!("Couldn't prune pending tokens due to error: {}.", e),
};
}
info!("Pruned pending tokens.");
}
fn get_expired_file_ids(
pool: &DatabaseConnectionPool, file_expiration: i64,
) -> Result<Vec<String>, ()> {
let now = chrono::Utc::now().timestamp();
let expiration = now - file_expiration;
// Get a database connection
let conn = pool.get().map_err(|e| {
error!("Couldn't get database connection to prune files due to error: {}.", e);
})?;
// Get the IDs of the files to delete
let raw_query = "SELECT id FROM files WHERE timestamp < (?1)";
let mut query = conn.prepare(&raw_query).map_err(|e| {
error!("Couldn't prepare query to prune files due to error: {}.", e);
})?;
let rows = query.query_map(params![expiration], |row| row.get(0)).map_err(|e| {
error!("Couldn't prune files due to error: {} (expiration = {}).", e, expiration);
})?;
Ok(rows.filter_map(|result| result.ok()).collect())
}
pub async fn prune_files_for_room(
pool: &DatabaseConnectionPool, room: &RoomId, file_expiration: i64,
) {
let ids = get_expired_file_ids(&pool, file_expiration);
match ids {
Ok(ids) if !ids.is_empty() => {
// Delete the files
let futs = ids.iter().map(|id| async move {
(
tokio::fs::remove_file(format!("files/{}_files/{}", room.get_id(), id)).await,
id.to_owned(),
)
});
let results = futures::future::join_all(futs).await;
for (res, id) in results {
if let Err(err) = res {
error!(
"Couldn't delete file: {} from room: {} due to error: {}.",
id,
room.get_id(),
err
);
}
}
let conn = match pool.get() {
Ok(conn) => conn,
Err(e) => {
return error!(
"Couldn't get database connection to prune files due to error: {}.",
e
)
}
};
// Measure the time it takes to delete all files sequentially
// (this might become a problem since we're not using an async interface)
let now = std::time::Instant::now();
// Remove the file records from the database
// FIXME: It'd be great to do this in a single statement, but apparently this is not supported very well
for id in ids {
let stmt = "DELETE FROM files WHERE id = (?1)";
match conn.execute(&stmt, params![id]) {
Ok(_) => (),
Err(e) => {
return error!("Couldn't prune file with ID: {} due to error: {}.", id, e)
}
};
}
// Log the result
info!("Pruned files for room: {}. Took: {:?}", room.get_id(), now.elapsed());
}
Ok(_) => {
// empty
}
Err(_) => {
// It's not catastrophic if we fail to prune the database for a given room
}
}
}
pub async fn prune_files(file_expiration: i64) {
// The expiration setting is passed in for testing purposes
let rooms = match get_all_room_ids() {
Ok(rooms) => rooms,
Err(_) => return,
};
let futs = rooms.into_iter().map(|room| async move {
let pool = pool_by_room_id(&room);
prune_files_for_room(&pool, &room, file_expiration).await;
});
futures::future::join_all(futs).await;
}
// Migration
pub fn perform_migration() {
let rooms = match get_all_room_ids() {
Ok(ids) => ids,
Err(_e) => {
return error!("Couldn't get all room IDs.");
}
};
let create_tokens_table_cmd = "CREATE TABLE IF NOT EXISTS tokens (
public_key TEXT,
timestamp INTEGER,
token TEXT PRIMARY KEY
)";
let migrations =
Migrations::new(vec![M::up("DROP TABLE tokens"), M::up(&create_tokens_table_cmd)]);
for room in rooms {
create_database_if_needed(&room);
let pool = pool_by_room_id(&room);
let mut conn = pool.get().unwrap();
migrations.to_latest(&mut conn).unwrap();
}
}
// Utilities
fn get_all_room_ids() -> Result<Vec<RoomId>, Error> {
// Get a database connection
let conn = MAIN_POOL.get().map_err(|_| Error::DatabaseFailedInternally)?;
// Query the database
let raw_query = "SELECT id FROM main";
let mut query = conn.prepare(&raw_query).map_err(|_| Error::DatabaseFailedInternally)?;
let rows = match query.query_map(params![], |row| row.get(0)) {
Ok(rows) => rows,
Err(e) => {
error!("Couldn't query database due to error: {}.", e);
return Err(Error::DatabaseFailedInternally);
}
};
let room_ids: Vec<_> = rows
.filter_map(|result: Result<String, _>| result.ok())
.filter_map(|id| RoomId::new(&id))
.collect();
// Return
return Ok(room_ids);
}
| create_main_tables_if_needed | identifier_name |
decode.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use crate::unescape::unescape;
use std::borrow::Cow;
use std::convert::TryFrom;
use thiserror::Error;
use xmlparser::{ElementEnd, Token, Tokenizer};
pub type Depth = usize;
// In general, these errors just report what happened; there isn't
// much value in lots of different match variants.
#[derive(Debug, Error)]
pub enum XmlError {
#[error("XML Parse Error")]
InvalidXml(#[from] xmlparser::Error),
#[error("Invalid XML Escape: {esc}")]
InvalidEscape { esc: String },
#[error("Error parsing XML: {0}")]
Custom(Cow<'static, str>),
#[error("Encountered another error parsing XML: {0}")]
Unhandled(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl XmlError {
pub fn custom(msg: impl Into<Cow<'static, str>>) -> Self {
XmlError::Custom(msg.into())
}
}
#[derive(PartialEq, Debug)]
pub struct | <'a> {
pub prefix: &'a str,
pub local: &'a str,
}
impl Name<'_> {
/// Check if a given name matches a tag name composed of `prefix:local` or just `local`
pub fn matches(&self, tag_name: &str) -> bool {
let split = tag_name.find(':');
match split {
None => tag_name == self.local,
Some(idx) => {
let (prefix, local) = tag_name.split_at(idx);
let local = &local[1..];
self.local == local && self.prefix == prefix
}
}
}
}
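// Illustrative sketch (added): how `Name::matches` treats bare and prefixed
// tag names. The sample names are hypothetical.
#[cfg(test)]
mod name_matching_examples {
    use super::Name;

    #[test]
    fn bare_and_prefixed_names() {
        let name = Name { prefix: "xsi", local: "type" };
        // A bare name only has to match the local component...
        assert!(name.matches("type"));
        // ...while `prefix:local` must match both components.
        assert!(name.matches("xsi:type"));
        assert!(!name.matches("other:type"));
    }
}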
#[derive(Debug, PartialEq)]
pub struct Attr<'a> {
name: Name<'a>,
// attribute values can be escaped (e.g. contain &quot;), so we need a Cow
value: Cow<'a, str>,
}
#[derive(Debug, PartialEq)]
pub struct StartEl<'a> {
name: Name<'a>,
attributes: Vec<Attr<'a>>,
closed: bool,
depth: Depth,
}
/// Xml Start Element
///
/// ```xml
/// <a:b c="d">
/// ^^^ ^^^^^
/// name attributes
/// ```
impl<'a> StartEl<'a> {
pub fn depth(&self) -> Depth {
self.depth
}
fn new(local: &'a str, prefix: &'a str, depth: Depth) -> Self {
Self {
name: Name { prefix, local },
attributes: vec![],
closed: false,
depth,
}
}
/// Retrieve an attribute with a given key
///
/// `key`: the attribute name as `prefix:local` joined by `:`, or just the local name
pub fn attr<'b>(&'b self, key: &'b str) -> Option<&'b str> {
self.attributes
.iter()
.find(|attr| attr.name.matches(key))
.map(|attr| attr.value.as_ref())
}
/// Returns whether this `StartEl` matches a given name
/// in `prefix:local` form.
pub fn matches(&self, pat: &str) -> bool {
self.name.matches(pat)
}
/// Local component of this element's name
///
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn local(&self) -> &str {
self.name.local
}
/// Prefix component of this elements name (or empty string)
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn prefix(&self) -> &str {
self.name.prefix
}
/// Returns true if `el` at `depth` is a match for this `start_el`
fn end_el(&self, el: ElementEnd, depth: Depth) -> bool {
if depth != self.depth {
return false;
}
match el {
ElementEnd::Open => false,
ElementEnd::Close(prefix, local) => {
prefix.as_str() == self.name.prefix && local.as_str() == self.name.local
}
ElementEnd::Empty => false,
}
}
}
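// Illustrative sketch (added): `StartEl::attr` looks keys up via
// `Name::matches`, so an attribute is found by its bare local name as well as
// by the full `prefix:local` form. The sample document is hypothetical.
#[cfg(test)]
mod attr_lookup_example {
    use super::Document;

    #[test]
    fn attr_by_local_or_prefixed_key() {
        let mut doc = Document::new(r#"<R xsi:type="CanonicalUser"/>"#);
        let root = doc.root_element().expect("valid doc");
        assert_eq!(root.start_el().attr("xsi:type"), Some("CanonicalUser"));
        assert_eq!(root.start_el().attr("type"), Some("CanonicalUser"));
    }
}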
/// Xml Document abstraction
///
/// This document wraps a lazy tokenizer with depth tracking.
/// Constructing a document is essentially free.
pub struct Document<'a> {
tokenizer: Tokenizer<'a>,
depth: Depth,
}
impl<'a> TryFrom<&'a [u8]> for Document<'a> {
type Error = XmlError;
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
Ok(Document::new(
std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?,
))
}
}
impl<'inp> Document<'inp> {
pub fn new(doc: &'inp str) -> Self {
Document {
tokenizer: Tokenizer::from(doc),
depth: 0,
}
}
/// "Depth first" iterator
///
/// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next
/// start element regardless of depth. This is useful to give a pointer into the middle
/// of a document to start reading.
///
/// ```xml
/// <Response> <-- first call returns this:
/// <A> <-- next call
/// <Nested /> <-- next call returns this
/// <MoreNested>hello</MoreNested> <-- then this:
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> {
next_start_element(self)
}
/// A scoped reader for the entire document
pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> {
let start_el = self
.next_start_element()
.ok_or_else(|| XmlError::custom("no root element"))?;
Ok(ScopedDecoder {
doc: self,
start_el,
terminated: false,
})
}
/// A scoped reader for a specific tag
///
/// This method is necessary for when you need to return a ScopedDecoder from a function
/// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference
/// to a field owned by the current function
pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: self,
start_el,
terminated: false,
}
}
}
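// Illustrative sketch (added): `next_start_element` yields start tags in
// document order regardless of nesting depth, as in the diagram above. The
// sample document is hypothetical.
#[cfg(test)]
mod depth_first_example {
    use super::Document;

    #[test]
    fn start_elements_in_document_order() {
        let mut doc = Document::new("<Response><A><Nested/></A><B/></Response>");
        let mut names = vec![];
        while let Some(el) = doc.next_start_element() {
            names.push(el.local().to_string());
        }
        assert_eq!(names, ["Response", "A", "Nested", "B"]);
    }
}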
/// Depth tracking iterator
///
/// ```xml
/// <a> <- startel depth 0
/// <b> <- startel depth 1
/// <c> <- startel depth 2
/// </c> <- endel depth 2
/// </b> <- endel depth 1
/// </a> <- endel depth 0
/// ```
impl<'inp> Iterator for Document<'inp> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> {
let tok = self.tokenizer.next()?;
let tok = match tok {
Err(e) => return Some(Err(e.into())),
Ok(tok) => tok,
};
// depth bookkeeping
match tok {
Token::ElementEnd {
end: ElementEnd::Close(_, _),
..
} => {
self.depth -= 1;
}
Token::ElementEnd {
end: ElementEnd::Empty,
..
} => self.depth -= 1,
t @ Token::ElementStart { .. } => {
self.depth += 1;
// We want the startel and endel to have the same depth, but after the opener,
// the parser will be at depth 1. Return the previous depth:
return Some(Ok((t, self.depth - 1)));
}
_ => {}
}
Some(Ok((tok, self.depth)))
}
}
/// XmlTag Abstraction
///
/// ScopedDecoder represents a tag-scoped view into an XML document. Methods
/// on `ScopedDecoder` return `None` when the current tag has been exhausted.
pub struct ScopedDecoder<'inp, 'a> {
doc: &'a mut Document<'inp>,
start_el: StartEl<'inp>,
terminated: bool,
}
/// When a scoped decoder is dropped, its entire scope is consumed so that the
/// next read begins at the next tag at the same depth.
impl Drop for ScopedDecoder<'_, '_> {
fn drop(&mut self) {
for _ in self {}
}
}
impl<'inp> ScopedDecoder<'inp, '_> {
/// The start element for this scope
pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> {
&self.start_el
}
/// Returns the next top-level tag in this scope
/// The returned reader will fully read the tag during its lifetime. If it is dropped without
/// the data being read, the reader will be advanced until the matching close tag. If you read
/// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopeDecoder`.
///
/// ```xml
/// <Response> <-- scoped reader on this tag
/// <A> <-- first call to next_tag returns this
/// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A`
/// <MoreNested>hello</MoreNested>
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> {
let next_tag = next_start_element(self)?;
Some(self.nested_decoder(next_tag))
}
fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: &mut self.doc,
start_el,
terminated: false,
}
}
}
impl<'inp, 'a> Iterator for ScopedDecoder<'inp, 'a> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next(&mut self) -> Option<Self::Item> {
if self.start_el.closed {
self.terminated = true;
}
if self.terminated {
return None;
}
let (tok, depth) = match self.doc.next() {
Some(Ok((tok, depth))) => (tok, depth),
other => return other,
};
match tok {
Token::ElementEnd { end, .. } if self.start_el.end_el(end, depth) => {
self.terminated = true;
return None;
}
_ => {}
}
Some(Ok((tok, depth)))
}
}
/// Load the next start element out of a depth-tagged token iterator
fn next_start_element<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Option<StartEl<'inp>> {
let mut out = StartEl::new("", "", 0);
loop {
match tokens.next()? {
Ok((Token::ElementStart { local, prefix,.. }, depth)) => {
out.name.local = local.as_str();
out.name.prefix = prefix.as_str();
out.depth = depth;
}
Ok((
Token::Attribute {
prefix,
local,
value,
..
},
_,
)) => out.attributes.push(Attr {
name: Name {
local: local.as_str(),
prefix: prefix.as_str(),
},
value: unescape(value.as_str()).ok()?,
}),
Ok((
Token::ElementEnd {
end: ElementEnd::Open,
..
},
_,
)) => break,
Ok((
Token::ElementEnd {
end: ElementEnd::Empty,
..
},
_,
)) => {
out.closed = true;
break;
}
_ => {}
}
}
Some(out)
}
/// Returns the data element at the current position
///
/// If the current position is not a data element (and is instead a <startelement>) an error
/// will be returned
pub fn try_data<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Result<Cow<'inp, str>, XmlError> {
loop {
match tokens.next().map(|opt| opt.map(|opt| opt.0)) {
None => return Ok(Cow::Borrowed("")),
Some(Ok(Token::Text { text })) => return unescape(text.as_str()),
Some(Ok(e @ Token::ElementStart { .. })) => {
return Err(XmlError::custom(format!(
"Looking for a data element, found: {:?}",
e
)))
}
Some(Err(e)) => return Err(e),
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::decode::{try_data, Attr, Depth, Document, Name, StartEl};
// test helper to create a closed startel
fn closed<'a>(local: &'a str, prefix: &'a str, depth: Depth) -> StartEl<'a> {
let mut s = StartEl::new(local, prefix, depth);
s.closed = true;
s
}
#[test]
fn scoped_tokens() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().expect("valid document");
assert_eq!(root.start_el().local(), "Response");
assert_eq!(root.next_tag().expect("tag exists").start_el().local(), "A");
assert!(root.next_tag().is_none());
}
#[test]
fn handle_depth_properly() {
let xml = r#"<Response><Response></Response><A/></Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid document");
assert_eq!(
scoped.next_tag().unwrap().start_el(),
&StartEl::new("Response", "", 1)
);
let closed_a = closed("A", "", 1);
assert_eq!(scoped.next_tag().unwrap().start_el(), &closed_a);
assert!(scoped.next_tag().is_none())
}
#[test]
fn self_closing() {
let xml = r#"<Response/>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid doc");
assert_eq!(scoped.start_el.closed, true);
assert!(scoped.next_tag().is_none())
}
#[test]
fn terminate_scope() {
let xml = r#"<Response><Struct><A></A><Also/></Struct><More/></Response>"#;
let mut doc = Document::new(xml);
let mut response_iter = doc.root_element().expect("valid doc");
let mut struct_iter = response_iter.next_tag().unwrap();
assert_eq!(
struct_iter.next_tag().as_ref().map(|t| t.start_el()),
Some(&StartEl::new("A", "", 2))
);
// When the inner iter is dropped, it reads to the end of its scope,
// preventing accidental reads from the middle of a partially-read node
drop(struct_iter);
assert_eq!(
response_iter.next_tag().unwrap().start_el(),
&closed("More", "", 1)
);
}
#[test]
fn read_data_invalid() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
try_data(&mut resp).expect_err("no data");
}
#[test]
fn read_data() {
let xml = r#"<Response>hello</Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), "hello");
}
/// Whitespace within an element is preserved
#[test]
fn read_data_whitespace() {
let xml = r#"<Response> hello </Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), " hello ");
}
#[test]
fn ignore_insignificant_whitespace() {
let xml = r#"<Response> <A> </A> </Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
let mut a = resp.next_tag().expect("should be a");
let data = try_data(&mut a).expect("valid");
assert_eq!(data, " ");
}
#[test]
fn read_attributes() {
let xml = r#"<Response xsi:type="CanonicalUser">hello</Response>"#;
let mut tokenizer = Document::new(xml);
let root = tokenizer.root_element().unwrap();
assert_eq!(
root.start_el().attributes,
vec![Attr {
name: Name {
prefix: "xsi".into(),
local: "type".into()
},
value: "CanonicalUser".into()
}]
)
}
#[test]
fn escape_data() {
let xml = r#"<Response key="&quot;hey&quot;>">&gt;</Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
assert_eq!(try_data(&mut root).unwrap(), ">");
assert_eq!(root.start_el().attr("key"), Some("\"hey\">"));
}
#[test]
fn nested_self_closer() {
let xml = r#"<XmlListsInputOutput>
<stringList/>
<stringSet></stringSet>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut string_list = root.next_tag().unwrap();
assert_eq!(string_list.start_el(), &closed("stringList", "", 1));
assert!(string_list.next_tag().is_none());
drop(string_list);
assert_eq!(
root.next_tag().unwrap().start_el(),
&StartEl::new("stringSet", "", 1)
);
}
#[test]
fn confusing_nested_same_name_tag() {
// an inner b which could be confused as closing the outer b if depth
// is not properly tracked:
let root_tags = &["a", "b", "c", "d"];
let xml = r#"<XmlListsInputOutput>
<a/>
<b>
<c/>
<b></b>
<here/>
</b>
<c></c>
<d>more</d>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut cmp = vec![];
while let Some(tag) = root.next_tag() {
cmp.push(tag.start_el().local().to_owned());
}
assert_eq!(root_tags, cmp.as_slice());
}
}
| Name | identifier_name |
decode.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use crate::unescape::unescape;
use std::borrow::Cow;
use std::convert::TryFrom;
use thiserror::Error;
use xmlparser::{ElementEnd, Token, Tokenizer};
pub type Depth = usize;
// In general, these errors just report what happened; there isn't
// much value in lots of different match variants.
#[derive(Debug, Error)]
pub enum XmlError {
#[error("XML Parse Error")]
InvalidXml(#[from] xmlparser::Error),
#[error("Invalid XML Escape: {esc}")]
InvalidEscape { esc: String },
#[error("Error parsing XML: {0}")]
Custom(Cow<'static, str>),
#[error("Encountered another error parsing XML: {0}")]
Unhandled(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl XmlError {
pub fn custom(msg: impl Into<Cow<'static, str>>) -> Self {
XmlError::Custom(msg.into())
}
}
#[derive(PartialEq, Debug)]
pub struct Name<'a> {
pub prefix: &'a str,
pub local: &'a str,
}
impl Name<'_> {
/// Check if a given name matches a tag name composed of `prefix:local` or just `local`
pub fn matches(&self, tag_name: &str) -> bool {
let split = tag_name.find(':');
match split {
None => tag_name == self.local,
Some(idx) => {
let (prefix, local) = tag_name.split_at(idx);
let local = &local[1..];
self.local == local && self.prefix == prefix
}
}
}
}
#[derive(Debug, PartialEq)]
pub struct Attr<'a> {
name: Name<'a>,
// attribute values can be escaped (e.g. contain &quot;), so we need a Cow
value: Cow<'a, str>,
}
#[derive(Debug, PartialEq)]
pub struct StartEl<'a> {
name: Name<'a>,
attributes: Vec<Attr<'a>>,
closed: bool,
depth: Depth,
}
/// Xml Start Element
///
/// ```xml
/// <a:b c="d">
/// ^^^ ^^^^^
/// name attributes
/// ```
impl<'a> StartEl<'a> {
pub fn depth(&self) -> Depth {
self.depth
}
fn new(local: &'a str, prefix: &'a str, depth: Depth) -> Self {
Self {
name: Name { prefix, local },
attributes: vec![],
closed: false,
depth,
}
}
/// Retrieve an attribute with a given key
///
/// `key`: the attribute name as `prefix:local` joined by `:`, or just the local name
pub fn attr<'b>(&'b self, key: &'b str) -> Option<&'b str> {
self.attributes
.iter()
.find(|attr| attr.name.matches(key))
.map(|attr| attr.value.as_ref())
}
/// Returns whether this `StartEl` matches a given name
/// in `prefix:local` form.
pub fn matches(&self, pat: &str) -> bool {
self.name.matches(pat)
}
/// Local component of this element's name
///
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn local(&self) -> &str |
/// Prefix component of this elements name (or empty string)
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn prefix(&self) -> &str {
self.name.prefix
}
/// Returns true if `el` at `depth` is a match for this `start_el`
fn end_el(&self, el: ElementEnd, depth: Depth) -> bool {
if depth != self.depth {
return false;
}
match el {
ElementEnd::Open => false,
ElementEnd::Close(prefix, local) => {
prefix.as_str() == self.name.prefix && local.as_str() == self.name.local
}
ElementEnd::Empty => false,
}
}
}
/// Xml Document abstraction
///
/// This document wraps a lazy tokenizer with depth tracking.
/// Constructing a document is essentially free.
pub struct Document<'a> {
tokenizer: Tokenizer<'a>,
depth: Depth,
}
impl<'a> TryFrom<&'a [u8]> for Document<'a> {
type Error = XmlError;
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
Ok(Document::new(
std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?,
))
}
}
impl<'inp> Document<'inp> {
pub fn new(doc: &'inp str) -> Self {
Document {
tokenizer: Tokenizer::from(doc),
depth: 0,
}
}
/// "Depth first" iterator
///
/// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next
/// start element regardless of depth. This is useful to give a pointer into the middle
/// of a document to start reading.
///
/// ```xml
/// <Response> <-- first call returns this:
/// <A> <-- next call
/// <Nested /> <-- next call returns this
/// <MoreNested>hello</MoreNested> <-- then this:
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> {
next_start_element(self)
}
/// A scoped reader for the entire document
pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> {
let start_el = self
.next_start_element()
.ok_or_else(|| XmlError::custom("no root element"))?;
Ok(ScopedDecoder {
doc: self,
start_el,
terminated: false,
})
}
/// A scoped reader for a specific tag
///
/// This method is necessary for when you need to return a ScopedDecoder from a function
/// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference
/// to a field owned by the current function
pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: self,
start_el,
terminated: false,
}
}
}
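// Illustrative sketch (added): `scoped_to` lets a helper function return a
// decoder for a tag it located itself, which the stacked borrows of
// `next_tag()` would otherwise prevent. The helper and sample document are
// hypothetical.
#[cfg(test)]
mod scoped_to_example {
    use super::{Document, ScopedDecoder};

    fn first_tag<'inp, 'a>(doc: &'a mut Document<'inp>) -> Option<ScopedDecoder<'inp, 'a>> {
        let start_el = doc.next_start_element()?;
        Some(doc.scoped_to(start_el))
    }

    #[test]
    fn helper_returns_scoped_decoder() {
        let mut doc = Document::new("<A><B/></A>");
        let mut a = first_tag(&mut doc).expect("found a tag");
        assert!(a.start_el().matches("A"));
        assert!(a.next_tag().expect("child").start_el().matches("B"));
    }
}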
/// Depth tracking iterator
///
/// ```xml
/// <a> <- startel depth 0
/// <b> <- startel depth 1
/// <c> <- startel depth 2
/// </c> <- endel depth 2
/// </b> <- endel depth 1
/// </a> <- endel depth 0
/// ```
impl<'inp> Iterator for Document<'inp> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> {
let tok = self.tokenizer.next()?;
let tok = match tok {
Err(e) => return Some(Err(e.into())),
Ok(tok) => tok,
};
// depth bookkeeping
match tok {
Token::ElementEnd {
end: ElementEnd::Close(_, _),
..
} => {
self.depth -= 1;
}
Token::ElementEnd {
end: ElementEnd::Empty,
..
} => self.depth -= 1,
t @ Token::ElementStart { .. } => {
self.depth += 1;
// We want the startel and endel to have the same depth, but after the opener,
// the parser will be at depth 1. Return the previous depth:
return Some(Ok((t, self.depth - 1)));
}
_ => {}
}
Some(Ok((tok, self.depth)))
}
}
/// XmlTag Abstraction
///
/// ScopedDecoder represents a tag-scoped view into an XML document. Methods
/// on `ScopedDecoder` return `None` when the current tag has been exhausted.
pub struct ScopedDecoder<'inp, 'a> {
doc: &'a mut Document<'inp>,
start_el: StartEl<'inp>,
terminated: bool,
}
/// When a scoped decoder is dropped, its entire scope is consumed so that the
/// next read begins at the next tag at the same depth.
impl Drop for ScopedDecoder<'_, '_> {
fn drop(&mut self) {
for _ in self {}
}
}
impl<'inp> ScopedDecoder<'inp, '_> {
/// The start element for this scope
pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> {
&self.start_el
}
/// Returns the next top-level tag in this scope
/// The returned reader will fully read the tag during its lifetime. If it is dropped without
/// the data being read, the reader will be advanced until the matching close tag. If you read
/// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopeDecoder`.
///
/// ```xml
/// <Response> <-- scoped reader on this tag
/// <A> <-- first call to next_tag returns this
/// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A`
/// <MoreNested>hello</MoreNested>
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> {
let next_tag = next_start_element(self)?;
Some(self.nested_decoder(next_tag))
}
fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: &mut self.doc,
start_el,
terminated: false,
}
}
}
impl<'inp, 'a> Iterator for ScopedDecoder<'inp, 'a> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next(&mut self) -> Option<Self::Item> {
if self.start_el.closed {
self.terminated = true;
}
if self.terminated {
return None;
}
let (tok, depth) = match self.doc.next() {
Some(Ok((tok, depth))) => (tok, depth),
other => return other,
};
match tok {
Token::ElementEnd { end, .. } if self.start_el.end_el(end, depth) => {
self.terminated = true;
return None;
}
_ => {}
}
Some(Ok((tok, depth)))
}
}
/// Load the next start element out of a depth-tagged token iterator
fn next_start_element<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Option<StartEl<'inp>> {
let mut out = StartEl::new("", "", 0);
loop {
match tokens.next()? {
Ok((Token::ElementStart { local, prefix,.. }, depth)) => {
out.name.local = local.as_str();
out.name.prefix = prefix.as_str();
out.depth = depth;
}
Ok((
Token::Attribute {
prefix,
local,
value,
..
},
_,
)) => out.attributes.push(Attr {
name: Name {
local: local.as_str(),
prefix: prefix.as_str(),
},
value: unescape(value.as_str()).ok()?,
}),
Ok((
Token::ElementEnd {
end: ElementEnd::Open,
..
},
_,
)) => break,
Ok((
Token::ElementEnd {
end: ElementEnd::Empty,
..
},
_,
)) => {
out.closed = true;
break;
}
_ => {}
}
}
Some(out)
}
/// Returns the data element at the current position
///
/// If the current position is not a data element (and is instead a <startelement>) an error
/// will be returned
pub fn try_data<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Result<Cow<'inp, str>, XmlError> {
loop {
match tokens.next().map(|opt| opt.map(|opt| opt.0)) {
None => return Ok(Cow::Borrowed("")),
Some(Ok(Token::Text { text })) => return unescape(text.as_str()),
Some(Ok(e @ Token::ElementStart { .. })) => {
return Err(XmlError::custom(format!(
"Looking for a data element, found: {:?}",
e
)))
}
Some(Err(e)) => return Err(e),
_ => {}
}
}
}
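// Illustrative sketch (added): `try_data` pulls the text content out of the
// current scope and unescapes entities on the way out. The sample document is
// hypothetical.
#[cfg(test)]
mod try_data_example {
    use super::{try_data, Document};

    #[test]
    fn reads_unescaped_text() {
        let mut doc = Document::new("<Response>a &amp; b</Response>");
        let mut root = doc.root_element().expect("valid doc");
        assert_eq!(try_data(&mut root).unwrap(), "a & b");
    }
}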
#[cfg(test)]
mod test {
use crate::decode::{try_data, Attr, Depth, Document, Name, StartEl};
// test helper to create a closed startel
fn closed<'a>(local: &'a str, prefix: &'a str, depth: Depth) -> StartEl<'a> {
let mut s = StartEl::new(local, prefix, depth);
s.closed = true;
s
}
#[test]
fn scoped_tokens() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().expect("valid document");
assert_eq!(root.start_el().local(), "Response");
assert_eq!(root.next_tag().expect("tag exists").start_el().local(), "A");
assert!(root.next_tag().is_none());
}
#[test]
fn handle_depth_properly() {
let xml = r#"<Response><Response></Response><A/></Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid document");
assert_eq!(
scoped.next_tag().unwrap().start_el(),
&StartEl::new("Response", "", 1)
);
let closed_a = closed("A", "", 1);
assert_eq!(scoped.next_tag().unwrap().start_el(), &closed_a);
assert!(scoped.next_tag().is_none())
}
#[test]
fn self_closing() {
let xml = r#"<Response/>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid doc");
assert_eq!(scoped.start_el.closed, true);
assert!(scoped.next_tag().is_none())
}
#[test]
fn terminate_scope() {
let xml = r#"<Response><Struct><A></A><Also/></Struct><More/></Response>"#;
let mut doc = Document::new(xml);
let mut response_iter = doc.root_element().expect("valid doc");
let mut struct_iter = response_iter.next_tag().unwrap();
assert_eq!(
struct_iter.next_tag().as_ref().map(|t| t.start_el()),
Some(&StartEl::new("A", "", 2))
);
// When the inner iter is dropped, it will read to the end of its scope
// prevent accidental behavior where we didn't read a full node
drop(struct_iter);
assert_eq!(
response_iter.next_tag().unwrap().start_el(),
&closed("More", "", 1)
);
}
#[test]
fn read_data_invalid() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
try_data(&mut resp).expect_err("no data");
}
#[test]
fn read_data() {
let xml = r#"<Response>hello</Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), "hello");
}
/// Whitespace within an element is preserved
#[test]
fn read_data_whitespace() {
let xml = r#"<Response> hello </Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), " hello ");
}
#[test]
fn ignore_insignificant_whitespace() {
let xml = r#"<Response> <A> </A> </Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
let mut a = resp.next_tag().expect("should be a");
let data = try_data(&mut a).expect("valid");
assert_eq!(data, " ");
}
#[test]
fn read_attributes() {
let xml = r#"<Response xsi:type="CanonicalUser">hello</Response>"#;
let mut tokenizer = Document::new(xml);
let root = tokenizer.root_element().unwrap();
assert_eq!(
root.start_el().attributes,
vec![Attr {
name: Name {
prefix: "xsi".into(),
local: "type".into()
},
value: "CanonicalUser".into()
}]
)
}
#[test]
fn escape_data() {
let xml = r#"<Response key=""hey">">></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
assert_eq!(try_data(&mut root).unwrap(), ">");
assert_eq!(root.start_el().attr("key"), Some("\"hey\">"));
}
#[test]
fn nested_self_closer() {
let xml = r#"<XmlListsInputOutput>
<stringList/>
<stringSet></stringSet>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut string_list = root.next_tag().unwrap();
assert_eq!(string_list.start_el(), &closed("stringList", "", 1));
assert!(string_list.next_tag().is_none());
drop(string_list);
assert_eq!(
root.next_tag().unwrap().start_el(),
&StartEl::new("stringSet", "", 1)
);
}
#[test]
fn confusing_nested_same_name_tag() {
// an inner b which could be confused as closing the outer b if depth
// is not properly tracked:
let root_tags = &["a", "b", "c", "d"];
let xml = r#"<XmlListsInputOutput>
<a/>
<b>
<c/>
<b></b>
<here/>
</b>
<c></c>
<d>more</d>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut cmp = vec![];
while let Some(tag) = root.next_tag() {
cmp.push(tag.start_el().local().to_owned());
}
assert_eq!(root_tags, cmp.as_slice());
}
}
| {
self.name.local
} | identifier_body |
decode.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use crate::unescape::unescape;
use std::borrow::Cow;
use std::convert::TryFrom;
use thiserror::Error;
use xmlparser::{ElementEnd, Token, Tokenizer};
pub type Depth = usize;
// in general, these errors are just for reporting what happened, there isn't
// much value in lots of different match variants
#[derive(Debug, Error)]
pub enum XmlError {
#[error("XML Parse Error")]
InvalidXml(#[from] xmlparser::Error),
#[error("Invalid XML Escape: {esc}")]
InvalidEscape { esc: String },
#[error("Error parsing XML: {0}")]
Custom(Cow<'static, str>),
#[error("Encountered another error parsing XML: {0}")]
Unhandled(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl XmlError {
pub fn custom(msg: impl Into<Cow<'static, str>>) -> Self {
XmlError::Custom(msg.into())
}
}
#[derive(PartialEq, Debug)]
pub struct Name<'a> {
pub prefix: &'a str,
pub local: &'a str,
}
impl Name<'_> {
/// Check if a given name matches a tag name composed of `prefix:local` or just `local`
pub fn matches(&self, tag_name: &str) -> bool {
let split = tag_name.find(':');
match split {
None => tag_name == self.local,
Some(idx) => {
let (prefix, local) = tag_name.split_at(idx);
let local = &local[1..];
self.local == local && self.prefix == prefix
}
}
}
}
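// Minimal sketch (not part of the original file) of `Name::matches` behavior,
// assuming this module is `crate::decode` like the test module at the bottom.
#[cfg(test)]
mod name_matches_sketch {
    use crate::decode::Name;

    #[test]
    fn prefixed_and_bare_forms() {
        let name = Name { prefix: "xsi", local: "type" };
        assert!(name.matches("xsi:type")); // combined prefix:local form
        assert!(name.matches("type")); // a bare local name ignores the prefix
        assert!(!name.matches("foo:type")); // mismatched prefix is rejected
    }
}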
#[derive(Debug, PartialEq)]
pub struct Attr<'a> {
name: Name<'a>,
// attribute values can be escaped (e.g. `&quot;` for a double quote), so we need a Cow
value: Cow<'a, str>,
}
#[derive(Debug, PartialEq)]
pub struct StartEl<'a> {
name: Name<'a>,
attributes: Vec<Attr<'a>>,
closed: bool,
depth: Depth,
}
/// Xml Start Element
///
/// ```xml
/// <a:b c="d">
/// ^^^ ^^^^^
/// name attributes
/// ```
impl<'a> StartEl<'a> {
pub fn depth(&self) -> Depth {
self.depth
}
fn new(local: &'a str, prefix: &'a str, depth: Depth) -> Self {
Self {
name: Name { prefix, local },
attributes: vec![],
closed: false,
depth,
}
}
/// Retrieve an attribute with a given key
///
/// key `prefix:local` combined as a str, joined by a `:`
pub fn attr<'b>(&'b self, key: &'b str) -> Option<&'b str> {
self.attributes
.iter()
.find(|attr| attr.name.matches(key))
.map(|attr| attr.value.as_ref())
}
/// Returns whether this `StartEl` matches a given name
/// in `prefix:local` form.
pub fn matches(&self, pat: &str) -> bool {
self.name.matches(pat)
}
/// Local component of this element's name
///
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn local(&self) -> &str {
self.name.local
}
/// Prefix component of this elements name (or empty string)
/// ```xml
/// <foo:bar>
/// ^^^
/// ```
pub fn prefix(&self) -> &str {
self.name.prefix
}
/// Returns true if `el` at `depth` is a match for this `start_el`
fn end_el(&self, el: ElementEnd, depth: Depth) -> bool {
if depth != self.depth {
return false;
}
match el {
ElementEnd::Open => false,
ElementEnd::Close(prefix, local) => {
prefix.as_str() == self.name.prefix && local.as_str() == self.name.local
}
ElementEnd::Empty => false,
}
}
}
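// Hedged sketch (not in the original): attribute lookup via the combined
// `prefix:local` key. Constructing `Attr`/`Name` directly mirrors the test
// module below; `el` is a hypothetical element.
#[cfg(test)]
mod start_el_attr_sketch {
    use crate::decode::{Attr, Name, StartEl};
    use std::borrow::Cow;

    #[test]
    fn attr_lookup_by_combined_key() {
        let mut el = StartEl::new("bar", "foo", 0);
        el.attributes.push(Attr {
            name: Name { prefix: "xsi", local: "type" },
            value: Cow::Borrowed("CanonicalUser"),
        });
        assert_eq!(el.attr("xsi:type"), Some("CanonicalUser"));
        assert_eq!(el.attr("missing"), None);
    }
}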
/// Xml Document abstraction
///
/// This document wraps a lazy tokenizer with depth tracking.
/// Constructing a document is essentially free.
pub struct Document<'a> {
tokenizer: Tokenizer<'a>,
depth: Depth,
}
impl<'a> TryFrom<&'a [u8]> for Document<'a> {
type Error = XmlError;
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
Ok(Document::new(
std::str::from_utf8(value).map_err(|err| XmlError::Unhandled(Box::new(err)))?,
))
}
}
impl<'inp> Document<'inp> {
pub fn new(doc: &'inp str) -> Self {
Document {
tokenizer: Tokenizer::from(doc),
depth: 0,
}
}
/// "Depth first" iterator
///
/// Unlike [`next_tag()`](ScopedDecoder::next_tag), this method returns the next
/// start element regardless of depth. This is useful to give a pointer into the middle
/// of a document to start reading.
///
/// ```xml
/// <Response> <-- first call returns this:
/// <A> <-- next call | /// <Nested /> <-- next call returns this
/// <MoreNested>hello</MoreNested> <-- then this:
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_start_element<'a>(&'a mut self) -> Option<StartEl<'inp>> {
next_start_element(self)
}
/// A scoped reader for the entire document
pub fn root_element<'a>(&'a mut self) -> Result<ScopedDecoder<'inp, 'a>, XmlError> {
let start_el = self
.next_start_element()
.ok_or_else(|| XmlError::custom("no root element"))?;
Ok(ScopedDecoder {
doc: self,
start_el,
terminated: false,
})
}
/// A scoped reader for a specific tag
///
/// This method is necessary for when you need to return a ScopedDecoder from a function
/// since normally the stacked-ownership that `next_tag()` uses would prevent returning a reference
/// to a field owned by the current function
pub fn scoped_to<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: self,
start_el,
terminated: false,
}
}
}
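// Sketch (not in the original file): `next_start_element` skims start tags at
// any depth, which is how `root_element` finds the document root.
#[cfg(test)]
mod document_skim_sketch {
    use crate::decode::Document;

    #[test]
    fn skims_start_elements_depth_first() {
        let mut doc = Document::new("<Response><A><Nested/></A></Response>");
        assert_eq!(doc.next_start_element().expect("root").local(), "Response");
        assert_eq!(doc.next_start_element().expect("A").local(), "A");
        assert_eq!(doc.next_start_element().expect("Nested").local(), "Nested");
        assert!(doc.next_start_element().is_none()); // only end tags remain
    }
}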
/// Depth tracking iterator
///
/// ```xml
/// <a> <- startel depth 0
/// <b> <- startel depth 1
/// <c> <- startel depth 2
/// </c> <- endel depth 2
/// </b> <- endel depth 1
/// </a> <- endel depth 0
/// ```
impl<'inp> Iterator for Document<'inp> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next<'a>(&'a mut self) -> Option<Result<(Token<'inp>, Depth), XmlError>> {
let tok = self.tokenizer.next()?;
let tok = match tok {
Err(e) => return Some(Err(e.into())),
Ok(tok) => tok,
};
// depth bookkeeping
match tok {
Token::ElementEnd {
end: ElementEnd::Close(_, _),
..
} => {
self.depth -= 1;
}
Token::ElementEnd {
end: ElementEnd::Empty,
..
} => self.depth -= 1,
t @ Token::ElementStart { .. } => {
self.depth += 1;
// We want the startel and endel to have the same depth, but after the opener,
// the parser will be at depth 1. Return the previous depth:
return Some(Ok((t, self.depth - 1)));
}
_ => {}
}
Some(Ok((tok, self.depth)))
}
}
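// Sketch (not in the original) of the depth bookkeeping documented above: a
// start token is reported at the depth of its element, so `<a>` is 0 and the
// self-closing `<b/>` nested inside it is 1.
#[cfg(test)]
mod depth_tracking_sketch {
    use crate::decode::Document;
    use xmlparser::Token;

    #[test]
    fn start_tokens_report_element_depth() {
        let mut doc = Document::new("<a><b/></a>");
        let mut depths = Vec::new();
        while let Some(res) = doc.next() {
            let (tok, depth) = res.expect("valid xml");
            if matches!(tok, Token::ElementStart { .. }) {
                depths.push(depth);
            }
        }
        assert_eq!(depths, vec![0, 1]);
    }
}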
/// XmlTag Abstraction
///
/// ScopedDecoder represents a tag-scoped view into an XML document. Methods
/// on `ScopedDecoder` return `None` when the current tag has been exhausted.
pub struct ScopedDecoder<'inp, 'a> {
doc: &'a mut Document<'inp>,
start_el: StartEl<'inp>,
terminated: bool,
}
/// When a scoped decoder is dropped, its entire scope is consumed so that the
/// next read begins at the next tag at the same depth.
impl Drop for ScopedDecoder<'_, '_> {
fn drop(&mut self) {
for _ in self {}
}
}
impl<'inp> ScopedDecoder<'inp, '_> {
/// The start element for this scope
pub fn start_el<'a>(&'a self) -> &'a StartEl<'inp> {
&self.start_el
}
/// Returns the next top-level tag in this scope.
/// The returned reader will fully read the tag during its lifetime. If it is dropped without
/// the data being read, the reader will be advanced until the matching close tag. If you read
/// an element with `next_tag()` and you want to ignore it, simply drop the resulting `ScopedDecoder`.
///
/// ```xml
/// <Response> <-- scoped reader on this tag
/// <A> <-- first call to next_tag returns this
/// <Nested /> <-- to get inner data, call `next_tag` on the returned decoder for `A`
/// <MoreNested>hello</MoreNested>
/// </A>
/// <B/> <-- second call to next_tag returns this
/// </Response>
/// ```
pub fn next_tag<'a>(&'a mut self) -> Option<ScopedDecoder<'inp, 'a>> {
let next_tag = next_start_element(self)?;
Some(self.nested_decoder(next_tag))
}
fn nested_decoder<'a>(&'a mut self, start_el: StartEl<'inp>) -> ScopedDecoder<'inp, 'a> {
ScopedDecoder {
doc: &mut self.doc,
start_el,
terminated: false,
}
}
}
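// Sketch (not in the original): the "drop to skip" behavior documented on
// `next_tag` — dropping an unread child decoder fast-forwards past its
// entire subtree, so the parent scope stays consistent.
#[cfg(test)]
mod skip_subtree_sketch {
    use crate::decode::Document;

    #[test]
    fn dropping_an_unread_child_skips_its_subtree() {
        let mut doc = Document::new("<R><Skip><Inner/></Skip><Keep/></R>");
        let mut root = doc.root_element().expect("valid doc");
        drop(root.next_tag().expect("<Skip> exists")); // never read; Drop consumes it
        assert_eq!(root.next_tag().expect("<Keep> exists").start_el().local(), "Keep");
    }
}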
impl<'inp, 'a> Iterator for ScopedDecoder<'inp, 'a> {
type Item = Result<(Token<'inp>, Depth), XmlError>;
fn next(&mut self) -> Option<Self::Item> {
if self.start_el.closed {
self.terminated = true;
}
if self.terminated {
return None;
}
let (tok, depth) = match self.doc.next() {
Some(Ok((tok, depth))) => (tok, depth),
other => return other,
};
match tok {
Token::ElementEnd { end, .. } if self.start_el.end_el(end, depth) => {
self.terminated = true;
return None;
}
_ => {}
}
Some(Ok((tok, depth)))
}
}
/// Load the next start element out of a depth-tagged token iterator
fn next_start_element<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Option<StartEl<'inp>> {
let mut out = StartEl::new("", "", 0);
loop {
match tokens.next()? {
Ok((Token::ElementStart { local, prefix, .. }, depth)) => {
out.name.local = local.as_str();
out.name.prefix = prefix.as_str();
out.depth = depth;
}
Ok((
Token::Attribute {
prefix,
local,
value,
..
},
_,
)) => out.attributes.push(Attr {
name: Name {
local: local.as_str(),
prefix: prefix.as_str(),
},
value: unescape(value.as_str()).ok()?,
}),
Ok((
Token::ElementEnd {
end: ElementEnd::Open,
..
},
_,
)) => break,
Ok((
Token::ElementEnd {
end: ElementEnd::Empty,
..
},
_,
)) => {
out.closed = true;
break;
}
_ => {}
}
}
Some(out)
}
/// Returns the data element at the current position
///
/// If the current position is not a data element (and is instead a <startelement>) an error
/// will be returned
pub fn try_data<'a, 'inp>(
tokens: &'a mut impl Iterator<Item = Result<(Token<'inp>, Depth), XmlError>>,
) -> Result<Cow<'inp, str>, XmlError> {
loop {
match tokens.next().map(|opt| opt.map(|opt| opt.0)) {
None => return Ok(Cow::Borrowed("")),
Some(Ok(Token::Text { text })) => return unescape(text.as_str()),
Some(Ok(e @ Token::ElementStart { .. })) => {
return Err(XmlError::custom(format!(
"Looking for a data element, found: {:?}",
e
)))
}
Some(Err(e)) => return Err(e),
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::decode::{try_data, Attr, Depth, Document, Name, StartEl};
// test helper to create a closed startel
fn closed<'a>(local: &'a str, prefix: &'a str, depth: Depth) -> StartEl<'a> {
let mut s = StartEl::new(local, prefix, depth);
s.closed = true;
s
}
#[test]
fn scoped_tokens() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().expect("valid document");
assert_eq!(root.start_el().local(), "Response");
assert_eq!(root.next_tag().expect("tag exists").start_el().local(), "A");
assert!(root.next_tag().is_none());
}
#[test]
fn handle_depth_properly() {
let xml = r#"<Response><Response></Response><A/></Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid document");
assert_eq!(
scoped.next_tag().unwrap().start_el(),
&StartEl::new("Response", "", 1)
);
let closed_a = closed("A", "", 1);
assert_eq!(scoped.next_tag().unwrap().start_el(), &closed_a);
assert!(scoped.next_tag().is_none())
}
#[test]
fn self_closing() {
let xml = r#"<Response/>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().expect("valid doc");
assert_eq!(scoped.start_el.closed, true);
assert!(scoped.next_tag().is_none())
}
#[test]
fn terminate_scope() {
let xml = r#"<Response><Struct><A></A><Also/></Struct><More/></Response>"#;
let mut doc = Document::new(xml);
let mut response_iter = doc.root_element().expect("valid doc");
let mut struct_iter = response_iter.next_tag().unwrap();
assert_eq!(
struct_iter.next_tag().as_ref().map(|t| t.start_el()),
Some(&StartEl::new("A", "", 2))
);
// When the inner iter is dropped, it will read to the end of its scope
// prevent accidental behavior where we didn't read a full node
drop(struct_iter);
assert_eq!(
response_iter.next_tag().unwrap().start_el(),
&closed("More", "", 1)
);
}
#[test]
fn read_data_invalid() {
let xml = r#"<Response><A></A></Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
try_data(&mut resp).expect_err("no data");
}
#[test]
fn read_data() {
let xml = r#"<Response>hello</Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), "hello");
}
/// Whitespace within an element is preserved
#[test]
fn read_data_whitespace() {
let xml = r#"<Response> hello </Response>"#;
let mut doc = Document::new(xml);
let mut scoped = doc.root_element().unwrap();
assert_eq!(try_data(&mut scoped).unwrap(), " hello ");
}
#[test]
fn ignore_insignificant_whitespace() {
let xml = r#"<Response> <A> </A> </Response>"#;
let mut doc = Document::new(xml);
let mut resp = doc.root_element().unwrap();
let mut a = resp.next_tag().expect("should be a");
let data = try_data(&mut a).expect("valid");
assert_eq!(data, " ");
}
#[test]
fn read_attributes() {
let xml = r#"<Response xsi:type="CanonicalUser">hello</Response>"#;
let mut tokenizer = Document::new(xml);
let root = tokenizer.root_element().unwrap();
assert_eq!(
root.start_el().attributes,
vec![Attr {
name: Name {
prefix: "xsi".into(),
local: "type".into()
},
value: "CanonicalUser".into()
}]
)
}
#[test]
fn escape_data() {
let xml = r#"<Response key=""hey">">></Response>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
assert_eq!(try_data(&mut root).unwrap(), ">");
assert_eq!(root.start_el().attr("key"), Some("\"hey\">"));
}
#[test]
fn nested_self_closer() {
let xml = r#"<XmlListsInputOutput>
<stringList/>
<stringSet></stringSet>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut string_list = root.next_tag().unwrap();
assert_eq!(string_list.start_el(), &closed("stringList", "", 1));
assert!(string_list.next_tag().is_none());
drop(string_list);
assert_eq!(
root.next_tag().unwrap().start_el(),
&StartEl::new("stringSet", "", 1)
);
}
#[test]
fn confusing_nested_same_name_tag() {
// an inner b which could be confused as closing the outer b if depth
// is not properly tracked:
let root_tags = &["a", "b", "c", "d"];
let xml = r#"<XmlListsInputOutput>
<a/>
<b>
<c/>
<b></b>
<here/>
</b>
<c></c>
<d>more</d>
</XmlListsInputOutput>"#;
let mut doc = Document::new(xml);
let mut root = doc.root_element().unwrap();
let mut cmp = vec![];
while let Some(tag) = root.next_tag() {
cmp.push(tag.start_el().local().to_owned());
}
assert_eq!(root_tags, cmp.as_slice());
}
} | random_line_split |
|
channel_router.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp;
use std::collections::HashMap;
struct Ranges {
ranges: Vec<std::ops::Range<usize>>,
}
impl Ranges {
fn new() -> Self {
Ranges { ranges: Vec::new() }
}
fn add(&mut self, start: usize, end: usize) {
let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1);
self.ranges.push(std::ops::Range { start, end });
}
fn contains(&self, start: usize, end: usize) -> bool {
let (start, end) = (cmp::min(start, end), cmp::max(start, end));
(start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v)))
}
fn contains_range(&self, range: &std::ops::Range<usize>) -> bool {
self.contains(range.start, range.end)
}
fn range_sum(&self) -> usize {
self.ranges.iter().map(|r| r.end - r.start).sum()
}
}
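// Hedged sketch (not in the original file) of `Ranges` semantics: `add`
// normalizes argument order and stores an inclusive span, while `contains`
// reports any point overlap. It lives as a child module so it can reach the
// private struct.
#[cfg(test)]
mod ranges_sketch {
    use super::Ranges;

    #[test]
    fn add_then_query() {
        let mut r = Ranges::new();
        r.add(7, 3); // order-insensitive; covers 3..=7
        assert!(r.contains(5, 5));
        assert!(r.contains(6, 9)); // overlaps at 6 and 7
        assert!(!r.contains(8, 9)); // 8 and 9 are outside 3..=7
        assert_eq!(r.range_sum(), 5);
    }
}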
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelState {
Free,
// Occupied means no connection. This is the same as a constant false.
Occupied,
// Constant true.
Constant,
Net(usize),
}
pub type ChannelLayout = [ChannelState];
impl ChannelState {
pub fn is_free(&self) -> bool {
self == &ChannelState::Free
}
pub fn contains_net(&self) -> bool {
matches!(self, ChannelState::Net(_))
}
pub fn is_constant_on(&self) -> bool {
matches!(self, ChannelState::Constant)
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ChannelOp {
Move,
Copy,
}
#[derive(Debug, Clone)]
pub struct WireConnection {
pub from: usize,
pub to: Vec<usize>,
pub mode: ChannelOp,
}
#[derive(Debug)]
pub struct ChannelSubState {
pub wires: Vec<WireConnection>,
pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>,
}
#[derive(Debug)]
struct Task {
net: usize,
from: usize,
to: Vec<usize>,
}
impl Task {
fn channel_range_required(&self) -> std::ops::Range<usize> {
let from = [self.from];
let min = self.to.iter().chain(&from).min().unwrap();
let max = self.to.iter().chain(&from).max().unwrap();
std::ops::Range {
start: *min,
end: max + 1,
}
}
fn channel_width_required(&self) -> usize {
let r = self.channel_range_required();
r.end - r.start
}
fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> {
let mut occupied = Vec::new();
for &idx in &self.to {
if layout[idx].contains_net() && layout[idx] != ChannelState::Net(self.net) {
occupied.push(idx);
}
}
occupied
}
// Returns how 'good' a new 'from' position is for this task (when evicting)
// so that we can prefer nice spots.
fn eviction_cost(&self, new_pos: usize) -> usize {
let min = self.to.iter().min().unwrap();
let max = self.to.iter().max().unwrap();
let dist = (self.from as isize - new_pos as isize).abs() as usize;
if new_pos > *max {
2 * (new_pos - *max) + dist
} else if new_pos < *min {
2 * (*min - new_pos) + dist
} else {
dist
}
}
}
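// Sketch (hypothetical values, not from the original): the eviction heuristic
// charges plain move distance inside the target span and a 2x overshoot
// penalty outside it, so in-span spots win.
#[cfg(test)]
mod eviction_cost_sketch {
    use super::Task;

    #[test]
    fn in_span_positions_are_cheaper() {
        let task = Task { net: 0, from: 4, to: vec![2, 6] };
        assert_eq!(task.eviction_cost(5), 1); // inside [2, 6]: just |4 - 5|
        assert_eq!(task.eviction_cost(8), 8); // 2 * (8 - 6) + |4 - 8|
        assert_eq!(task.eviction_cost(1), 5); // 2 * (2 - 1) + |4 - 1|
    }
}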
#[derive(Default)]
struct RouteTasks {
// source idx -> vec<target idx>
tasks: HashMap<usize, Vec<usize>>,
}
impl RouteTasks {
fn add(&mut self, from: usize, to: usize) {
if let Some(k) = self.tasks.get_mut(&from) | else {
self.tasks.insert(from, vec![to]);
}
}
fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> {
self.tasks
.drain()
.map(|(k, v)| {
let net = match src[k] {
ChannelState::Net(i) => i,
_ => unreachable!(),
};
Task {
net,
from: k,
to: v,
}
})
.collect::<Vec<_>>()
}
}
pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> {
let mut state = start.to_owned();
// Expand the state to be at least end.len() wide.
while state.len() < end.len() {
state.push(ChannelState::Free);
}
let mut tasks = RouteTasks::default();
for end_idx in 0..end.len() {
if !end[end_idx].contains_net() || end[end_idx] == state[end_idx] {
continue;
}
let state_idx = state
.iter()
.position(|v| v == &end[end_idx])
.unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx]));
tasks.add(state_idx, end_idx);
}
let mut tasks = tasks.into_tasks(&state);
// Order by how much of the channel this task occupies.
tasks.sort_by_key(|k| k.channel_width_required());
let mut steps: Vec<ChannelSubState> = Vec::new();
loop {
// Ranges of the channel that is currently occupied.
let mut ranges = Ranges::new();
// Instruction on how to connect pins in the current part of the channel.
let mut wires = Vec::new();
// To detect if we were unable to do anything due to blocked pins.
let old_task_len = tasks.len();
tasks = tasks
.drain(0..tasks.len())
.filter(|task| {
// Speed things up by only 'enforcing' 50% channel utilization.
if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) {
return true;
}
// Do we have the required part of the channel available?
if ranges.contains_range(&task.channel_range_required()) {
return true;
}
let blocking_pins = task.occupied_target_pins(&state);
if blocking_pins.is_empty() {
// Targets are free, directly move (or copy) it there.
let keep = if task.from >= end.len() || state[task.from] != end[task.from] {
state[task.from] = ChannelState::Free;
false
} else {
true
};
wires.push(WireConnection {
from: task.from,
to: task.to.clone(),
mode: if keep {
ChannelOp::Copy
} else {
ChannelOp::Move
},
});
let r = task.channel_range_required();
// -1 here since .add() + channel_range_required() will do +1.
ranges.add(r.start, r.end - 1);
for &to in &task.to {
state[to] = ChannelState::Net(task.net);
}
// We successfully handled this one.
return false;
}
true
})
.collect::<Vec<_>>();
// We were unable to handle any tasks -> we need to evict some channels.
if old_task_len == tasks.len() {
// Find available positions where we can evict to.
let mut free_positions = state
.iter()
.enumerate()
.filter(|(_, v)| !v.contains_net())
.map(|(k, _)| k)
.filter(|&k| k >= end.len() || !end[k].contains_net())
.collect::<Vec<_>>();
if free_positions.is_empty() {
println!("[!] No free positions found, expanding channel");
// Make sure that we have some room, scaling with the number of
// remaining tasks as a random tradeoff.
for _ in 0..(tasks.len() / 10 + 1) {
state.push(ChannelState::Free);
free_positions.push(state.len() - 1);
}
}
for task_idx in 0..tasks.len() {
let blocking_pins = tasks[task_idx].occupied_target_pins(&state);
for to_evict in blocking_pins {
// Find corresponding task.
let task_idx_to_evict = tasks
.iter()
.position(|t| t.from == to_evict)
.unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict));
// Find a good place for this task to evict to.
free_positions.sort_by(|&a, &b| {
// Comparing in the opposite order on purpose here so
// that we can use pop() later.
tasks[task_idx_to_evict]
.eviction_cost(b)
.cmp(&tasks[task_idx_to_evict].eviction_cost(a))
});
let from = tasks[task_idx_to_evict].from;
let new_pos = *free_positions.last().unwrap();
// Check whether the space is actually available.
let req_range = std::ops::Range {
start: cmp::min(from, new_pos),
end: cmp::max(from, new_pos) + 1,
};
if !ranges.contains_range(&req_range) {
free_positions.pop();
ranges.add(from, new_pos);
wires.push(WireConnection {
from,
to: vec![new_pos],
mode: ChannelOp::Move,
});
tasks[task_idx_to_evict].from = new_pos;
state[new_pos] = ChannelState::Net(tasks[task_idx_to_evict].net);
state[to_evict] = ChannelState::Free;
}
}
}
}
let mut bitmap =
bitmap::Bitmap::from_storage(state.len(), (), vec![0; (state.len() + 63) / 64])
.unwrap();
for idx in state
.iter()
.enumerate()
.filter(|(_, v)| v.contains_net())
.map(|(k, _)| k)
{
bitmap.set(idx, 1);
}
steps.push(ChannelSubState {
wires,
occupancy_map: bitmap,
});
if tasks.is_empty() {
return steps;
}
}
}
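// End-to-end sketch (hypothetical net numbers and slots, not from the original
// file): moving net 7 from slot 0 to slot 3 across an otherwise free channel
// should finish in a single sub-state with one Move wire.
#[cfg(test)]
mod route_channel_sketch {
    use super::{route_channel, ChannelState};

    #[test]
    fn routes_a_single_net() {
        let start = [
            ChannelState::Net(7),
            ChannelState::Free,
            ChannelState::Free,
            ChannelState::Free,
        ];
        let end = [
            ChannelState::Free,
            ChannelState::Free,
            ChannelState::Free,
            ChannelState::Net(7),
        ];
        let steps = route_channel(&start, &end);
        assert_eq!(steps.len(), 1);
        assert_eq!(steps[0].wires[0].from, 0);
        assert_eq!(steps[0].wires[0].to, vec![3]);
    }
}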
| {
k.push(to);
} | conditional_block |