file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
samplesheet.rs | //! This module contains tools to build sample sheets from lists of samples,
//! and to export sample sheets to ARResT-compatible formats.
use std::{collections::HashMap, convert::TryInto, fs::File, io::Write, path::{Path, PathBuf}};
use std::error::Error;
use crate::{models, vaultdb::MatchStatus};
use calamine::{Reader, Xlsx, open_workbook};
use diesel::{PgConnection, QueryDsl, RunQueryDsl, ExpressionMethods};
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
/// A catch-all error type
type Result<T> = std::result::Result<T, Box<dyn Error>>;
/// A sample sheet containing a list of samples
#[derive(Debug)]
pub struct SampleSheet {
/// The entries of the sample sheet
pub entries: Vec<SampleSheetEntry>,
}
/// An entry of a SampleSheet
#[derive(Debug,Default)]
pub struct SampleSheetEntry {
/// Sample data accoring to the database
pub model: models::Sample,
/// Columns usually imported from an external sample sheet.
/// These entries can overlap with basic data. During export,
/// the `override` settings control which one to use.
pub extra_cols: HashMap<String, String>
}
/// Convert DNA numbers to XX-XXXXX format, will be filled up with zeros if necessary.
///
/// If a DNA number is in a supported format, it will be normalized to a two-digit year
/// enconding, a dash sign `-` and a five-digit number. A supported input format
/// * may or may not start with a `D-` prefix
/// * must contain a number, dash, number sequence
///
/// If `dnanr` is not in a supported format, `None` is returned.
///
/// # Example
/// ```
/// assert_eq!(Some("01-12345"), normalize_dna_nr("01-12345"))
/// assert_eq!(Some("01-00123"), normalize_dna_nr("01-345"))
/// assert_eq!(Some("01-00123"), normalize_dna_nr("D-1-345"))
/// assert_eq!(None, normalize_dna_nr("asdfjklö"))
/// ```
pub(crate) fn normalize_dna_nr(dnanr: &str) -> Option<String> {
let dnanr = dnanr.strip_prefix("D-").unwrap_or(dnanr);
let parts: Vec<&str> = dnanr.split('-').collect();
if parts.len()!= 2 {
return None;
}
Some(format!(
"{:02}-{:05}",
parts[0].parse::<u32>().unwrap(),
parts[1].parse::<u32>().unwrap()
))
}
impl SampleSheetEntry {
pub fn _run_path(&self, db: &PgConnection) -> Result<PathBuf> {
use crate::schema::run;
let p: String = run::table.select(run::path).filter(run::name.eq(&self.model.run)).get_result(db)?;
Ok(PathBuf::from(p))
}
pub fn fastq_paths(&self, db: &PgConnection) -> Result<Vec<String>> {
use crate::schema::fastq;
Ok(fastq::table.select(fastq::filename).filter(fastq::sample_id.eq(self.model.id)).load(db)?)
}
// generate a short but unique string representation of the run
// to keep samples with same characteristics in different runs apart
fn get_unique_run_id(&self) -> String {
let underscore_parts: Vec<&str> = self.model.run.split('_').collect();
let dash_parts: Vec<&str> = self.model.run.split('-').collect();
format!("{}-{}", underscore_parts[0], dash_parts[dash_parts.len()-1])
}
}
impl From<models::Sample> for SampleSheetEntry {
fn from(s: models::Sample) -> Self {
SampleSheetEntry {
model: s,
extra_cols: HashMap::new()
}
}
}
impl From<Vec<models::Sample>> for SampleSheet {
fn from(ss: Vec<models::Sample>) -> Self { | }
}
fn extract_from_zip(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let zipfile = std::fs::File::open(path)?;
let mut zip = zip::ZipArchive::new(zipfile)?;
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut fastq = zip.by_name(f)?;
let target = PathBuf::from(fastq.name());
let mut local_path = PathBuf::from(targetdir);
local_path.push(prefix.clone() + &target.file_name().unwrap().to_string_lossy().to_string());
let mut targetfile = std::fs::File::create(local_path)?;
std::io::copy(&mut fastq, &mut targetfile)?;
}
Ok(())
}
fn extract_from_dir(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut src = path.to_path_buf();
src.push(f);
let mut target = PathBuf::from(targetdir);
target.push(prefix.clone() + &PathBuf::from(f).file_name().unwrap().to_string_lossy().to_string());
std::fs::copy(&src, &target)?;
}
Ok(())
}
impl SampleSheet {
pub fn new() -> Self {
SampleSheet {
entries: Vec::new(),
}
}
pub fn from_xlsx(xlsx: &str, db: &PgConnection) -> Result<Self> {
// open Excel workbook
let mut ss: Xlsx<_> = open_workbook(xlsx)?;
let sheetname = ss.sheet_names()[0].clone();
let sheet = ss.worksheet_range(&sheetname).unwrap()?;
let header_row: Vec<String> = sheet.rows().next().unwrap().iter().map(|d| d.to_string()).collect();
let col_dna_nr = header_row.iter().position(|c| *c == "DNA nr");
let col_lims_id = header_row.iter().position(|c| *c == "LIMS ID");
let col_sample = header_row.iter().position(|c| *c == "Sample");
let col_primer_set = header_row.iter().position(|c| *c == "primer set");
let col_run = header_row.iter().position(|c| *c == "run").ok_or_else(|| Box::<dyn Error>::from("Could not find required column 'run'"))?;
let mut result = SampleSheet::new();
for (row_idx, row) in sheet.rows().skip(1).enumerate() {
let run = row[col_run].to_string();
let name = col_sample.map(|col| row[col].to_string());
let primer_set = col_primer_set.map(|col| row[col].to_string());
let lims_id = col_lims_id.map(|col| row[col].to_string().parse::<i64>().ok()).flatten();
let dna_nr = col_dna_nr.map(|col| row[col].to_string());
let mut entry: SampleSheetEntry = match crate::vaultdb::match_samples(db, lims_id, dna_nr, primer_set, name, run)? {
MatchStatus::None(reason) => { warn!("Cannot find match for sample in row {}. Skipping. Reason: {}", row_idx+2, reason); continue }
MatchStatus::One(sample) => sample.into(),
MatchStatus::Multiple(v) => { warn!("Found {} matches for sample in row {}. Skipping.", row_idx+2, v.len()); continue }
};
// put all sample sheet columns as extra columns. During export, the user may select which one to use.
// Defaults to what the DB already knows
entry.extra_cols = header_row.iter().cloned().zip(row).map(|(header,data)| (header, data.to_string())).collect();
result.entries.push(entry);
}
Ok(result)
}
pub fn has_multiple_runs(&self) -> bool {
self.entries.iter().map(|e| (e.model.run.clone(), true)).collect::<HashMap<String,bool>>().into_keys().count() > 1
}
pub fn extract_fastqs(&self, db: &PgConnection, targetpath: &Path) -> Result<()> {
// Make a list of paths that correspond to the runs so we can aggregate the ZIP extractions by ZIP file/run path
let mut runs: Vec<&str> = self.entries.iter().map( |e| e.model.run.as_ref()).collect();
runs.sort_unstable();
runs.dedup();
// Discover actual run path for runs
let runpaths: HashMap<String,String> = {
use crate::schema::run;
run::table
.select((run::name, run::path))
.filter(run::name.eq_any(&runs))
.load(db)
.expect("Could not get run")
}.into_iter().collect();
// Collect run paths before we go into parallel extraction
let files: Vec<Vec<String>> = self.entries.iter().map(|e| e.fastq_paths(db)).collect::<Result<_>>()?;
// Extract FASTQs from runs sample-wise in parallel, adding a sample prefix on-the-fly
self.entries.par_iter().enumerate().for_each(|(idx, entry)| {
let runpath = PathBuf::from(runpaths.get(&entry.model.run).unwrap());
let fastqs = &files[idx];
let prefix = if runs.len() > 1 { Some( format!("{}-", entry.get_unique_run_id()) ) } else { None };
if let Some(ext) = runpath.extension() {
if ext.to_ascii_lowercase() == "zip" {
extract_from_zip(&runpath, fastqs.as_ref(), targetpath, prefix).unwrap_or_else(|e| {
error!("Cannot extract from zip file {}: {}", runpath.display(), e)
});
} else {
warn!(
"Run path {} has weird extension. Don't know what to do, skipping.",
entry.model.run
);
}
} else {
extract_from_dir(&runpath, fastqs.as_ref(), targetpath, prefix)
.unwrap_or_else(|e| error!("Cannot copy from run folder: {}", e));
}
});
Ok(())
}
pub fn write_csv<T: AsRef<str> + PartialEq> (&self, separator: &str, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h|!basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// write header
let mut csv = basic_header.join(separator);
if!all_sans_basic.is_empty() {
csv += separator;
csv += &all_sans_basic.join(separator);
}
csv += "\n";
let has_multiple_runs = self.has_multiple_runs();
for e in &self.entries {
// write basic data points
for (col_idx, col) in basic_header.iter().enumerate() {
let last = col_idx+1 == basic_header.len();
if overrides.iter().any(|x| &x.as_ref() == col) {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
} else {
match *col {
"Sample" => {
if has_multiple_runs {
csv += &format!("{}-{}", e.get_unique_run_id(), e.model.name);
} else {
csv += &e.model.name;
}
},
"run" => { csv += &e.model.run; },
"DNA nr" => { csv += &e.model.dna_nr.as_ref().unwrap_or(&String::from("")); },
"primer set" => { csv += e.model.primer_set.as_ref().unwrap_or(&String::from("")); },
"project" => { csv += &e.model.project.as_ref().unwrap_or(&String::from("")); },
"LIMS ID" => { csv += &e.model.lims_id.map(|i| i.to_string()).unwrap_or_else(|| String::from("")); },
"cells" => {
if let Some(cells) = e.model.cells.as_ref() {
csv += &cells.to_string()
} else if let Some(cells) = e.extra_cols.get(*col) {
csv += cells
}
},
s=> { error!("Unknown header: {}", s); panic!("Matching unknown basic header?!") },
}
};
if!last {
csv += separator;
}
}
if!all_sans_basic.is_empty() {
csv += separator;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
if col_idx+1 < all_sans_basic.len() {
csv += separator;
}
}
csv += "\n";
}
File::create(outfile)?.write_all(csv.as_bytes())?;
Ok(())
}
pub fn write_xlsx<T: AsRef<str> + PartialEq> (&self, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h|!basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// set up an empty file
let workbook = xlsxwriter::Workbook::new(outfile.to_str().unwrap());
let mut sheet = workbook.add_worksheet(None)?;
// write header
for (col, title) in basic_header.iter().chain(all_sans_basic.iter()).enumerate() {
sheet.write_string(0, col.clamp(0, u16::MAX.into()) as u16, title, None)?;
}
let has_multiple_runs = self.has_multiple_runs();
for (row, e) in self.entries.iter().enumerate() {
let row: u32 = (row + 1).try_into().unwrap();
// write basic data points
for (col_idx, colname) in basic_header.iter().enumerate() {
let col_idx: u16 = col_idx.try_into().unwrap();
let val = if overrides.iter().any(|x| &x.as_ref() == colname) {
e.extra_cols.get(*colname).unwrap_or(&String::from("")).to_string()
} else {
match *colname {
"Sample" => {
if has_multiple_runs {
format!("{}-{}", e.get_unique_run_id(), e.model.name)
} else {
e.model.name.to_string()
}
},
"run" => { e.model.run.to_string() },
"DNA nr" => { e.model.dna_nr.as_ref().map(|s| s.clone()).unwrap_or(String::from("")) },
"primer set" => { e.model.primer_set.as_ref().unwrap_or(&String::from("")).to_string() },
"project" => { e.model.project.as_ref().map(|s| s.clone()).unwrap_or(String::from("")) },
"LIMS ID" => { e.model.lims_id.map(|i| i.to_string()).unwrap_or_else(|| String::from("")) },
"cells" => {
if let Some(cells) = e.model.cells.as_ref() {
cells.to_string()
} else if let Some(cells) = e.extra_cols.get(*colname) {
cells.to_string()
} else {
String::from("")
}
},
s=> { error!("Unknown header: {}", s); panic!("Matching unknown basic header?!") },
}
};
sheet.write_string(row, col_idx, &val, None)?;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
let col_idx: u16 = (basic_header.len() + col_idx).try_into().unwrap();
sheet.write_string(row, col_idx, e.extra_cols.get(*col).unwrap_or(&String::from("")), None)?;
}
}
Ok(())
}
} | SampleSheet {
entries: ss.into_iter().map(|s| s.into()).collect()
} | random_line_split |
samplesheet.rs | //! This module contains tools to build sample sheets from lists of samples,
//! and to export sample sheets to ARResT-compatible formats.
use std::{collections::HashMap, convert::TryInto, fs::File, io::Write, path::{Path, PathBuf}};
use std::error::Error;
use crate::{models, vaultdb::MatchStatus};
use calamine::{Reader, Xlsx, open_workbook};
use diesel::{PgConnection, QueryDsl, RunQueryDsl, ExpressionMethods};
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
/// A catch-all error type
type Result<T> = std::result::Result<T, Box<dyn Error>>;
/// A sample sheet containing a list of samples
#[derive(Debug)]
pub struct SampleSheet {
/// The entries of the sample sheet
pub entries: Vec<SampleSheetEntry>,
}
/// An entry of a SampleSheet
#[derive(Debug,Default)]
pub struct SampleSheetEntry {
/// Sample data accoring to the database
pub model: models::Sample,
/// Columns usually imported from an external sample sheet.
/// These entries can overlap with basic data. During export,
/// the `override` settings control which one to use.
pub extra_cols: HashMap<String, String>
}
/// Convert DNA numbers to XX-XXXXX format, will be filled up with zeros if necessary.
///
/// If a DNA number is in a supported format, it will be normalized to a two-digit year
/// enconding, a dash sign `-` and a five-digit number. A supported input format
/// * may or may not start with a `D-` prefix
/// * must contain a number, dash, number sequence
///
/// If `dnanr` is not in a supported format, `None` is returned.
///
/// # Example
/// ```
/// assert_eq!(Some("01-12345"), normalize_dna_nr("01-12345"))
/// assert_eq!(Some("01-00123"), normalize_dna_nr("01-345"))
/// assert_eq!(Some("01-00123"), normalize_dna_nr("D-1-345"))
/// assert_eq!(None, normalize_dna_nr("asdfjklö"))
/// ```
pub(crate) fn normalize_dna_nr(dnanr: &str) -> Option<String> {
let dnanr = dnanr.strip_prefix("D-").unwrap_or(dnanr);
let parts: Vec<&str> = dnanr.split('-').collect();
if parts.len()!= 2 {
return None;
}
Some(format!(
"{:02}-{:05}",
parts[0].parse::<u32>().unwrap(),
parts[1].parse::<u32>().unwrap()
))
}
impl SampleSheetEntry {
pub fn _run_path(&self, db: &PgConnection) -> Result<PathBuf> {
use crate::schema::run;
let p: String = run::table.select(run::path).filter(run::name.eq(&self.model.run)).get_result(db)?;
Ok(PathBuf::from(p))
}
pub fn f | &self, db: &PgConnection) -> Result<Vec<String>> {
use crate::schema::fastq;
Ok(fastq::table.select(fastq::filename).filter(fastq::sample_id.eq(self.model.id)).load(db)?)
}
// generate a short but unique string representation of the run
// to keep samples with same characteristics in different runs apart
fn get_unique_run_id(&self) -> String {
let underscore_parts: Vec<&str> = self.model.run.split('_').collect();
let dash_parts: Vec<&str> = self.model.run.split('-').collect();
format!("{}-{}", underscore_parts[0], dash_parts[dash_parts.len()-1])
}
}
impl From<models::Sample> for SampleSheetEntry {
fn from(s: models::Sample) -> Self {
SampleSheetEntry {
model: s,
extra_cols: HashMap::new()
}
}
}
impl From<Vec<models::Sample>> for SampleSheet {
fn from(ss: Vec<models::Sample>) -> Self {
SampleSheet {
entries: ss.into_iter().map(|s| s.into()).collect()
}
}
}
fn extract_from_zip(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let zipfile = std::fs::File::open(path)?;
let mut zip = zip::ZipArchive::new(zipfile)?;
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut fastq = zip.by_name(f)?;
let target = PathBuf::from(fastq.name());
let mut local_path = PathBuf::from(targetdir);
local_path.push(prefix.clone() + &target.file_name().unwrap().to_string_lossy().to_string());
let mut targetfile = std::fs::File::create(local_path)?;
std::io::copy(&mut fastq, &mut targetfile)?;
}
Ok(())
}
fn extract_from_dir(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut src = path.to_path_buf();
src.push(f);
let mut target = PathBuf::from(targetdir);
target.push(prefix.clone() + &PathBuf::from(f).file_name().unwrap().to_string_lossy().to_string());
std::fs::copy(&src, &target)?;
}
Ok(())
}
impl SampleSheet {
pub fn new() -> Self {
SampleSheet {
entries: Vec::new(),
}
}
pub fn from_xlsx(xlsx: &str, db: &PgConnection) -> Result<Self> {
// open Excel workbook
let mut ss: Xlsx<_> = open_workbook(xlsx)?;
let sheetname = ss.sheet_names()[0].clone();
let sheet = ss.worksheet_range(&sheetname).unwrap()?;
let header_row: Vec<String> = sheet.rows().next().unwrap().iter().map(|d| d.to_string()).collect();
let col_dna_nr = header_row.iter().position(|c| *c == "DNA nr");
let col_lims_id = header_row.iter().position(|c| *c == "LIMS ID");
let col_sample = header_row.iter().position(|c| *c == "Sample");
let col_primer_set = header_row.iter().position(|c| *c == "primer set");
let col_run = header_row.iter().position(|c| *c == "run").ok_or_else(|| Box::<dyn Error>::from("Could not find required column 'run'"))?;
let mut result = SampleSheet::new();
for (row_idx, row) in sheet.rows().skip(1).enumerate() {
let run = row[col_run].to_string();
let name = col_sample.map(|col| row[col].to_string());
let primer_set = col_primer_set.map(|col| row[col].to_string());
let lims_id = col_lims_id.map(|col| row[col].to_string().parse::<i64>().ok()).flatten();
let dna_nr = col_dna_nr.map(|col| row[col].to_string());
let mut entry: SampleSheetEntry = match crate::vaultdb::match_samples(db, lims_id, dna_nr, primer_set, name, run)? {
MatchStatus::None(reason) => { warn!("Cannot find match for sample in row {}. Skipping. Reason: {}", row_idx+2, reason); continue }
MatchStatus::One(sample) => sample.into(),
MatchStatus::Multiple(v) => { warn!("Found {} matches for sample in row {}. Skipping.", row_idx+2, v.len()); continue }
};
// put all sample sheet columns as extra columns. During export, the user may select which one to use.
// Defaults to what the DB already knows
entry.extra_cols = header_row.iter().cloned().zip(row).map(|(header,data)| (header, data.to_string())).collect();
result.entries.push(entry);
}
Ok(result)
}
pub fn has_multiple_runs(&self) -> bool {
self.entries.iter().map(|e| (e.model.run.clone(), true)).collect::<HashMap<String,bool>>().into_keys().count() > 1
}
pub fn extract_fastqs(&self, db: &PgConnection, targetpath: &Path) -> Result<()> {
// Make a list of paths that correspond to the runs so we can aggregate the ZIP extractions by ZIP file/run path
let mut runs: Vec<&str> = self.entries.iter().map( |e| e.model.run.as_ref()).collect();
runs.sort_unstable();
runs.dedup();
// Discover actual run path for runs
let runpaths: HashMap<String,String> = {
use crate::schema::run;
run::table
.select((run::name, run::path))
.filter(run::name.eq_any(&runs))
.load(db)
.expect("Could not get run")
}.into_iter().collect();
// Collect run paths before we go into parallel extraction
let files: Vec<Vec<String>> = self.entries.iter().map(|e| e.fastq_paths(db)).collect::<Result<_>>()?;
// Extract FASTQs from runs sample-wise in parallel, adding a sample prefix on-the-fly
self.entries.par_iter().enumerate().for_each(|(idx, entry)| {
let runpath = PathBuf::from(runpaths.get(&entry.model.run).unwrap());
let fastqs = &files[idx];
let prefix = if runs.len() > 1 { Some( format!("{}-", entry.get_unique_run_id()) ) } else { None };
if let Some(ext) = runpath.extension() {
if ext.to_ascii_lowercase() == "zip" {
extract_from_zip(&runpath, fastqs.as_ref(), targetpath, prefix).unwrap_or_else(|e| {
error!("Cannot extract from zip file {}: {}", runpath.display(), e)
});
} else {
warn!(
"Run path {} has weird extension. Don't know what to do, skipping.",
entry.model.run
);
}
} else {
extract_from_dir(&runpath, fastqs.as_ref(), targetpath, prefix)
.unwrap_or_else(|e| error!("Cannot copy from run folder: {}", e));
}
});
Ok(())
}
pub fn write_csv<T: AsRef<str> + PartialEq> (&self, separator: &str, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h|!basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// write header
let mut csv = basic_header.join(separator);
if!all_sans_basic.is_empty() {
csv += separator;
csv += &all_sans_basic.join(separator);
}
csv += "\n";
let has_multiple_runs = self.has_multiple_runs();
for e in &self.entries {
// write basic data points
for (col_idx, col) in basic_header.iter().enumerate() {
let last = col_idx+1 == basic_header.len();
if overrides.iter().any(|x| &x.as_ref() == col) {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
} else {
match *col {
"Sample" => {
if has_multiple_runs {
csv += &format!("{}-{}", e.get_unique_run_id(), e.model.name);
} else {
csv += &e.model.name;
}
},
"run" => { csv += &e.model.run; },
"DNA nr" => { csv += &e.model.dna_nr.as_ref().unwrap_or(&String::from("")); },
"primer set" => { csv += e.model.primer_set.as_ref().unwrap_or(&String::from("")); },
"project" => { csv += &e.model.project.as_ref().unwrap_or(&String::from("")); },
"LIMS ID" => { csv += &e.model.lims_id.map(|i| i.to_string()).unwrap_or_else(|| String::from("")); },
"cells" => {
if let Some(cells) = e.model.cells.as_ref() {
csv += &cells.to_string()
} else if let Some(cells) = e.extra_cols.get(*col) {
csv += cells
}
},
s=> { error!("Unknown header: {}", s); panic!("Matching unknown basic header?!") },
}
};
if!last {
csv += separator;
}
}
if!all_sans_basic.is_empty() {
csv += separator;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
if col_idx+1 < all_sans_basic.len() {
csv += separator;
}
}
csv += "\n";
}
File::create(outfile)?.write_all(csv.as_bytes())?;
Ok(())
}
pub fn write_xlsx<T: AsRef<str> + PartialEq> (&self, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h|!basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// set up an empty file
let workbook = xlsxwriter::Workbook::new(outfile.to_str().unwrap());
let mut sheet = workbook.add_worksheet(None)?;
// write header
for (col, title) in basic_header.iter().chain(all_sans_basic.iter()).enumerate() {
sheet.write_string(0, col.clamp(0, u16::MAX.into()) as u16, title, None)?;
}
let has_multiple_runs = self.has_multiple_runs();
for (row, e) in self.entries.iter().enumerate() {
let row: u32 = (row + 1).try_into().unwrap();
// write basic data points
for (col_idx, colname) in basic_header.iter().enumerate() {
let col_idx: u16 = col_idx.try_into().unwrap();
let val = if overrides.iter().any(|x| &x.as_ref() == colname) {
e.extra_cols.get(*colname).unwrap_or(&String::from("")).to_string()
} else {
match *colname {
"Sample" => {
if has_multiple_runs {
format!("{}-{}", e.get_unique_run_id(), e.model.name)
} else {
e.model.name.to_string()
}
},
"run" => { e.model.run.to_string() },
"DNA nr" => { e.model.dna_nr.as_ref().map(|s| s.clone()).unwrap_or(String::from("")) },
"primer set" => { e.model.primer_set.as_ref().unwrap_or(&String::from("")).to_string() },
"project" => { e.model.project.as_ref().map(|s| s.clone()).unwrap_or(String::from("")) },
"LIMS ID" => { e.model.lims_id.map(|i| i.to_string()).unwrap_or_else(|| String::from("")) },
"cells" => {
if let Some(cells) = e.model.cells.as_ref() {
cells.to_string()
} else if let Some(cells) = e.extra_cols.get(*colname) {
cells.to_string()
} else {
String::from("")
}
},
s=> { error!("Unknown header: {}", s); panic!("Matching unknown basic header?!") },
}
};
sheet.write_string(row, col_idx, &val, None)?;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
let col_idx: u16 = (basic_header.len() + col_idx).try_into().unwrap();
sheet.write_string(row, col_idx, e.extra_cols.get(*col).unwrap_or(&String::from("")), None)?;
}
}
Ok(())
}
}
| astq_paths( | identifier_name |
samplesheet.rs | //! This module contains tools to build sample sheets from lists of samples,
//! and to export sample sheets to ARResT-compatible formats.
use std::{collections::HashMap, convert::TryInto, fs::File, io::Write, path::{Path, PathBuf}};
use std::error::Error;
use crate::{models, vaultdb::MatchStatus};
use calamine::{Reader, Xlsx, open_workbook};
use diesel::{PgConnection, QueryDsl, RunQueryDsl, ExpressionMethods};
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
/// A catch-all error type
type Result<T> = std::result::Result<T, Box<dyn Error>>;
/// A sample sheet containing a list of samples
#[derive(Debug)]
pub struct SampleSheet {
/// The entries of the sample sheet
pub entries: Vec<SampleSheetEntry>,
}
/// An entry of a SampleSheet
#[derive(Debug,Default)]
pub struct SampleSheetEntry {
/// Sample data accoring to the database
pub model: models::Sample,
/// Columns usually imported from an external sample sheet.
/// These entries can overlap with basic data. During export,
/// the `override` settings control which one to use.
pub extra_cols: HashMap<String, String>
}
/// Convert DNA numbers to XX-XXXXX format, will be filled up with zeros if necessary.
///
/// If a DNA number is in a supported format, it will be normalized to a two-digit year
/// enconding, a dash sign `-` and a five-digit number. A supported input format
/// * may or may not start with a `D-` prefix
/// * must contain a number, dash, number sequence
///
/// If `dnanr` is not in a supported format, `None` is returned.
///
/// # Example
/// ```
/// assert_eq!(Some("01-12345"), normalize_dna_nr("01-12345"))
/// assert_eq!(Some("01-00123"), normalize_dna_nr("01-345"))
/// assert_eq!(Some("01-00123"), normalize_dna_nr("D-1-345"))
/// assert_eq!(None, normalize_dna_nr("asdfjklö"))
/// ```
pub(crate) fn normalize_dna_nr(dnanr: &str) -> Option<String> {
let dnanr = dnanr.strip_prefix("D-").unwrap_or(dnanr);
let parts: Vec<&str> = dnanr.split('-').collect();
if parts.len()!= 2 {
return None;
}
Some(format!(
"{:02}-{:05}",
parts[0].parse::<u32>().unwrap(),
parts[1].parse::<u32>().unwrap()
))
}
impl SampleSheetEntry {
pub fn _run_path(&self, db: &PgConnection) -> Result<PathBuf> {
use crate::schema::run;
let p: String = run::table.select(run::path).filter(run::name.eq(&self.model.run)).get_result(db)?;
Ok(PathBuf::from(p))
}
pub fn fastq_paths(&self, db: &PgConnection) -> Result<Vec<String>> {
use crate::schema::fastq;
Ok(fastq::table.select(fastq::filename).filter(fastq::sample_id.eq(self.model.id)).load(db)?)
}
// generate a short but unique string representation of the run
// to keep samples with same characteristics in different runs apart
fn get_unique_run_id(&self) -> String {
let underscore_parts: Vec<&str> = self.model.run.split('_').collect();
let dash_parts: Vec<&str> = self.model.run.split('-').collect();
format!("{}-{}", underscore_parts[0], dash_parts[dash_parts.len()-1])
}
}
impl From<models::Sample> for SampleSheetEntry {
fn from(s: models::Sample) -> Self {
SampleSheetEntry {
model: s,
extra_cols: HashMap::new()
}
}
}
impl From<Vec<models::Sample>> for SampleSheet {
fn from(ss: Vec<models::Sample>) -> Self {
SampleSheet {
entries: ss.into_iter().map(|s| s.into()).collect()
}
}
}
fn extract_from_zip(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let zipfile = std::fs::File::open(path)?;
let mut zip = zip::ZipArchive::new(zipfile)?;
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut fastq = zip.by_name(f)?;
let target = PathBuf::from(fastq.name());
let mut local_path = PathBuf::from(targetdir);
local_path.push(prefix.clone() + &target.file_name().unwrap().to_string_lossy().to_string());
let mut targetfile = std::fs::File::create(local_path)?;
std::io::copy(&mut fastq, &mut targetfile)?;
}
Ok(())
}
fn extract_from_dir(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut src = path.to_path_buf();
src.push(f);
let mut target = PathBuf::from(targetdir);
target.push(prefix.clone() + &PathBuf::from(f).file_name().unwrap().to_string_lossy().to_string());
std::fs::copy(&src, &target)?;
}
Ok(())
}
impl SampleSheet {
pub fn new() -> Self {
SampleSheet {
entries: Vec::new(),
}
}
pub fn from_xlsx(xlsx: &str, db: &PgConnection) -> Result<Self> {
// open Excel workbook
let mut ss: Xlsx<_> = open_workbook(xlsx)?;
let sheetname = ss.sheet_names()[0].clone();
let sheet = ss.worksheet_range(&sheetname).unwrap()?;
let header_row: Vec<String> = sheet.rows().next().unwrap().iter().map(|d| d.to_string()).collect();
let col_dna_nr = header_row.iter().position(|c| *c == "DNA nr");
let col_lims_id = header_row.iter().position(|c| *c == "LIMS ID");
let col_sample = header_row.iter().position(|c| *c == "Sample");
let col_primer_set = header_row.iter().position(|c| *c == "primer set");
let col_run = header_row.iter().position(|c| *c == "run").ok_or_else(|| Box::<dyn Error>::from("Could not find required column 'run'"))?;
let mut result = SampleSheet::new();
for (row_idx, row) in sheet.rows().skip(1).enumerate() {
let run = row[col_run].to_string();
let name = col_sample.map(|col| row[col].to_string());
let primer_set = col_primer_set.map(|col| row[col].to_string());
let lims_id = col_lims_id.map(|col| row[col].to_string().parse::<i64>().ok()).flatten();
let dna_nr = col_dna_nr.map(|col| row[col].to_string());
let mut entry: SampleSheetEntry = match crate::vaultdb::match_samples(db, lims_id, dna_nr, primer_set, name, run)? {
MatchStatus::None(reason) => { warn!("Cannot find match for sample in row {}. Skipping. Reason: {}", row_idx+2, reason); continue }
MatchStatus::One(sample) => sample.into(),
MatchStatus::Multiple(v) => { warn!("Found {} matches for sample in row {}. Skipping.", row_idx+2, v.len()); continue }
};
// put all sample sheet columns as extra columns. During export, the user may select which one to use.
// Defaults to what the DB already knows
entry.extra_cols = header_row.iter().cloned().zip(row).map(|(header,data)| (header, data.to_string())).collect();
result.entries.push(entry);
}
Ok(result)
}
pub fn has_multiple_runs(&self) -> bool {
self.entries.iter().map(|e| (e.model.run.clone(), true)).collect::<HashMap<String,bool>>().into_keys().count() > 1
}
pub fn extract_fastqs(&self, db: &PgConnection, targetpath: &Path) -> Result<()> {
// Make a list of paths that correspond to the runs so we can aggregate the ZIP extractions by ZIP file/run path
let mut runs: Vec<&str> = self.entries.iter().map( |e| e.model.run.as_ref()).collect();
runs.sort_unstable();
runs.dedup();
// Discover actual run path for runs
let runpaths: HashMap<String,String> = {
use crate::schema::run;
run::table
.select((run::name, run::path))
.filter(run::name.eq_any(&runs))
.load(db)
.expect("Could not get run")
}.into_iter().collect();
// Collect run paths before we go into parallel extraction
let files: Vec<Vec<String>> = self.entries.iter().map(|e| e.fastq_paths(db)).collect::<Result<_>>()?;
// Extract FASTQs from runs sample-wise in parallel, adding a sample prefix on-the-fly
self.entries.par_iter().enumerate().for_each(|(idx, entry)| {
let runpath = PathBuf::from(runpaths.get(&entry.model.run).unwrap());
let fastqs = &files[idx];
let prefix = if runs.len() > 1 { Some( format!("{}-", entry.get_unique_run_id()) ) } else { None };
if let Some(ext) = runpath.extension() {
if ext.to_ascii_lowercase() == "zip" {
extract_from_zip(&runpath, fastqs.as_ref(), targetpath, prefix).unwrap_or_else(|e| {
error!("Cannot extract from zip file {}: {}", runpath.display(), e)
});
} else {
warn!(
"Run path {} has weird extension. Don't know what to do, skipping.",
entry.model.run
);
}
} else {
extract_from_dir(&runpath, fastqs.as_ref(), targetpath, prefix)
.unwrap_or_else(|e| error!("Cannot copy from run folder: {}", e));
}
});
Ok(())
}
pub fn write_csv<T: AsRef<str> + PartialEq> (&self, separator: &str, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h|!basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// write header
let mut csv = basic_header.join(separator);
if!all_sans_basic.is_empty() {
csv += separator;
csv += &all_sans_basic.join(separator);
}
csv += "\n";
let has_multiple_runs = self.has_multiple_runs();
for e in &self.entries {
// write basic data points
for (col_idx, col) in basic_header.iter().enumerate() {
let last = col_idx+1 == basic_header.len();
if overrides.iter().any(|x| &x.as_ref() == col) {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
} else {
match *col {
"Sample" => {
if has_multiple_runs {
csv += &format!("{}-{}", e.get_unique_run_id(), e.model.name);
} else {
csv += &e.model.name;
}
},
"run" => { csv += &e.model.run; },
"DNA nr" => { csv += &e.model.dna_nr.as_ref().unwrap_or(&String::from("")); },
"primer set" => { csv += e.model.primer_set.as_ref().unwrap_or(&String::from("")); },
"project" => { csv += &e.model.project.as_ref().unwrap_or(&String::from("")); },
"LIMS ID" => { csv += &e.model.lims_id.map(|i| i.to_string()).unwrap_or_else(|| String::from("")); },
"cells" => {
if let Some(cells) = e.model.cells.as_ref() {
csv += &cells.to_string()
} else if let Some(cells) = e.extra_cols.get(*col) {
csv += cells
}
},
s=> { error!("Unknown header: {}", s); panic!("Matching unknown basic header?!") },
}
};
if!last {
csv += separator;
}
}
if!all_sans_basic.is_empty() {
csv += separator;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
if col_idx+1 < all_sans_basic.len() {
csv += separator;
}
}
csv += "\n";
}
File::create(outfile)?.write_all(csv.as_bytes())?;
Ok(())
}
pub fn write_xlsx<T: AsRef<str> + PartialEq> (&self, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h|!basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// set up an empty file
let workbook = xlsxwriter::Workbook::new(outfile.to_str().unwrap());
let mut sheet = workbook.add_worksheet(None)?;
// write header
for (col, title) in basic_header.iter().chain(all_sans_basic.iter()).enumerate() {
sheet.write_string(0, col.clamp(0, u16::MAX.into()) as u16, title, None)?;
}
let has_multiple_runs = self.has_multiple_runs();
for (row, e) in self.entries.iter().enumerate() {
let row: u32 = (row + 1).try_into().unwrap();
// write basic data points
for (col_idx, colname) in basic_header.iter().enumerate() {
let col_idx: u16 = col_idx.try_into().unwrap();
let val = if overrides.iter().any(|x| &x.as_ref() == colname) {
e.extra_cols.get(*colname).unwrap_or(&String::from("")).to_string()
} else {
match *colname {
"Sample" => {
if has_multiple_runs {
format!("{}-{}", e.get_unique_run_id(), e.model.name)
} else {
e.model.name.to_string()
}
},
"run" => { e.model.run.to_string() },
"DNA nr" => { |
"primer set" => { e.model.primer_set.as_ref().unwrap_or(&String::from("")).to_string() },
"project" => { e.model.project.as_ref().map(|s| s.clone()).unwrap_or(String::from("")) },
"LIMS ID" => { e.model.lims_id.map(|i| i.to_string()).unwrap_or_else(|| String::from("")) },
"cells" => {
if let Some(cells) = e.model.cells.as_ref() {
cells.to_string()
} else if let Some(cells) = e.extra_cols.get(*colname) {
cells.to_string()
} else {
String::from("")
}
},
s=> { error!("Unknown header: {}", s); panic!("Matching unknown basic header?!") },
}
};
sheet.write_string(row, col_idx, &val, None)?;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
let col_idx: u16 = (basic_header.len() + col_idx).try_into().unwrap();
sheet.write_string(row, col_idx, e.extra_cols.get(*col).unwrap_or(&String::from("")), None)?;
}
}
Ok(())
}
}
| e.model.dna_nr.as_ref().map(|s| s.clone()).unwrap_or(String::from("")) }, | conditional_block |
lib.rs | #![allow(clippy::mutable_key_type)]
use common::{anyhow::Result, NetworkType};
use core_extensions::{build_extensions, ExtensionsConfig, CURRENT_EPOCH, MATURE_THRESHOLD};
use core_rpc::{
CkbRpc, CkbRpcClient, MercuryRpc, MercuryRpcImpl, CURRENT_BLOCK_NUMBER, TX_POOL_CACHE,
USE_HEX_FORMAT,
};
use core_storage::{BatchStore, RocksdbStore, Store};
use ckb_indexer::indexer::Indexer;
use ckb_indexer::service::{IndexerRpc, IndexerRpcImpl};
use ckb_jsonrpc_types::RawTxPool;
use ckb_types::core::{BlockNumber, BlockView, RationalU256};
use ckb_types::{packed, H256, U256};
use jsonrpc_core::IoHandler;
use jsonrpc_http_server::{Server, ServerBuilder};
use jsonrpc_server_utils::cors::AccessControlAllowOrigin;
use jsonrpc_server_utils::hosts::DomainsValidation;
use log::{error, info, warn};
use rocksdb::{checkpoint::Checkpoint, DB};
use tokio::time::{sleep, Duration};
use std::collections::HashSet;
use std::net::ToSocketAddrs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
const KEEP_NUM: u64 = 100;
const PRUNE_INTERVAL: u64 = 1000;
const GENESIS_NUMBER: u64 = 0;
// Adapted from https://github.com/nervosnetwork/ckb-indexer/blob/290ae55a2d2acfc3d466a69675a1a58fcade7f5d/src/service.rs#L25
// with extensions for more indexing features.
pub struct Service {
store: RocksdbStore,
ckb_client: CkbRpcClient,
poll_interval: Duration,
listen_address: String,
rpc_thread_num: usize,
network_type: NetworkType,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: PathBuf,
cellbase_maturity: RationalU256,
cheque_since: U256,
}
impl Service {
pub fn new(
store_path: &str,
listen_address: &str,
poll_interval: Duration,
rpc_thread_num: usize,
network_ty: &str,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: &str,
cellbase_maturity: u64,
ckb_uri: String,
cheque_since: u64,
) -> Self {
let store = RocksdbStore::new(store_path);
let ckb_client = CkbRpcClient::new(ckb_uri);
let network_type = NetworkType::from_raw_str(network_ty).expect("invalid network type");
let listen_address = listen_address.to_string();
let snapshot_path = Path::new(snapshot_path).to_path_buf();
let cellbase_maturity = RationalU256::from_u256(U256::from(cellbase_maturity));
let cheque_since: U256 = cheque_since.into();
info!("Mercury running in CKB {:?}", network_type);
Service {
store,
ckb_client,
poll_interval,
listen_address,
rpc_thread_num,
network_type,
extensions_config,
snapshot_interval,
snapshot_path,
cellbase_maturity,
cheque_since,
}
}
pub fn init(&self) -> Server {
let mut io_handler = IoHandler::new();
let mercury_rpc_impl = MercuryRpcImpl::new(
self.store.clone(),
self.network_type,
self.ckb_client.clone(),
self.cheque_since.clone(),
self.extensions_config.to_rpc_config(),
);
let indexer_rpc_impl = IndexerRpcImpl {
version: "0.2.1".to_string(),
store: self.store.clone(),
};
io_handler.extend_with(indexer_rpc_impl.to_delegate());
io_handler.extend_with(mercury_rpc_impl.to_delegate());
info!("Running!");
ServerBuilder::new(io_handler)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Null,
AccessControlAllowOrigin::Any,
]))
.threads(self.rpc_thread_num)
.health_api(("/ping", "ping"))
.start_http(
&self
.listen_address
.to_socket_addrs()
.expect("config listen_address parsed")
.next()
.expect("listen_address parsed"),
)
.expect("Start Jsonrpc HTTP service")
}
#[allow(clippy::cmp_owned)]
pub async fn start(&self) {
// 0.37.0 and above supports hex format
let use_hex_format = loop {
match self.ckb_client.local_node_info().await {
Ok(local_node_info) => {
break local_node_info.version > "0.36".to_owned();
}
Err(err) => {
// < 0.32.0 compatibility
if format!("#{}", err).contains("missing field") {
break false;
}
error!("cannot get local_node_info from ckb node: {}", err);
std::thread::sleep(self.poll_interval);
}
}
};
USE_HEX_FORMAT.swap(Arc::new(use_hex_format));
let use_hex = use_hex_format;
let client_clone = self.ckb_client.clone();
tokio::spawn(async move {
update_tx_pool_cache(client_clone, use_hex).await;
});
self.run(use_hex_format).await;
}
async fn run(&self, use_hex_format: bool) {
let mut tip = 0;
loop {
let batch_store =
BatchStore::create(self.store.clone()).expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(batch_store.clone(), KEEP_NUM, u64::MAX));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
batch_store.clone(),
)
.expect("extension building failure");
let append_block_func = |block: BlockView| {
extensions.iter().for_each(|extension| {
extension
.append(&block)
.unwrap_or_else(|e| panic!("append block error {:?}", e))
});
indexer.append(&block).expect("append block should be OK");
};
// TODO: load tip first so extensions do not need to store their
// own tip?
let rollback_func = |tip_number: BlockNumber, tip_hash: packed::Byte32| {
indexer.rollback().expect("rollback block should be OK");
extensions.iter().for_each(|extension| {
extension
.rollback(tip_number, &tip_hash)
.unwrap_or_else(|e| panic!("rollback error {:?}", e))
});
}; | match self
.get_block_by_number(tip_number + 1, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
if block.parent_hash() == tip_hash {
info!("append {}, {}", block.number(), block.hash());
append_block_func(block.clone());
prune = (block.number() % PRUNE_INTERVAL) == 0;
} else {
info!("rollback {}, {}", tip_number, tip_hash);
rollback_func(tip_number, tip_hash);
}
}
Ok(None) => {
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
} else {
match self
.get_block_by_number(GENESIS_NUMBER, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
append_block_func(block);
}
Ok(None) => {
error!("ckb node returns an empty genesis block");
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get genesis block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
}
batch_store.commit().expect("commit should be OK");
let _ = *CURRENT_BLOCK_NUMBER.swap(Arc::new(tip));
if prune {
let store = BatchStore::create(self.store.clone())
.expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(store.clone(), KEEP_NUM, PRUNE_INTERVAL));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
store.clone(),
)
.expect("extension building failure");
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
indexer.prune().expect("indexer prune should be OK");
for extension in extensions.iter() {
extension
.prune(tip_number, &tip_hash, KEEP_NUM)
.expect("extension prune should be OK");
}
}
store.commit().expect("commit should be OK");
}
self.snapshot(tip);
}
}
async fn get_block_by_number(
&self,
block_number: BlockNumber,
use_hex_format: bool,
) -> Result<Option<BlockView>> {
self.ckb_client
.get_block_by_number(block_number, use_hex_format)
.await
.map(|res| res.map(Into::into))
}
fn snapshot(&self, height: u64) {
if height % self.snapshot_interval!= 0 {
return;
}
let mut path = self.snapshot_path.clone();
path.push(height.to_string());
let store = self.store.clone();
tokio::spawn(async move {
if let Err(e) = create_checkpoint(store.inner(), path) {
error!("build {} checkpoint failed: {:?}", height, e);
}
});
}
fn change_current_epoch(&self, current_epoch: RationalU256) {
self.change_maturity_threshold(current_epoch.clone());
let mut epoch = CURRENT_EPOCH.write();
*epoch = current_epoch;
}
fn change_maturity_threshold(&self, current_epoch: RationalU256) {
if current_epoch < self.cellbase_maturity {
return;
}
let new = current_epoch - self.cellbase_maturity.clone();
let mut threshold = MATURE_THRESHOLD.write();
*threshold = new;
}
}
fn create_checkpoint(db: &DB, path: PathBuf) -> Result<()> {
Checkpoint::new(db)?.create_checkpoint(path)?;
Ok(())
}
async fn update_tx_pool_cache(ckb_client: CkbRpcClient, use_hex_format: bool) {
loop {
match ckb_client.get_raw_tx_pool(Some(use_hex_format)).await {
Ok(raw_pool) => handle_raw_tx_pool(&ckb_client, raw_pool).await,
Err(e) => error!("get raw tx pool error {:?}", e),
}
sleep(Duration::from_millis(350)).await;
}
}
async fn handle_raw_tx_pool(ckb_client: &CkbRpcClient, raw_pool: RawTxPool) {
let mut input_set: HashSet<packed::OutPoint> = HashSet::new();
let hashes = tx_hash_list(raw_pool);
if let Ok(res) = ckb_client.get_transactions(hashes).await {
for item in res.iter() {
if let Some(tx) = item {
for input in tx.transaction.inner.inputs.clone().into_iter() {
input_set.insert(input.previous_output.into());
}
} else {
warn!("Get transaction from pool failed");
}
}
}
let mut pool_cache = TX_POOL_CACHE.write();
*pool_cache = input_set;
}
fn tx_hash_list(raw_pool: RawTxPool) -> Vec<H256> {
match raw_pool {
RawTxPool::Ids(mut ids) => {
let mut ret = ids.pending;
ret.append(&mut ids.proposed);
ret
}
RawTxPool::Verbose(map) => {
let mut ret = map.pending.into_iter().map(|(k, _v)| k).collect::<Vec<_>>();
let mut proposed = map
.proposed
.into_iter()
.map(|(k, _v)| k)
.collect::<Vec<_>>();
ret.append(&mut proposed);
ret
}
}
} |
let mut prune = false;
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
tip = tip_number;
| random_line_split |
lib.rs | #![allow(clippy::mutable_key_type)]
use common::{anyhow::Result, NetworkType};
use core_extensions::{build_extensions, ExtensionsConfig, CURRENT_EPOCH, MATURE_THRESHOLD};
use core_rpc::{
CkbRpc, CkbRpcClient, MercuryRpc, MercuryRpcImpl, CURRENT_BLOCK_NUMBER, TX_POOL_CACHE,
USE_HEX_FORMAT,
};
use core_storage::{BatchStore, RocksdbStore, Store};
use ckb_indexer::indexer::Indexer;
use ckb_indexer::service::{IndexerRpc, IndexerRpcImpl};
use ckb_jsonrpc_types::RawTxPool;
use ckb_types::core::{BlockNumber, BlockView, RationalU256};
use ckb_types::{packed, H256, U256};
use jsonrpc_core::IoHandler;
use jsonrpc_http_server::{Server, ServerBuilder};
use jsonrpc_server_utils::cors::AccessControlAllowOrigin;
use jsonrpc_server_utils::hosts::DomainsValidation;
use log::{error, info, warn};
use rocksdb::{checkpoint::Checkpoint, DB};
use tokio::time::{sleep, Duration};
use std::collections::HashSet;
use std::net::ToSocketAddrs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
const KEEP_NUM: u64 = 100;
const PRUNE_INTERVAL: u64 = 1000;
const GENESIS_NUMBER: u64 = 0;
// Adapted from https://github.com/nervosnetwork/ckb-indexer/blob/290ae55a2d2acfc3d466a69675a1a58fcade7f5d/src/service.rs#L25
// with extensions for more indexing features.
pub struct Service {
store: RocksdbStore,
ckb_client: CkbRpcClient,
poll_interval: Duration,
listen_address: String,
rpc_thread_num: usize,
network_type: NetworkType,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: PathBuf,
cellbase_maturity: RationalU256,
cheque_since: U256,
}
impl Service {
pub fn new(
store_path: &str,
listen_address: &str,
poll_interval: Duration,
rpc_thread_num: usize,
network_ty: &str,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: &str,
cellbase_maturity: u64,
ckb_uri: String,
cheque_since: u64,
) -> Self {
let store = RocksdbStore::new(store_path);
let ckb_client = CkbRpcClient::new(ckb_uri);
let network_type = NetworkType::from_raw_str(network_ty).expect("invalid network type");
let listen_address = listen_address.to_string();
let snapshot_path = Path::new(snapshot_path).to_path_buf();
let cellbase_maturity = RationalU256::from_u256(U256::from(cellbase_maturity));
let cheque_since: U256 = cheque_since.into();
info!("Mercury running in CKB {:?}", network_type);
Service {
store,
ckb_client,
poll_interval,
listen_address,
rpc_thread_num,
network_type,
extensions_config,
snapshot_interval,
snapshot_path,
cellbase_maturity,
cheque_since,
}
}
pub fn init(&self) -> Server {
let mut io_handler = IoHandler::new();
let mercury_rpc_impl = MercuryRpcImpl::new(
self.store.clone(),
self.network_type,
self.ckb_client.clone(),
self.cheque_since.clone(),
self.extensions_config.to_rpc_config(),
);
let indexer_rpc_impl = IndexerRpcImpl {
version: "0.2.1".to_string(),
store: self.store.clone(),
};
io_handler.extend_with(indexer_rpc_impl.to_delegate());
io_handler.extend_with(mercury_rpc_impl.to_delegate());
info!("Running!");
ServerBuilder::new(io_handler)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Null,
AccessControlAllowOrigin::Any,
]))
.threads(self.rpc_thread_num)
.health_api(("/ping", "ping"))
.start_http(
&self
.listen_address
.to_socket_addrs()
.expect("config listen_address parsed")
.next()
.expect("listen_address parsed"),
)
.expect("Start Jsonrpc HTTP service")
}
#[allow(clippy::cmp_owned)]
pub async fn start(&self) {
// 0.37.0 and above supports hex format
let use_hex_format = loop {
match self.ckb_client.local_node_info().await {
Ok(local_node_info) => {
break local_node_info.version > "0.36".to_owned();
}
Err(err) => {
// < 0.32.0 compatibility
if format!("#{}", err).contains("missing field") {
break false;
}
error!("cannot get local_node_info from ckb node: {}", err);
std::thread::sleep(self.poll_interval);
}
}
};
USE_HEX_FORMAT.swap(Arc::new(use_hex_format));
let use_hex = use_hex_format;
let client_clone = self.ckb_client.clone();
tokio::spawn(async move {
update_tx_pool_cache(client_clone, use_hex).await;
});
self.run(use_hex_format).await;
}
async fn run(&self, use_hex_format: bool) {
let mut tip = 0;
loop {
let batch_store =
BatchStore::create(self.store.clone()).expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(batch_store.clone(), KEEP_NUM, u64::MAX));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
batch_store.clone(),
)
.expect("extension building failure");
let append_block_func = |block: BlockView| {
extensions.iter().for_each(|extension| {
extension
.append(&block)
.unwrap_or_else(|e| panic!("append block error {:?}", e))
});
indexer.append(&block).expect("append block should be OK");
};
// TODO: load tip first so extensions do not need to store their
// own tip?
let rollback_func = |tip_number: BlockNumber, tip_hash: packed::Byte32| {
indexer.rollback().expect("rollback block should be OK");
extensions.iter().for_each(|extension| {
extension
.rollback(tip_number, &tip_hash)
.unwrap_or_else(|e| panic!("rollback error {:?}", e))
});
};
let mut prune = false;
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
tip = tip_number;
match self
.get_block_by_number(tip_number + 1, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
if block.parent_hash() == tip_hash {
info!("append {}, {}", block.number(), block.hash());
append_block_func(block.clone());
prune = (block.number() % PRUNE_INTERVAL) == 0;
} else {
info!("rollback {}, {}", tip_number, tip_hash);
rollback_func(tip_number, tip_hash);
}
}
Ok(None) => {
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
} else {
match self
.get_block_by_number(GENESIS_NUMBER, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
append_block_func(block);
}
Ok(None) => {
error!("ckb node returns an empty genesis block");
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get genesis block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
}
batch_store.commit().expect("commit should be OK");
let _ = *CURRENT_BLOCK_NUMBER.swap(Arc::new(tip));
if prune {
let store = BatchStore::create(self.store.clone())
.expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(store.clone(), KEEP_NUM, PRUNE_INTERVAL));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
store.clone(),
)
.expect("extension building failure");
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
indexer.prune().expect("indexer prune should be OK");
for extension in extensions.iter() {
extension
.prune(tip_number, &tip_hash, KEEP_NUM)
.expect("extension prune should be OK");
}
}
store.commit().expect("commit should be OK");
}
self.snapshot(tip);
}
}
async fn get_block_by_number(
&self,
block_number: BlockNumber,
use_hex_format: bool,
) -> Result<Option<BlockView>> {
self.ckb_client
.get_block_by_number(block_number, use_hex_format)
.await
.map(|res| res.map(Into::into))
}
fn snapshot(&self, height: u64) |
fn change_current_epoch(&self, current_epoch: RationalU256) {
self.change_maturity_threshold(current_epoch.clone());
let mut epoch = CURRENT_EPOCH.write();
*epoch = current_epoch;
}
fn change_maturity_threshold(&self, current_epoch: RationalU256) {
if current_epoch < self.cellbase_maturity {
return;
}
let new = current_epoch - self.cellbase_maturity.clone();
let mut threshold = MATURE_THRESHOLD.write();
*threshold = new;
}
}
fn create_checkpoint(db: &DB, path: PathBuf) -> Result<()> {
Checkpoint::new(db)?.create_checkpoint(path)?;
Ok(())
}
async fn update_tx_pool_cache(ckb_client: CkbRpcClient, use_hex_format: bool) {
loop {
match ckb_client.get_raw_tx_pool(Some(use_hex_format)).await {
Ok(raw_pool) => handle_raw_tx_pool(&ckb_client, raw_pool).await,
Err(e) => error!("get raw tx pool error {:?}", e),
}
sleep(Duration::from_millis(350)).await;
}
}
async fn handle_raw_tx_pool(ckb_client: &CkbRpcClient, raw_pool: RawTxPool) {
let mut input_set: HashSet<packed::OutPoint> = HashSet::new();
let hashes = tx_hash_list(raw_pool);
if let Ok(res) = ckb_client.get_transactions(hashes).await {
for item in res.iter() {
if let Some(tx) = item {
for input in tx.transaction.inner.inputs.clone().into_iter() {
input_set.insert(input.previous_output.into());
}
} else {
warn!("Get transaction from pool failed");
}
}
}
let mut pool_cache = TX_POOL_CACHE.write();
*pool_cache = input_set;
}
fn tx_hash_list(raw_pool: RawTxPool) -> Vec<H256> {
match raw_pool {
RawTxPool::Ids(mut ids) => {
let mut ret = ids.pending;
ret.append(&mut ids.proposed);
ret
}
RawTxPool::Verbose(map) => {
let mut ret = map.pending.into_iter().map(|(k, _v)| k).collect::<Vec<_>>();
let mut proposed = map
.proposed
.into_iter()
.map(|(k, _v)| k)
.collect::<Vec<_>>();
ret.append(&mut proposed);
ret
}
}
}
| {
if height % self.snapshot_interval != 0 {
return;
}
let mut path = self.snapshot_path.clone();
path.push(height.to_string());
let store = self.store.clone();
tokio::spawn(async move {
if let Err(e) = create_checkpoint(store.inner(), path) {
error!("build {} checkpoint failed: {:?}", height, e);
}
});
} | identifier_body |
lib.rs | #![allow(clippy::mutable_key_type)]
use common::{anyhow::Result, NetworkType};
use core_extensions::{build_extensions, ExtensionsConfig, CURRENT_EPOCH, MATURE_THRESHOLD};
use core_rpc::{
CkbRpc, CkbRpcClient, MercuryRpc, MercuryRpcImpl, CURRENT_BLOCK_NUMBER, TX_POOL_CACHE,
USE_HEX_FORMAT,
};
use core_storage::{BatchStore, RocksdbStore, Store};
use ckb_indexer::indexer::Indexer;
use ckb_indexer::service::{IndexerRpc, IndexerRpcImpl};
use ckb_jsonrpc_types::RawTxPool;
use ckb_types::core::{BlockNumber, BlockView, RationalU256};
use ckb_types::{packed, H256, U256};
use jsonrpc_core::IoHandler;
use jsonrpc_http_server::{Server, ServerBuilder};
use jsonrpc_server_utils::cors::AccessControlAllowOrigin;
use jsonrpc_server_utils::hosts::DomainsValidation;
use log::{error, info, warn};
use rocksdb::{checkpoint::Checkpoint, DB};
use tokio::time::{sleep, Duration};
use std::collections::HashSet;
use std::net::ToSocketAddrs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
const KEEP_NUM: u64 = 100;
const PRUNE_INTERVAL: u64 = 1000;
const GENESIS_NUMBER: u64 = 0;
// Adapted from https://github.com/nervosnetwork/ckb-indexer/blob/290ae55a2d2acfc3d466a69675a1a58fcade7f5d/src/service.rs#L25
// with extensions for more indexing features.
pub struct Service {
store: RocksdbStore,
ckb_client: CkbRpcClient,
poll_interval: Duration,
listen_address: String,
rpc_thread_num: usize,
network_type: NetworkType,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: PathBuf,
cellbase_maturity: RationalU256,
cheque_since: U256,
}
impl Service {
pub fn new(
store_path: &str,
listen_address: &str,
poll_interval: Duration,
rpc_thread_num: usize,
network_ty: &str,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: &str,
cellbase_maturity: u64,
ckb_uri: String,
cheque_since: u64,
) -> Self {
let store = RocksdbStore::new(store_path);
let ckb_client = CkbRpcClient::new(ckb_uri);
let network_type = NetworkType::from_raw_str(network_ty).expect("invalid network type");
let listen_address = listen_address.to_string();
let snapshot_path = Path::new(snapshot_path).to_path_buf();
let cellbase_maturity = RationalU256::from_u256(U256::from(cellbase_maturity));
let cheque_since: U256 = cheque_since.into();
info!("Mercury running in CKB {:?}", network_type);
Service {
store,
ckb_client,
poll_interval,
listen_address,
rpc_thread_num,
network_type,
extensions_config,
snapshot_interval,
snapshot_path,
cellbase_maturity,
cheque_since,
}
}
pub fn init(&self) -> Server {
let mut io_handler = IoHandler::new();
let mercury_rpc_impl = MercuryRpcImpl::new(
self.store.clone(),
self.network_type,
self.ckb_client.clone(),
self.cheque_since.clone(),
self.extensions_config.to_rpc_config(),
);
let indexer_rpc_impl = IndexerRpcImpl {
version: "0.2.1".to_string(),
store: self.store.clone(),
};
io_handler.extend_with(indexer_rpc_impl.to_delegate());
io_handler.extend_with(mercury_rpc_impl.to_delegate());
info!("Running!");
ServerBuilder::new(io_handler)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Null,
AccessControlAllowOrigin::Any,
]))
.threads(self.rpc_thread_num)
.health_api(("/ping", "ping"))
.start_http(
&self
.listen_address
.to_socket_addrs()
.expect("config listen_address parsed")
.next()
.expect("listen_address parsed"),
)
.expect("Start Jsonrpc HTTP service")
}
#[allow(clippy::cmp_owned)]
pub async fn start(&self) {
// 0.37.0 and above supports hex format
let use_hex_format = loop {
match self.ckb_client.local_node_info().await {
Ok(local_node_info) => {
break local_node_info.version > "0.36".to_owned();
}
Err(err) => {
// < 0.32.0 compatibility
if format!("#{}", err).contains("missing field") {
break false;
}
error!("cannot get local_node_info from ckb node: {}", err);
std::thread::sleep(self.poll_interval);
}
}
};
USE_HEX_FORMAT.swap(Arc::new(use_hex_format));
let use_hex = use_hex_format;
let client_clone = self.ckb_client.clone();
tokio::spawn(async move {
update_tx_pool_cache(client_clone, use_hex).await;
});
self.run(use_hex_format).await;
}
async fn run(&self, use_hex_format: bool) {
let mut tip = 0;
loop {
let batch_store =
BatchStore::create(self.store.clone()).expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(batch_store.clone(), KEEP_NUM, u64::MAX));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
batch_store.clone(),
)
.expect("extension building failure");
let append_block_func = |block: BlockView| {
extensions.iter().for_each(|extension| {
extension
.append(&block)
.unwrap_or_else(|e| panic!("append block error {:?}", e))
});
indexer.append(&block).expect("append block should be OK");
};
// TODO: load tip first so extensions do not need to store their
// own tip?
let rollback_func = |tip_number: BlockNumber, tip_hash: packed::Byte32| {
indexer.rollback().expect("rollback block should be OK");
extensions.iter().for_each(|extension| {
extension
.rollback(tip_number, &tip_hash)
.unwrap_or_else(|e| panic!("rollback error {:?}", e))
});
};
let mut prune = false;
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
tip = tip_number;
match self
.get_block_by_number(tip_number + 1, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
if block.parent_hash() == tip_hash {
info!("append {}, {}", block.number(), block.hash());
append_block_func(block.clone());
prune = (block.number() % PRUNE_INTERVAL) == 0;
} else {
info!("rollback {}, {}", tip_number, tip_hash);
rollback_func(tip_number, tip_hash);
}
}
Ok(None) => {
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
} else {
match self
.get_block_by_number(GENESIS_NUMBER, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
append_block_func(block);
}
Ok(None) => {
error!("ckb node returns an empty genesis block");
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get genesis block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
}
batch_store.commit().expect("commit should be OK");
let _ = *CURRENT_BLOCK_NUMBER.swap(Arc::new(tip));
if prune {
let store = BatchStore::create(self.store.clone())
.expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(store.clone(), KEEP_NUM, PRUNE_INTERVAL));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
store.clone(),
)
.expect("extension building failure");
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
indexer.prune().expect("indexer prune should be OK");
for extension in extensions.iter() {
extension
.prune(tip_number, &tip_hash, KEEP_NUM)
.expect("extension prune should be OK");
}
}
store.commit().expect("commit should be OK");
}
self.snapshot(tip);
}
}
async fn get_block_by_number(
&self,
block_number: BlockNumber,
use_hex_format: bool,
) -> Result<Option<BlockView>> {
self.ckb_client
.get_block_by_number(block_number, use_hex_format)
.await
.map(|res| res.map(Into::into))
}
fn snapshot(&self, height: u64) {
if height % self.snapshot_interval!= 0 {
return;
}
let mut path = self.snapshot_path.clone();
path.push(height.to_string());
let store = self.store.clone();
tokio::spawn(async move {
if let Err(e) = create_checkpoint(store.inner(), path) {
error!("build {} checkpoint failed: {:?}", height, e);
}
});
}
fn change_current_epoch(&self, current_epoch: RationalU256) {
self.change_maturity_threshold(current_epoch.clone());
let mut epoch = CURRENT_EPOCH.write();
*epoch = current_epoch;
}
fn change_maturity_threshold(&self, current_epoch: RationalU256) {
if current_epoch < self.cellbase_maturity {
return;
}
let new = current_epoch - self.cellbase_maturity.clone();
let mut threshold = MATURE_THRESHOLD.write();
*threshold = new;
}
}
fn create_checkpoint(db: &DB, path: PathBuf) -> Result<()> {
Checkpoint::new(db)?.create_checkpoint(path)?;
Ok(())
}
async fn update_tx_pool_cache(ckb_client: CkbRpcClient, use_hex_format: bool) {
loop {
match ckb_client.get_raw_tx_pool(Some(use_hex_format)).await {
Ok(raw_pool) => handle_raw_tx_pool(&ckb_client, raw_pool).await,
Err(e) => error!("get raw tx pool error {:?}", e),
}
sleep(Duration::from_millis(350)).await;
}
}
async fn | (ckb_client: &CkbRpcClient, raw_pool: RawTxPool) {
let mut input_set: HashSet<packed::OutPoint> = HashSet::new();
let hashes = tx_hash_list(raw_pool);
if let Ok(res) = ckb_client.get_transactions(hashes).await {
for item in res.iter() {
if let Some(tx) = item {
for input in tx.transaction.inner.inputs.clone().into_iter() {
input_set.insert(input.previous_output.into());
}
} else {
warn!("Get transaction from pool failed");
}
}
}
let mut pool_cache = TX_POOL_CACHE.write();
*pool_cache = input_set;
}
fn tx_hash_list(raw_pool: RawTxPool) -> Vec<H256> {
match raw_pool {
RawTxPool::Ids(mut ids) => {
let mut ret = ids.pending;
ret.append(&mut ids.proposed);
ret
}
RawTxPool::Verbose(map) => {
let mut ret = map.pending.into_iter().map(|(k, _v)| k).collect::<Vec<_>>();
let mut proposed = map
.proposed
.into_iter()
.map(|(k, _v)| k)
.collect::<Vec<_>>();
ret.append(&mut proposed);
ret
}
}
}
| handle_raw_tx_pool | identifier_name |
main.rs | use std::convert::TryInto;
use std::fs::File;
use std::io::{ErrorKind, Read, Write};
use std::os::unix::io::{FromRawFd, RawFd};
use std::ptr::NonNull;
use std::sync::{Arc, Mutex};
use std::{slice, usize};
use pcid_interface::{PciBar, PciFeature, PciFeatureInfo, PciFunction, PcidServerHandle};
use syscall::{
CloneFlags, Event, Mmio, Packet, Result, SchemeBlockMut, PHYSMAP_NO_CACHE,
PHYSMAP_WRITE,
};
use redox_log::{OutputBuilder, RedoxLogger};
use self::nvme::{InterruptMethod, InterruptSources, Nvme};
use self::scheme::DiskScheme;
mod nvme;
mod scheme;
/// A wrapper for a BAR allocation.
pub struct Bar {
ptr: NonNull<u8>,
physical: usize,
bar_size: usize,
}
impl Bar {
pub fn allocate(bar: usize, bar_size: usize) -> Result<Self> {
Ok(Self {
ptr: NonNull::new(
unsafe { syscall::physmap(bar, bar_size, PHYSMAP_NO_CACHE | PHYSMAP_WRITE)? as *mut u8 },
)
.expect("Mapping a BAR resulted in a nullptr"),
physical: bar,
bar_size,
})
}
}
impl Drop for Bar {
fn drop(&mut self) {
let _ = unsafe { syscall::physunmap(self.physical) };
}
}
/// The PCI BARs that may be allocated.
#[derive(Default)]
pub struct AllocatedBars(pub [Mutex<Option<Bar>>; 6]);
/// Get the most optimal yet functional interrupt mechanism: either (in the order of preference):
/// MSI-X, MSI, and INTx# pin. Returns both runtime interrupt structures (MSI/MSI-X capability
/// structures), and the handles to the interrupts.
fn get_int_method(
pcid_handle: &mut PcidServerHandle,
function: &PciFunction,
allocated_bars: &AllocatedBars,
) -> Result<(InterruptMethod, InterruptSources)> {
log::trace!("Begin get_int_method");
use pcid_interface::irq_helpers;
let features = pcid_handle.fetch_all_features().unwrap();
let has_msi = features.iter().any(|(feature, _)| feature.is_msi());
let has_msix = features.iter().any(|(feature, _)| feature.is_msix());
// TODO: Allocate more than one vector when possible and useful.
if has_msix {
// Extended message signaled interrupts.
use self::nvme::MsixCfg;
use pcid_interface::msi::MsixTableEntry;
let mut capability_struct = match pcid_handle.feature_info(PciFeature::MsiX).unwrap() {
PciFeatureInfo::MsiX(msix) => msix,
_ => unreachable!(),
};
fn bar_base(
allocated_bars: &AllocatedBars,
function: &PciFunction,
bir: u8,
) -> Result<NonNull<u8>> {
let bir = usize::from(bir);
let mut bar_guard = allocated_bars.0[bir].lock().unwrap();
match &mut *bar_guard {
&mut Some(ref bar) => Ok(bar.ptr),
bar_to_set @ &mut None => {
let bar = match function.bars[bir] {
PciBar::Memory(addr) => addr,
other => panic!("Expected memory BAR, found {:?}", other),
};
let bar_size = function.bar_sizes[bir];
let bar = Bar::allocate(bar as usize, bar_size as usize)?;
*bar_to_set = Some(bar);
Ok(bar_to_set.as_ref().unwrap().ptr)
}
}
}
let table_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.table_bir())?.as_ptr();
let pba_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.pba_bir())?.as_ptr();
let table_base =
unsafe { table_bar_base.offset(capability_struct.table_offset() as isize) };
let pba_base = unsafe { pba_bar_base.offset(capability_struct.pba_offset() as isize) };
let vector_count = capability_struct.table_size();
let table_entries: &'static mut [MsixTableEntry] = unsafe {
slice::from_raw_parts_mut(table_base as *mut MsixTableEntry, vector_count as usize)
};
let pba_entries: &'static mut [Mmio<u64>] = unsafe {
slice::from_raw_parts_mut(
table_base as *mut Mmio<u64>,
(vector_count as usize + 63) / 64,
)
};
// Mask all interrupts in case some earlier driver/os already unmasked them (according to
// the PCI Local Bus spec 3.0, they are masked after system reset).
for table_entry in table_entries.iter_mut() {
table_entry.mask();
}
pcid_handle.enable_feature(PciFeature::MsiX).unwrap();
capability_struct.set_msix_enabled(true); // only affects our local mirror of the cap
let (msix_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
let entry: &mut MsixTableEntry = &mut table_entries[0];
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI-X interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data = msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector);
entry.set_addr_lo(msg_addr);
entry.set_msg_data(msg_data);
(0, irq_handle)
};
let interrupt_method = InterruptMethod::MsiX(MsixCfg {
cap: capability_struct,
table: table_entries,
pba: pba_entries,
});
let interrupt_sources =
InterruptSources::MsiX(std::iter::once((msix_vector_number, irq_handle)).collect());
Ok((interrupt_method, interrupt_sources))
} else if has_msi {
// Message signaled interrupts.
let capability_struct = match pcid_handle.feature_info(PciFeature::Msi).unwrap() {
PciFeatureInfo::Msi(msi) => msi,
_ => unreachable!(),
};
let (msi_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
use pcid_interface::{MsiSetFeatureInfo, SetFeatureInfo};
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read BSP APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data =
msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector) as u16;
pcid_handle.set_feature_info(SetFeatureInfo::Msi(MsiSetFeatureInfo {
message_address: Some(msg_addr),
message_upper_address: Some(0),
message_data: Some(msg_data),
multi_message_enable: Some(0), // enable 2^0=1 vectors
mask_bits: None,
})).unwrap();
(0, irq_handle)
};
let interrupt_method = InterruptMethod::Msi(capability_struct);
let interrupt_sources =
InterruptSources::Msi(std::iter::once((msi_vector_number, irq_handle)).collect());
pcid_handle.enable_feature(PciFeature::Msi).unwrap();
Ok((interrupt_method, interrupt_sources))
} else if function.legacy_interrupt_pin.is_some() {
// INTx# pin based interrupts.
let irq_handle = File::open(format!("irq:{}", function.legacy_interrupt_line))
.expect("nvmed: failed to open INTx# interrupt line");
Ok((InterruptMethod::Intx, InterruptSources::Intx(irq_handle)))
} else {
// No interrupts at all
todo!("handling of no interrupts")
}
}
fn setup_logging() -> Option<&'static RedoxLogger> {
let mut logger = RedoxLogger::new()
.with_output(
OutputBuilder::stderr()
.with_filter(log::LevelFilter::Info) // limit global output to important info
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
);
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.log") {
Ok(b) => logger = logger.with_output(
// TODO: Add a configuration file for this
b.with_filter(log::LevelFilter::Info)
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.log: {}", error),
}
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.ansi.log") {
Ok(b) => logger = logger.with_output(
b.with_filter(log::LevelFilter::Info)
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.ansi.log: {}", error),
}
match logger.enable() {
Ok(logger_ref) => {
eprintln!("nvmed: enabled logger");
Some(logger_ref)
}
Err(error) => {
eprintln!("nvmed: failed to set default logger: {}", error);
None
}
}
}
fn | () {
// Daemonize
if unsafe { syscall::clone(CloneFlags::empty()).unwrap() }!= 0 {
return;
}
let _logger_ref = setup_logging();
let mut pcid_handle =
PcidServerHandle::connect_default().expect("nvmed: failed to setup channel to pcid");
let pci_config = pcid_handle
.fetch_config()
.expect("nvmed: failed to fetch config");
let bar = match pci_config.func.bars[0] {
PciBar::Memory(mem) => mem,
other => panic!("received a non-memory BAR ({:?})", other),
};
let bar_size = pci_config.func.bar_sizes[0];
let irq = pci_config.func.legacy_interrupt_line;
let mut name = pci_config.func.name();
name.push_str("_nvme");
log::info!("NVME PCI CONFIG: {:?}", pci_config);
let allocated_bars = AllocatedBars::default();
let address = unsafe {
syscall::physmap(
bar as usize,
bar_size as usize,
PHYSMAP_WRITE | PHYSMAP_NO_CACHE,
)
.expect("nvmed: failed to map address")
};
*allocated_bars.0[0].lock().unwrap() = Some(Bar {
physical: bar as usize,
bar_size: bar_size as usize,
ptr: NonNull::new(address as *mut u8).expect("Physmapping BAR gave nullptr"),
});
let event_fd = syscall::open("event:", syscall::O_RDWR | syscall::O_CLOEXEC)
.expect("nvmed: failed to open event queue");
let mut event_file = unsafe { File::from_raw_fd(event_fd as RawFd) };
let scheme_name = format!("disk/{}", name);
let socket_fd = syscall::open(
&format!(":{}", scheme_name),
syscall::O_RDWR | syscall::O_CREAT | syscall::O_NONBLOCK | syscall::O_CLOEXEC,
)
.expect("nvmed: failed to create disk scheme");
syscall::write(
event_fd,
&syscall::Event {
id: socket_fd,
flags: syscall::EVENT_READ,
data: 0,
},
)
.expect("nvmed: failed to watch disk scheme events");
let mut socket_file = unsafe { File::from_raw_fd(socket_fd as RawFd) };
let (reactor_sender, reactor_receiver) = crossbeam_channel::unbounded();
let (interrupt_method, interrupt_sources) =
get_int_method(&mut pcid_handle, &pci_config.func, &allocated_bars)
.expect("nvmed: failed to find a suitable interrupt method");
let mut nvme = Nvme::new(address, interrupt_method, pcid_handle, reactor_sender)
.expect("nvmed: failed to allocate driver data");
unsafe { nvme.init() }
log::debug!("Finished base initialization");
let nvme = Arc::new(nvme);
let reactor_thread = nvme::cq_reactor::start_cq_reactor_thread(Arc::clone(&nvme), interrupt_sources, reactor_receiver);
let namespaces = futures::executor::block_on(nvme.init_with_queues());
syscall::setrens(0, 0).expect("nvmed: failed to enter null namespace");
let mut scheme = DiskScheme::new(scheme_name, nvme, namespaces);
let mut todo = Vec::new();
'events: loop {
let mut event = Event::default();
if event_file
.read(&mut event)
.expect("nvmed: failed to read event queue")
== 0
{
break;
}
match event.data {
0 => loop {
let mut packet = Packet::default();
match socket_file.read(&mut packet) {
Ok(0) => break 'events,
Ok(_) => (),
Err(err) => match err.kind() {
ErrorKind::WouldBlock => break,
_ => Err(err).expect("nvmed: failed to read disk scheme"),
},
}
todo.push(packet);
},
unknown => {
panic!("nvmed: unknown event data {}", unknown);
}
}
let mut i = 0;
while i < todo.len() {
if let Some(a) = scheme.handle(&todo[i]) {
let mut packet = todo.remove(i);
packet.a = a;
socket_file
.write(&packet)
.expect("nvmed: failed to write disk scheme");
} else {
i += 1;
}
}
}
//TODO: destroy NVMe stuff
reactor_thread.join().expect("nvmed: failed to join reactor thread");
}
| main | identifier_name |
main.rs | use std::convert::TryInto;
use std::fs::File;
use std::io::{ErrorKind, Read, Write};
use std::os::unix::io::{FromRawFd, RawFd};
use std::ptr::NonNull;
use std::sync::{Arc, Mutex};
use std::{slice, usize};
use pcid_interface::{PciBar, PciFeature, PciFeatureInfo, PciFunction, PcidServerHandle};
use syscall::{
CloneFlags, Event, Mmio, Packet, Result, SchemeBlockMut, PHYSMAP_NO_CACHE,
PHYSMAP_WRITE,
};
use redox_log::{OutputBuilder, RedoxLogger};
use self::nvme::{InterruptMethod, InterruptSources, Nvme};
use self::scheme::DiskScheme;
mod nvme;
mod scheme;
/// A wrapper for a BAR allocation.
pub struct Bar {
ptr: NonNull<u8>,
physical: usize,
bar_size: usize,
}
impl Bar {
pub fn allocate(bar: usize, bar_size: usize) -> Result<Self> {
Ok(Self {
ptr: NonNull::new(
unsafe { syscall::physmap(bar, bar_size, PHYSMAP_NO_CACHE | PHYSMAP_WRITE)? as *mut u8 },
)
.expect("Mapping a BAR resulted in a nullptr"),
physical: bar,
bar_size,
})
}
}
impl Drop for Bar {
fn drop(&mut self) {
let _ = unsafe { syscall::physunmap(self.physical) };
}
}
/// The PCI BARs that may be allocated.
#[derive(Default)]
pub struct AllocatedBars(pub [Mutex<Option<Bar>>; 6]);
/// Get the most optimal yet functional interrupt mechanism: either (in the order of preference):
/// MSI-X, MSI, and INTx# pin. Returns both runtime interrupt structures (MSI/MSI-X capability
/// structures), and the handles to the interrupts.
fn get_int_method(
pcid_handle: &mut PcidServerHandle,
function: &PciFunction,
allocated_bars: &AllocatedBars,
) -> Result<(InterruptMethod, InterruptSources)> {
log::trace!("Begin get_int_method");
use pcid_interface::irq_helpers;
let features = pcid_handle.fetch_all_features().unwrap();
let has_msi = features.iter().any(|(feature, _)| feature.is_msi());
let has_msix = features.iter().any(|(feature, _)| feature.is_msix());
// TODO: Allocate more than one vector when possible and useful.
if has_msix {
// Extended message signaled interrupts.
use self::nvme::MsixCfg;
use pcid_interface::msi::MsixTableEntry;
let mut capability_struct = match pcid_handle.feature_info(PciFeature::MsiX).unwrap() {
PciFeatureInfo::MsiX(msix) => msix,
_ => unreachable!(),
};
fn bar_base(
allocated_bars: &AllocatedBars,
function: &PciFunction,
bir: u8,
) -> Result<NonNull<u8>> {
let bir = usize::from(bir);
let mut bar_guard = allocated_bars.0[bir].lock().unwrap();
match &mut *bar_guard {
&mut Some(ref bar) => Ok(bar.ptr),
bar_to_set @ &mut None => {
let bar = match function.bars[bir] {
PciBar::Memory(addr) => addr,
other => panic!("Expected memory BAR, found {:?}", other),
};
let bar_size = function.bar_sizes[bir];
let bar = Bar::allocate(bar as usize, bar_size as usize)?;
*bar_to_set = Some(bar);
Ok(bar_to_set.as_ref().unwrap().ptr)
}
}
}
let table_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.table_bir())?.as_ptr();
let pba_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.pba_bir())?.as_ptr();
let table_base =
unsafe { table_bar_base.offset(capability_struct.table_offset() as isize) };
let pba_base = unsafe { pba_bar_base.offset(capability_struct.pba_offset() as isize) };
let vector_count = capability_struct.table_size();
let table_entries: &'static mut [MsixTableEntry] = unsafe {
slice::from_raw_parts_mut(table_base as *mut MsixTableEntry, vector_count as usize)
};
let pba_entries: &'static mut [Mmio<u64>] = unsafe {
slice::from_raw_parts_mut(
table_base as *mut Mmio<u64>,
(vector_count as usize + 63) / 64,
)
};
// Mask all interrupts in case some earlier driver/os already unmasked them (according to
// the PCI Local Bus spec 3.0, they are masked after system reset).
for table_entry in table_entries.iter_mut() {
table_entry.mask();
}
pcid_handle.enable_feature(PciFeature::MsiX).unwrap();
capability_struct.set_msix_enabled(true); // only affects our local mirror of the cap
let (msix_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
let entry: &mut MsixTableEntry = &mut table_entries[0];
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI-X interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data = msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector);
entry.set_addr_lo(msg_addr);
entry.set_msg_data(msg_data);
(0, irq_handle)
};
let interrupt_method = InterruptMethod::MsiX(MsixCfg {
cap: capability_struct,
table: table_entries,
pba: pba_entries,
});
let interrupt_sources =
InterruptSources::MsiX(std::iter::once((msix_vector_number, irq_handle)).collect());
Ok((interrupt_method, interrupt_sources))
} else if has_msi {
// Message signaled interrupts.
let capability_struct = match pcid_handle.feature_info(PciFeature::Msi).unwrap() {
PciFeatureInfo::Msi(msi) => msi,
_ => unreachable!(),
};
let (msi_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
use pcid_interface::{MsiSetFeatureInfo, SetFeatureInfo};
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read BSP APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data =
msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector) as u16;
pcid_handle.set_feature_info(SetFeatureInfo::Msi(MsiSetFeatureInfo {
message_address: Some(msg_addr),
message_upper_address: Some(0),
message_data: Some(msg_data),
multi_message_enable: Some(0), // enable 2^0=1 vectors
mask_bits: None,
})).unwrap();
(0, irq_handle)
};
let interrupt_method = InterruptMethod::Msi(capability_struct);
let interrupt_sources =
InterruptSources::Msi(std::iter::once((msi_vector_number, irq_handle)).collect());
pcid_handle.enable_feature(PciFeature::Msi).unwrap();
Ok((interrupt_method, interrupt_sources))
} else if function.legacy_interrupt_pin.is_some() {
// INTx# pin based interrupts.
let irq_handle = File::open(format!("irq:{}", function.legacy_interrupt_line))
.expect("nvmed: failed to open INTx# interrupt line");
Ok((InterruptMethod::Intx, InterruptSources::Intx(irq_handle)))
} else {
// No interrupts at all
todo!("handling of no interrupts")
}
}
fn setup_logging() -> Option<&'static RedoxLogger> {
let mut logger = RedoxLogger::new()
.with_output(
OutputBuilder::stderr()
.with_filter(log::LevelFilter::Info) // limit global output to important info
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
);
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.log") {
Ok(b) => logger = logger.with_output(
// TODO: Add a configuration file for this
b.with_filter(log::LevelFilter::Info)
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.log: {}", error),
}
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.ansi.log") {
Ok(b) => logger = logger.with_output(
b.with_filter(log::LevelFilter::Info)
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.ansi.log: {}", error),
}
match logger.enable() {
Ok(logger_ref) => |
Err(error) => {
eprintln!("nvmed: failed to set default logger: {}", error);
None
}
}
}
fn main() {
// Daemonize
if unsafe { syscall::clone(CloneFlags::empty()).unwrap() }!= 0 {
return;
}
let _logger_ref = setup_logging();
let mut pcid_handle =
PcidServerHandle::connect_default().expect("nvmed: failed to setup channel to pcid");
let pci_config = pcid_handle
.fetch_config()
.expect("nvmed: failed to fetch config");
let bar = match pci_config.func.bars[0] {
PciBar::Memory(mem) => mem,
other => panic!("received a non-memory BAR ({:?})", other),
};
let bar_size = pci_config.func.bar_sizes[0];
let irq = pci_config.func.legacy_interrupt_line;
let mut name = pci_config.func.name();
name.push_str("_nvme");
log::info!("NVME PCI CONFIG: {:?}", pci_config);
let allocated_bars = AllocatedBars::default();
let address = unsafe {
syscall::physmap(
bar as usize,
bar_size as usize,
PHYSMAP_WRITE | PHYSMAP_NO_CACHE,
)
.expect("nvmed: failed to map address")
};
*allocated_bars.0[0].lock().unwrap() = Some(Bar {
physical: bar as usize,
bar_size: bar_size as usize,
ptr: NonNull::new(address as *mut u8).expect("Physmapping BAR gave nullptr"),
});
let event_fd = syscall::open("event:", syscall::O_RDWR | syscall::O_CLOEXEC)
.expect("nvmed: failed to open event queue");
let mut event_file = unsafe { File::from_raw_fd(event_fd as RawFd) };
let scheme_name = format!("disk/{}", name);
let socket_fd = syscall::open(
&format!(":{}", scheme_name),
syscall::O_RDWR | syscall::O_CREAT | syscall::O_NONBLOCK | syscall::O_CLOEXEC,
)
.expect("nvmed: failed to create disk scheme");
syscall::write(
event_fd,
&syscall::Event {
id: socket_fd,
flags: syscall::EVENT_READ,
data: 0,
},
)
.expect("nvmed: failed to watch disk scheme events");
let mut socket_file = unsafe { File::from_raw_fd(socket_fd as RawFd) };
let (reactor_sender, reactor_receiver) = crossbeam_channel::unbounded();
let (interrupt_method, interrupt_sources) =
get_int_method(&mut pcid_handle, &pci_config.func, &allocated_bars)
.expect("nvmed: failed to find a suitable interrupt method");
let mut nvme = Nvme::new(address, interrupt_method, pcid_handle, reactor_sender)
.expect("nvmed: failed to allocate driver data");
unsafe { nvme.init() }
log::debug!("Finished base initialization");
let nvme = Arc::new(nvme);
let reactor_thread = nvme::cq_reactor::start_cq_reactor_thread(Arc::clone(&nvme), interrupt_sources, reactor_receiver);
let namespaces = futures::executor::block_on(nvme.init_with_queues());
syscall::setrens(0, 0).expect("nvmed: failed to enter null namespace");
let mut scheme = DiskScheme::new(scheme_name, nvme, namespaces);
let mut todo = Vec::new();
'events: loop {
let mut event = Event::default();
if event_file
.read(&mut event)
.expect("nvmed: failed to read event queue")
== 0
{
break;
}
match event.data {
0 => loop {
let mut packet = Packet::default();
match socket_file.read(&mut packet) {
Ok(0) => break 'events,
Ok(_) => (),
Err(err) => match err.kind() {
ErrorKind::WouldBlock => break,
_ => Err(err).expect("nvmed: failed to read disk scheme"),
},
}
todo.push(packet);
},
unknown => {
panic!("nvmed: unknown event data {}", unknown);
}
}
let mut i = 0;
while i < todo.len() {
if let Some(a) = scheme.handle(&todo[i]) {
let mut packet = todo.remove(i);
packet.a = a;
socket_file
.write(&packet)
.expect("nvmed: failed to write disk scheme");
} else {
i += 1;
}
}
}
//TODO: destroy NVMe stuff
reactor_thread.join().expect("nvmed: failed to join reactor thread");
}
| {
eprintln!("nvmed: enabled logger");
Some(logger_ref)
} | conditional_block |
main.rs | use std::convert::TryInto;
use std::fs::File;
use std::io::{ErrorKind, Read, Write};
use std::os::unix::io::{FromRawFd, RawFd};
use std::ptr::NonNull;
use std::sync::{Arc, Mutex};
use std::{slice, usize};
use pcid_interface::{PciBar, PciFeature, PciFeatureInfo, PciFunction, PcidServerHandle};
use syscall::{
CloneFlags, Event, Mmio, Packet, Result, SchemeBlockMut, PHYSMAP_NO_CACHE,
PHYSMAP_WRITE,
};
use redox_log::{OutputBuilder, RedoxLogger};
use self::nvme::{InterruptMethod, InterruptSources, Nvme};
use self::scheme::DiskScheme;
mod nvme;
mod scheme;
/// A wrapper for a BAR allocation.
pub struct Bar {
ptr: NonNull<u8>,
physical: usize,
bar_size: usize,
}
impl Bar {
pub fn allocate(bar: usize, bar_size: usize) -> Result<Self> {
Ok(Self {
ptr: NonNull::new(
unsafe { syscall::physmap(bar, bar_size, PHYSMAP_NO_CACHE | PHYSMAP_WRITE)? as *mut u8 },
)
.expect("Mapping a BAR resulted in a nullptr"),
physical: bar,
bar_size,
})
}
}
impl Drop for Bar {
fn drop(&mut self) {
let _ = unsafe { syscall::physunmap(self.physical) };
}
}
/// The PCI BARs that may be allocated.
#[derive(Default)]
pub struct AllocatedBars(pub [Mutex<Option<Bar>>; 6]);
/// Get the most optimal yet functional interrupt mechanism: either (in the order of preference):
/// MSI-X, MSI, and INTx# pin. Returns both runtime interrupt structures (MSI/MSI-X capability
/// structures), and the handles to the interrupts.
fn get_int_method(
pcid_handle: &mut PcidServerHandle,
function: &PciFunction,
allocated_bars: &AllocatedBars,
) -> Result<(InterruptMethod, InterruptSources)> {
log::trace!("Begin get_int_method");
use pcid_interface::irq_helpers;
let features = pcid_handle.fetch_all_features().unwrap();
let has_msi = features.iter().any(|(feature, _)| feature.is_msi());
let has_msix = features.iter().any(|(feature, _)| feature.is_msix());
// TODO: Allocate more than one vector when possible and useful.
if has_msix {
// Extended message signaled interrupts.
use self::nvme::MsixCfg;
use pcid_interface::msi::MsixTableEntry;
let mut capability_struct = match pcid_handle.feature_info(PciFeature::MsiX).unwrap() {
PciFeatureInfo::MsiX(msix) => msix,
_ => unreachable!(),
};
fn bar_base(
allocated_bars: &AllocatedBars,
function: &PciFunction,
bir: u8,
) -> Result<NonNull<u8>> {
let bir = usize::from(bir);
let mut bar_guard = allocated_bars.0[bir].lock().unwrap();
match &mut *bar_guard {
&mut Some(ref bar) => Ok(bar.ptr),
bar_to_set @ &mut None => {
let bar = match function.bars[bir] {
PciBar::Memory(addr) => addr,
other => panic!("Expected memory BAR, found {:?}", other),
};
let bar_size = function.bar_sizes[bir];
let bar = Bar::allocate(bar as usize, bar_size as usize)?;
*bar_to_set = Some(bar);
Ok(bar_to_set.as_ref().unwrap().ptr)
}
}
}
let table_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.table_bir())?.as_ptr();
let pba_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.pba_bir())?.as_ptr();
let table_base =
unsafe { table_bar_base.offset(capability_struct.table_offset() as isize) };
let pba_base = unsafe { pba_bar_base.offset(capability_struct.pba_offset() as isize) };
let vector_count = capability_struct.table_size();
let table_entries: &'static mut [MsixTableEntry] = unsafe {
slice::from_raw_parts_mut(table_base as *mut MsixTableEntry, vector_count as usize)
};
let pba_entries: &'static mut [Mmio<u64>] = unsafe {
slice::from_raw_parts_mut(
table_base as *mut Mmio<u64>,
(vector_count as usize + 63) / 64,
)
};
// Mask all interrupts in case some earlier driver/os already unmasked them (according to
// the PCI Local Bus spec 3.0, they are masked after system reset).
for table_entry in table_entries.iter_mut() {
table_entry.mask();
}
pcid_handle.enable_feature(PciFeature::MsiX).unwrap();
capability_struct.set_msix_enabled(true); // only affects our local mirror of the cap
let (msix_vector_number, irq_handle) = { |
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI-X interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data = msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector);
entry.set_addr_lo(msg_addr);
entry.set_msg_data(msg_data);
(0, irq_handle)
};
let interrupt_method = InterruptMethod::MsiX(MsixCfg {
cap: capability_struct,
table: table_entries,
pba: pba_entries,
});
let interrupt_sources =
InterruptSources::MsiX(std::iter::once((msix_vector_number, irq_handle)).collect());
Ok((interrupt_method, interrupt_sources))
} else if has_msi {
// Message signaled interrupts.
let capability_struct = match pcid_handle.feature_info(PciFeature::Msi).unwrap() {
PciFeatureInfo::Msi(msi) => msi,
_ => unreachable!(),
};
let (msi_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
use pcid_interface::{MsiSetFeatureInfo, SetFeatureInfo};
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read BSP APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data =
msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector) as u16;
pcid_handle.set_feature_info(SetFeatureInfo::Msi(MsiSetFeatureInfo {
message_address: Some(msg_addr),
message_upper_address: Some(0),
message_data: Some(msg_data),
multi_message_enable: Some(0), // enable 2^0=1 vectors
mask_bits: None,
})).unwrap();
(0, irq_handle)
};
let interrupt_method = InterruptMethod::Msi(capability_struct);
let interrupt_sources =
InterruptSources::Msi(std::iter::once((msi_vector_number, irq_handle)).collect());
pcid_handle.enable_feature(PciFeature::Msi).unwrap();
Ok((interrupt_method, interrupt_sources))
} else if function.legacy_interrupt_pin.is_some() {
// INTx# pin based interrupts.
let irq_handle = File::open(format!("irq:{}", function.legacy_interrupt_line))
.expect("nvmed: failed to open INTx# interrupt line");
Ok((InterruptMethod::Intx, InterruptSources::Intx(irq_handle)))
} else {
// No interrupts at all
todo!("handling of no interrupts")
}
}
fn setup_logging() -> Option<&'static RedoxLogger> {
let mut logger = RedoxLogger::new()
.with_output(
OutputBuilder::stderr()
.with_filter(log::LevelFilter::Info) // limit global output to important info
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
);
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.log") {
Ok(b) => logger = logger.with_output(
// TODO: Add a configuration file for this
b.with_filter(log::LevelFilter::Info)
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.log: {}", error),
}
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.ansi.log") {
Ok(b) => logger = logger.with_output(
b.with_filter(log::LevelFilter::Info)
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.ansi.log: {}", error),
}
match logger.enable() {
Ok(logger_ref) => {
eprintln!("nvmed: enabled logger");
Some(logger_ref)
}
Err(error) => {
eprintln!("nvmed: failed to set default logger: {}", error);
None
}
}
}
fn main() {
// Daemonize
if unsafe { syscall::clone(CloneFlags::empty()).unwrap() }!= 0 {
return;
}
let _logger_ref = setup_logging();
let mut pcid_handle =
PcidServerHandle::connect_default().expect("nvmed: failed to setup channel to pcid");
let pci_config = pcid_handle
.fetch_config()
.expect("nvmed: failed to fetch config");
let bar = match pci_config.func.bars[0] {
PciBar::Memory(mem) => mem,
other => panic!("received a non-memory BAR ({:?})", other),
};
let bar_size = pci_config.func.bar_sizes[0];
let irq = pci_config.func.legacy_interrupt_line;
let mut name = pci_config.func.name();
name.push_str("_nvme");
log::info!("NVME PCI CONFIG: {:?}", pci_config);
let allocated_bars = AllocatedBars::default();
let address = unsafe {
syscall::physmap(
bar as usize,
bar_size as usize,
PHYSMAP_WRITE | PHYSMAP_NO_CACHE,
)
.expect("nvmed: failed to map address")
};
*allocated_bars.0[0].lock().unwrap() = Some(Bar {
physical: bar as usize,
bar_size: bar_size as usize,
ptr: NonNull::new(address as *mut u8).expect("Physmapping BAR gave nullptr"),
});
let event_fd = syscall::open("event:", syscall::O_RDWR | syscall::O_CLOEXEC)
.expect("nvmed: failed to open event queue");
let mut event_file = unsafe { File::from_raw_fd(event_fd as RawFd) };
let scheme_name = format!("disk/{}", name);
let socket_fd = syscall::open(
&format!(":{}", scheme_name),
syscall::O_RDWR | syscall::O_CREAT | syscall::O_NONBLOCK | syscall::O_CLOEXEC,
)
.expect("nvmed: failed to create disk scheme");
syscall::write(
event_fd,
&syscall::Event {
id: socket_fd,
flags: syscall::EVENT_READ,
data: 0,
},
)
.expect("nvmed: failed to watch disk scheme events");
let mut socket_file = unsafe { File::from_raw_fd(socket_fd as RawFd) };
let (reactor_sender, reactor_receiver) = crossbeam_channel::unbounded();
let (interrupt_method, interrupt_sources) =
get_int_method(&mut pcid_handle, &pci_config.func, &allocated_bars)
.expect("nvmed: failed to find a suitable interrupt method");
let mut nvme = Nvme::new(address, interrupt_method, pcid_handle, reactor_sender)
.expect("nvmed: failed to allocate driver data");
unsafe { nvme.init() }
log::debug!("Finished base initialization");
let nvme = Arc::new(nvme);
let reactor_thread = nvme::cq_reactor::start_cq_reactor_thread(Arc::clone(&nvme), interrupt_sources, reactor_receiver);
let namespaces = futures::executor::block_on(nvme.init_with_queues());
syscall::setrens(0, 0).expect("nvmed: failed to enter null namespace");
let mut scheme = DiskScheme::new(scheme_name, nvme, namespaces);
let mut todo = Vec::new();
'events: loop {
let mut event = Event::default();
if event_file
.read(&mut event)
.expect("nvmed: failed to read event queue")
== 0
{
break;
}
match event.data {
0 => loop {
let mut packet = Packet::default();
match socket_file.read(&mut packet) {
Ok(0) => break 'events,
Ok(_) => (),
Err(err) => match err.kind() {
ErrorKind::WouldBlock => break,
_ => Err(err).expect("nvmed: failed to read disk scheme"),
},
}
todo.push(packet);
},
unknown => {
panic!("nvmed: unknown event data {}", unknown);
}
}
let mut i = 0;
while i < todo.len() {
if let Some(a) = scheme.handle(&todo[i]) {
let mut packet = todo.remove(i);
packet.a = a;
socket_file
.write(&packet)
.expect("nvmed: failed to write disk scheme");
} else {
i += 1;
}
}
}
//TODO: destroy NVMe stuff
reactor_thread.join().expect("nvmed: failed to join reactor thread");
} | use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
let entry: &mut MsixTableEntry = &mut table_entries[0]; | random_line_split |
postgres.rs | use crate::{connect, connection_wrapper::Connection, error::quaint_error_to_connector_error, SqlFlavour};
use enumflags2::BitFlags;
use indoc::indoc;
use migration_connector::{ConnectorError, ConnectorResult, MigrationDirectory, MigrationFeature};
use quaint::{connector::PostgresUrl, error::ErrorKind as QuaintKind, prelude::SqlFamily};
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::collections::HashMap;
use url::Url;
use user_facing_errors::{
common::DatabaseDoesNotExist, introspection_engine::DatabaseSchemaInconsistent, migration_engine, KnownError,
UserFacingError,
};
#[derive(Debug)]
pub(crate) struct PostgresFlavour {
pub(crate) url: PostgresUrl,
features: BitFlags<MigrationFeature>,
}
impl PostgresFlavour {
pub fn new(url: PostgresUrl, features: BitFlags<MigrationFeature>) -> Self {
Self { url, features }
}
pub(crate) fn schema_name(&self) -> &str {
self.url.schema()
}
}
#[async_trait::async_trait]
impl SqlFlavour for PostgresFlavour {
#[tracing::instrument(skip(database_str))]
async fn create_database(&self, database_str: &str) -> ConnectorResult<String> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = self.url.dbname();
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
let mut database_already_exists_error = None;
match conn.raw_cmd(&query).await {
Ok(_) => (),
Err(err) if matches!(err.kind(), QuaintKind::DatabaseAlreadyExists {.. }) => {
database_already_exists_error = Some(err)
}
Err(err) if matches!(err.kind(), QuaintKind::UniqueConstraintViolation {.. }) => {
database_already_exists_error = Some(err)
}
Err(err) => return Err(err.into()),
};
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\";", &self.schema_name());
conn.raw_cmd(&schema_sql).await?;
if let Some(err) = database_already_exists_error {
return Err(err.into());
}
Ok(db_name.to_owned())
}
async fn create_imperative_migrations_table(&self, connection: &Connection) -> ConnectorResult<()> {
let sql = indoc! {r#"
CREATE TABLE _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMPTZ,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMPTZ,
started_at TIMESTAMPTZ NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);
"#};
Ok(connection.raw_cmd(sql).await?)
}
async fn describe_schema<'a>(&'a self, connection: &Connection) -> ConnectorResult<SqlSchema> {
sql_schema_describer::postgres::SqlSchemaDescriber::new(connection.quaint().clone())
.describe(connection.connection_info().schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => {
quaint_error_to_connector_error(err, connection.connection_info())
}
e @ DescriberErrorKind::CrossSchemaReference {.. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})
}
async fn drop_database(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = url.path().trim_start_matches('/').to_owned();
assert!(!db_name.is_empty(), "Database name should not be empty.");
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
conn.raw_cmd(&format!("DROP DATABASE \"{}\"", db_name)).await?;
Ok(())
}
#[tracing::instrument]
async fn ensure_connection_validity(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
let schema_exists_result = connection
.query_raw(
"SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = $1)",
&[schema_name.into()],
)
.await?;
if let Some(true) = schema_exists_result
.get(0)
.and_then(|row| row.at(0).and_then(|value| value.as_bool()))
{
return Ok(());
}
tracing::debug!(
"Detected that the `{schema_name}` schema does not exist on the target database. Attempting to create it.",
schema_name = schema_name,
);
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
async fn qe_setup(&self, database_str: &str) -> ConnectorResult<()> | conn.raw_cmd(&drop_and_recreate_schema).await?;
Ok(())
}
async fn reset(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
connection
.raw_cmd(&format!("DROP SCHEMA \"{}\" CASCADE", schema_name))
.await?;
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
fn sql_family(&self) -> SqlFamily {
SqlFamily::Postgres
}
#[tracing::instrument(skip(self, migrations, connection))]
async fn sql_schema_from_migration_history(
&self,
migrations: &[MigrationDirectory],
connection: &Connection,
) -> ConnectorResult<SqlSchema> {
let database_name = format!("prisma_migrations_shadow_database_{}", uuid::Uuid::new_v4());
let create_database = format!("CREATE DATABASE \"{}\"", database_name);
let create_schema = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", self.schema_name());
connection
.raw_cmd(&create_database)
.await
.map_err(ConnectorError::from)
.map_err(|err| err.into_shadow_db_creation_error())?;
let mut temporary_database_url = self.url.url().clone();
temporary_database_url.set_path(&format!("/{}", database_name));
let temporary_database_url = temporary_database_url.to_string();
tracing::debug!("Connecting to temporary database at {}", temporary_database_url);
// We go through the whole process without early return, then clean up
// the temporary database, and only then return the result. This avoids
// leaving shadow databases behind in case of e.g. faulty migrations.
let sql_schema_result = (|| {
async {
let temporary_database = crate::connect(&temporary_database_url).await?;
temporary_database.raw_cmd(&create_schema).await?;
for migration in migrations {
let script = migration.read_migration_script()?;
tracing::debug!(
"Applying migration `{}` to temporary database.",
migration.migration_name()
);
temporary_database
.raw_cmd(&script)
.await
.map_err(ConnectorError::from)
.map_err(|connector_error| {
connector_error.into_migration_does_not_apply_cleanly(migration.migration_name().to_owned())
})?;
}
// the connection to the temporary database is dropped at the end of
// the block.
self.describe_schema(&temporary_database).await
}
})()
.await;
let drop_database = format!("DROP DATABASE IF EXISTS \"{}\"", database_name);
connection.raw_cmd(&drop_database).await?;
sql_schema_result
}
fn features(&self) -> BitFlags<MigrationFeature> {
self.features
}
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(¶ms));
}
/// Try to connect as an admin to a postgres database. We try to pick a default database from which
/// we can create another database.
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Connection> {
let candidate_default_databases = &["postgres", "template1"];
let mut conn = None;
for database_name in candidate_default_databases {
url.set_path(&format!("/{}", database_name));
match connect(url.as_str()).await {
// If the database does not exist, try the next one.
Err(err) => match &err.error_code() {
Some(DatabaseDoesNotExist::ERROR_CODE) => (),
_ => {
conn = Some(Err(err));
break;
}
},
// If the outcome is anything else, use this.
other_outcome => {
conn = Some(other_outcome);
break;
}
}
}
let conn = conn.ok_or_else(|| {
ConnectorError::user_facing_error(migration_engine::DatabaseCreationFailed { database_error: "Prisma could not connect to a default database (`postgres` or `template1`), it cannot create the specified database.".to_owned() })
})??;
Ok(conn)
}
| {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let schema = self.url.schema();
let db_name = self.url.dbname();
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
); | identifier_body |
postgres.rs | use crate::{connect, connection_wrapper::Connection, error::quaint_error_to_connector_error, SqlFlavour};
use enumflags2::BitFlags;
use indoc::indoc;
use migration_connector::{ConnectorError, ConnectorResult, MigrationDirectory, MigrationFeature};
use quaint::{connector::PostgresUrl, error::ErrorKind as QuaintKind, prelude::SqlFamily};
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::collections::HashMap;
use url::Url;
use user_facing_errors::{
common::DatabaseDoesNotExist, introspection_engine::DatabaseSchemaInconsistent, migration_engine, KnownError,
UserFacingError,
};
#[derive(Debug)]
pub(crate) struct PostgresFlavour {
pub(crate) url: PostgresUrl,
features: BitFlags<MigrationFeature>,
}
impl PostgresFlavour {
pub fn new(url: PostgresUrl, features: BitFlags<MigrationFeature>) -> Self {
Self { url, features }
}
pub(crate) fn schema_name(&self) -> &str {
self.url.schema()
}
}
#[async_trait::async_trait]
impl SqlFlavour for PostgresFlavour {
#[tracing::instrument(skip(database_str))]
async fn create_database(&self, database_str: &str) -> ConnectorResult<String> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = self.url.dbname();
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
let mut database_already_exists_error = None;
match conn.raw_cmd(&query).await {
Ok(_) => (),
Err(err) if matches!(err.kind(), QuaintKind::DatabaseAlreadyExists {.. }) => {
database_already_exists_error = Some(err)
}
Err(err) if matches!(err.kind(), QuaintKind::UniqueConstraintViolation {.. }) => {
database_already_exists_error = Some(err)
}
Err(err) => return Err(err.into()),
};
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\";", &self.schema_name());
conn.raw_cmd(&schema_sql).await?;
if let Some(err) = database_already_exists_error {
return Err(err.into());
}
Ok(db_name.to_owned())
}
async fn create_imperative_migrations_table(&self, connection: &Connection) -> ConnectorResult<()> {
let sql = indoc! {r#"
CREATE TABLE _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMPTZ,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMPTZ,
started_at TIMESTAMPTZ NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);
"#};
Ok(connection.raw_cmd(sql).await?)
}
async fn describe_schema<'a>(&'a self, connection: &Connection) -> ConnectorResult<SqlSchema> {
sql_schema_describer::postgres::SqlSchemaDescriber::new(connection.quaint().clone())
.describe(connection.connection_info().schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => {
quaint_error_to_connector_error(err, connection.connection_info())
}
e @ DescriberErrorKind::CrossSchemaReference {.. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})
}
async fn drop_database(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = url.path().trim_start_matches('/').to_owned();
assert!(!db_name.is_empty(), "Database name should not be empty.");
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
conn.raw_cmd(&format!("DROP DATABASE \"{}\"", db_name)).await?;
Ok(())
}
#[tracing::instrument]
async fn ensure_connection_validity(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
let schema_exists_result = connection
.query_raw(
"SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = $1)",
&[schema_name.into()],
)
.await?;
if let Some(true) = schema_exists_result
.get(0)
.and_then(|row| row.at(0).and_then(|value| value.as_bool()))
{
return Ok(());
}
tracing::debug!(
"Detected that the `{schema_name}` schema does not exist on the target database. Attempting to create it.",
schema_name = schema_name,
);
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
async fn qe_setup(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let schema = self.url.schema();
let db_name = self.url.dbname();
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
);
conn.raw_cmd(&drop_and_recreate_schema).await?;
Ok(())
}
async fn reset(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
connection
.raw_cmd(&format!("DROP SCHEMA \"{}\" CASCADE", schema_name))
.await?;
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
fn sql_family(&self) -> SqlFamily {
SqlFamily::Postgres
}
#[tracing::instrument(skip(self, migrations, connection))]
async fn sql_schema_from_migration_history(
&self,
migrations: &[MigrationDirectory],
connection: &Connection,
) -> ConnectorResult<SqlSchema> {
let database_name = format!("prisma_migrations_shadow_database_{}", uuid::Uuid::new_v4());
let create_database = format!("CREATE DATABASE \"{}\"", database_name);
let create_schema = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", self.schema_name());
connection
.raw_cmd(&create_database)
.await
.map_err(ConnectorError::from)
.map_err(|err| err.into_shadow_db_creation_error())?;
let mut temporary_database_url = self.url.url().clone();
temporary_database_url.set_path(&format!("/{}", database_name));
let temporary_database_url = temporary_database_url.to_string();
tracing::debug!("Connecting to temporary database at {}", temporary_database_url);
// We go through the whole process without early return, then clean up
// the temporary database, and only then return the result. This avoids
// leaving shadow databases behind in case of e.g. faulty migrations.
let sql_schema_result = (|| {
async {
let temporary_database = crate::connect(&temporary_database_url).await?; |
temporary_database.raw_cmd(&create_schema).await?;
for migration in migrations {
let script = migration.read_migration_script()?;
tracing::debug!(
"Applying migration `{}` to temporary database.",
migration.migration_name()
);
temporary_database
.raw_cmd(&script)
.await
.map_err(ConnectorError::from)
.map_err(|connector_error| {
connector_error.into_migration_does_not_apply_cleanly(migration.migration_name().to_owned())
})?;
}
// the connection to the temporary database is dropped at the end of
// the block.
self.describe_schema(&temporary_database).await
}
})()
.await;
let drop_database = format!("DROP DATABASE IF EXISTS \"{}\"", database_name);
connection.raw_cmd(&drop_database).await?;
sql_schema_result
}
fn features(&self) -> BitFlags<MigrationFeature> {
self.features
}
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(¶ms));
}
/// Try to connect as an admin to a postgres database. We try to pick a default database from which
/// we can create another database.
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Connection> {
let candidate_default_databases = &["postgres", "template1"];
let mut conn = None;
for database_name in candidate_default_databases {
url.set_path(&format!("/{}", database_name));
match connect(url.as_str()).await {
// If the database does not exist, try the next one.
Err(err) => match &err.error_code() {
Some(DatabaseDoesNotExist::ERROR_CODE) => (),
_ => {
conn = Some(Err(err));
break;
}
},
// If the outcome is anything else, use this.
other_outcome => {
conn = Some(other_outcome);
break;
}
}
}
let conn = conn.ok_or_else(|| {
ConnectorError::user_facing_error(migration_engine::DatabaseCreationFailed { database_error: "Prisma could not connect to a default database (`postgres` or `template1`), it cannot create the specified database.".to_owned() })
})??;
Ok(conn)
} | random_line_split |
|
postgres.rs | use crate::{connect, connection_wrapper::Connection, error::quaint_error_to_connector_error, SqlFlavour};
use enumflags2::BitFlags;
use indoc::indoc;
use migration_connector::{ConnectorError, ConnectorResult, MigrationDirectory, MigrationFeature};
use quaint::{connector::PostgresUrl, error::ErrorKind as QuaintKind, prelude::SqlFamily};
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::collections::HashMap;
use url::Url;
use user_facing_errors::{
common::DatabaseDoesNotExist, introspection_engine::DatabaseSchemaInconsistent, migration_engine, KnownError,
UserFacingError,
};
#[derive(Debug)]
pub(crate) struct PostgresFlavour {
pub(crate) url: PostgresUrl,
features: BitFlags<MigrationFeature>,
}
impl PostgresFlavour {
pub fn new(url: PostgresUrl, features: BitFlags<MigrationFeature>) -> Self {
Self { url, features }
}
pub(crate) fn schema_name(&self) -> &str {
self.url.schema()
}
}
#[async_trait::async_trait]
impl SqlFlavour for PostgresFlavour {
#[tracing::instrument(skip(database_str))]
async fn create_database(&self, database_str: &str) -> ConnectorResult<String> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = self.url.dbname();
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
let mut database_already_exists_error = None;
match conn.raw_cmd(&query).await {
Ok(_) => (),
Err(err) if matches!(err.kind(), QuaintKind::DatabaseAlreadyExists {.. }) => {
database_already_exists_error = Some(err)
}
Err(err) if matches!(err.kind(), QuaintKind::UniqueConstraintViolation {.. }) => {
database_already_exists_error = Some(err)
}
Err(err) => return Err(err.into()),
};
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\";", &self.schema_name());
conn.raw_cmd(&schema_sql).await?;
if let Some(err) = database_already_exists_error {
return Err(err.into());
}
Ok(db_name.to_owned())
}
async fn create_imperative_migrations_table(&self, connection: &Connection) -> ConnectorResult<()> {
let sql = indoc! {r#"
CREATE TABLE _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMPTZ,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMPTZ,
started_at TIMESTAMPTZ NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);
"#};
Ok(connection.raw_cmd(sql).await?)
}
async fn describe_schema<'a>(&'a self, connection: &Connection) -> ConnectorResult<SqlSchema> {
sql_schema_describer::postgres::SqlSchemaDescriber::new(connection.quaint().clone())
.describe(connection.connection_info().schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => {
quaint_error_to_connector_error(err, connection.connection_info())
}
e @ DescriberErrorKind::CrossSchemaReference {.. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})
}
async fn drop_database(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = url.path().trim_start_matches('/').to_owned();
assert!(!db_name.is_empty(), "Database name should not be empty.");
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
conn.raw_cmd(&format!("DROP DATABASE \"{}\"", db_name)).await?;
Ok(())
}
#[tracing::instrument]
async fn ensure_connection_validity(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
let schema_exists_result = connection
.query_raw(
"SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = $1)",
&[schema_name.into()],
)
.await?;
if let Some(true) = schema_exists_result
.get(0)
.and_then(|row| row.at(0).and_then(|value| value.as_bool()))
{
return Ok(());
}
tracing::debug!(
"Detected that the `{schema_name}` schema does not exist on the target database. Attempting to create it.",
schema_name = schema_name,
);
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
async fn qe_setup(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let schema = self.url.schema();
let db_name = self.url.dbname();
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
);
conn.raw_cmd(&drop_and_recreate_schema).await?;
Ok(())
}
async fn | (&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
connection
.raw_cmd(&format!("DROP SCHEMA \"{}\" CASCADE", schema_name))
.await?;
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
fn sql_family(&self) -> SqlFamily {
SqlFamily::Postgres
}
#[tracing::instrument(skip(self, migrations, connection))]
async fn sql_schema_from_migration_history(
&self,
migrations: &[MigrationDirectory],
connection: &Connection,
) -> ConnectorResult<SqlSchema> {
let database_name = format!("prisma_migrations_shadow_database_{}", uuid::Uuid::new_v4());
let create_database = format!("CREATE DATABASE \"{}\"", database_name);
let create_schema = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", self.schema_name());
connection
.raw_cmd(&create_database)
.await
.map_err(ConnectorError::from)
.map_err(|err| err.into_shadow_db_creation_error())?;
let mut temporary_database_url = self.url.url().clone();
temporary_database_url.set_path(&format!("/{}", database_name));
let temporary_database_url = temporary_database_url.to_string();
tracing::debug!("Connecting to temporary database at {}", temporary_database_url);
// We go through the whole process without early return, then clean up
// the temporary database, and only then return the result. This avoids
// leaving shadow databases behind in case of e.g. faulty migrations.
let sql_schema_result = (|| {
async {
let temporary_database = crate::connect(&temporary_database_url).await?;
temporary_database.raw_cmd(&create_schema).await?;
for migration in migrations {
let script = migration.read_migration_script()?;
tracing::debug!(
"Applying migration `{}` to temporary database.",
migration.migration_name()
);
temporary_database
.raw_cmd(&script)
.await
.map_err(ConnectorError::from)
.map_err(|connector_error| {
connector_error.into_migration_does_not_apply_cleanly(migration.migration_name().to_owned())
})?;
}
// the connection to the temporary database is dropped at the end of
// the block.
self.describe_schema(&temporary_database).await
}
})()
.await;
let drop_database = format!("DROP DATABASE IF EXISTS \"{}\"", database_name);
connection.raw_cmd(&drop_database).await?;
sql_schema_result
}
fn features(&self) -> BitFlags<MigrationFeature> {
self.features
}
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(¶ms));
}
/// Try to connect as an admin to a postgres database. We try to pick a default database from which
/// we can create another database.
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Connection> {
let candidate_default_databases = &["postgres", "template1"];
let mut conn = None;
for database_name in candidate_default_databases {
url.set_path(&format!("/{}", database_name));
match connect(url.as_str()).await {
// If the database does not exist, try the next one.
Err(err) => match &err.error_code() {
Some(DatabaseDoesNotExist::ERROR_CODE) => (),
_ => {
conn = Some(Err(err));
break;
}
},
// If the outcome is anything else, use this.
other_outcome => {
conn = Some(other_outcome);
break;
}
}
}
let conn = conn.ok_or_else(|| {
ConnectorError::user_facing_error(migration_engine::DatabaseCreationFailed { database_error: "Prisma could not connect to a default database (`postgres` or `template1`), it cannot create the specified database.".to_owned() })
})??;
Ok(conn)
}
| reset | identifier_name |
postgres.rs | use crate::{connect, connection_wrapper::Connection, error::quaint_error_to_connector_error, SqlFlavour};
use enumflags2::BitFlags;
use indoc::indoc;
use migration_connector::{ConnectorError, ConnectorResult, MigrationDirectory, MigrationFeature};
use quaint::{connector::PostgresUrl, error::ErrorKind as QuaintKind, prelude::SqlFamily};
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::collections::HashMap;
use url::Url;
use user_facing_errors::{
common::DatabaseDoesNotExist, introspection_engine::DatabaseSchemaInconsistent, migration_engine, KnownError,
UserFacingError,
};
#[derive(Debug)]
pub(crate) struct PostgresFlavour {
pub(crate) url: PostgresUrl,
features: BitFlags<MigrationFeature>,
}
impl PostgresFlavour {
pub fn new(url: PostgresUrl, features: BitFlags<MigrationFeature>) -> Self {
Self { url, features }
}
pub(crate) fn schema_name(&self) -> &str {
self.url.schema()
}
}
#[async_trait::async_trait]
impl SqlFlavour for PostgresFlavour {
#[tracing::instrument(skip(database_str))]
async fn create_database(&self, database_str: &str) -> ConnectorResult<String> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = self.url.dbname();
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
let mut database_already_exists_error = None;
match conn.raw_cmd(&query).await {
Ok(_) => (),
Err(err) if matches!(err.kind(), QuaintKind::DatabaseAlreadyExists {.. }) => {
database_already_exists_error = Some(err)
}
Err(err) if matches!(err.kind(), QuaintKind::UniqueConstraintViolation {.. }) => |
Err(err) => return Err(err.into()),
};
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\";", &self.schema_name());
conn.raw_cmd(&schema_sql).await?;
if let Some(err) = database_already_exists_error {
return Err(err.into());
}
Ok(db_name.to_owned())
}
async fn create_imperative_migrations_table(&self, connection: &Connection) -> ConnectorResult<()> {
let sql = indoc! {r#"
CREATE TABLE _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMPTZ,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMPTZ,
started_at TIMESTAMPTZ NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);
"#};
Ok(connection.raw_cmd(sql).await?)
}
async fn describe_schema<'a>(&'a self, connection: &Connection) -> ConnectorResult<SqlSchema> {
sql_schema_describer::postgres::SqlSchemaDescriber::new(connection.quaint().clone())
.describe(connection.connection_info().schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => {
quaint_error_to_connector_error(err, connection.connection_info())
}
e @ DescriberErrorKind::CrossSchemaReference {.. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})
}
async fn drop_database(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = url.path().trim_start_matches('/').to_owned();
assert!(!db_name.is_empty(), "Database name should not be empty.");
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
conn.raw_cmd(&format!("DROP DATABASE \"{}\"", db_name)).await?;
Ok(())
}
#[tracing::instrument]
async fn ensure_connection_validity(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
let schema_exists_result = connection
.query_raw(
"SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = $1)",
&[schema_name.into()],
)
.await?;
if let Some(true) = schema_exists_result
.get(0)
.and_then(|row| row.at(0).and_then(|value| value.as_bool()))
{
return Ok(());
}
tracing::debug!(
"Detected that the `{schema_name}` schema does not exist on the target database. Attempting to create it.",
schema_name = schema_name,
);
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
async fn qe_setup(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let schema = self.url.schema();
let db_name = self.url.dbname();
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
);
conn.raw_cmd(&drop_and_recreate_schema).await?;
Ok(())
}
async fn reset(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
connection
.raw_cmd(&format!("DROP SCHEMA \"{}\" CASCADE", schema_name))
.await?;
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
fn sql_family(&self) -> SqlFamily {
SqlFamily::Postgres
}
#[tracing::instrument(skip(self, migrations, connection))]
async fn sql_schema_from_migration_history(
&self,
migrations: &[MigrationDirectory],
connection: &Connection,
) -> ConnectorResult<SqlSchema> {
let database_name = format!("prisma_migrations_shadow_database_{}", uuid::Uuid::new_v4());
let create_database = format!("CREATE DATABASE \"{}\"", database_name);
let create_schema = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", self.schema_name());
connection
.raw_cmd(&create_database)
.await
.map_err(ConnectorError::from)
.map_err(|err| err.into_shadow_db_creation_error())?;
let mut temporary_database_url = self.url.url().clone();
temporary_database_url.set_path(&format!("/{}", database_name));
let temporary_database_url = temporary_database_url.to_string();
tracing::debug!("Connecting to temporary database at {}", temporary_database_url);
// We go through the whole process without early return, then clean up
// the temporary database, and only then return the result. This avoids
// leaving shadow databases behind in case of e.g. faulty migrations.
let sql_schema_result = (|| {
async {
let temporary_database = crate::connect(&temporary_database_url).await?;
temporary_database.raw_cmd(&create_schema).await?;
for migration in migrations {
let script = migration.read_migration_script()?;
tracing::debug!(
"Applying migration `{}` to temporary database.",
migration.migration_name()
);
temporary_database
.raw_cmd(&script)
.await
.map_err(ConnectorError::from)
.map_err(|connector_error| {
connector_error.into_migration_does_not_apply_cleanly(migration.migration_name().to_owned())
})?;
}
// the connection to the temporary database is dropped at the end of
// the block.
self.describe_schema(&temporary_database).await
}
})()
.await;
let drop_database = format!("DROP DATABASE IF EXISTS \"{}\"", database_name);
connection.raw_cmd(&drop_database).await?;
sql_schema_result
}
fn features(&self) -> BitFlags<MigrationFeature> {
self.features
}
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(¶ms));
}
/// Try to connect as an admin to a postgres database. We try to pick a default database from which
/// we can create another database.
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Connection> {
let candidate_default_databases = &["postgres", "template1"];
let mut conn = None;
for database_name in candidate_default_databases {
url.set_path(&format!("/{}", database_name));
match connect(url.as_str()).await {
// If the database does not exist, try the next one.
Err(err) => match &err.error_code() {
Some(DatabaseDoesNotExist::ERROR_CODE) => (),
_ => {
conn = Some(Err(err));
break;
}
},
// If the outcome is anything else, use this.
other_outcome => {
conn = Some(other_outcome);
break;
}
}
}
let conn = conn.ok_or_else(|| {
ConnectorError::user_facing_error(migration_engine::DatabaseCreationFailed { database_error: "Prisma could not connect to a default database (`postgres` or `template1`), it cannot create the specified database.".to_owned() })
})??;
Ok(conn)
}
| {
database_already_exists_error = Some(err)
} | conditional_block |
bench.rs | use super::config::*;
use super::errors::*;
use super::out;
use super::symbols;
use bencher::stats::Summary;
use libc::c_void;
#[cfg(unix)]
use libc::{RTLD_GLOBAL, RTLD_LAZY, RTLD_LOCAL, RTLD_NOW};
#[cfg(unix)]
use libloading;
use libloading::{Library, Symbol};
use precision::{self, Elapsed, Precision};
use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::Path;
use std::process::Command;
use std::ptr;
const TEST_ABI_VERSION: u64 = 0x01;
const TEST_LIBRARIES_TABLE_SYMBOL: &[u8] = b"tests_config";
pub type TestCtx = *mut c_void;
pub type TestSetupFn = unsafe extern "C" fn(TestCtx, *mut TestCtx);
pub type TestBodyFn = unsafe extern "C" fn(TestCtx);
pub type TestTeardownFn = unsafe extern "C" fn(TestCtx);
/// C structure to set up a global context for the test suite
#[repr(C)]
struct TestsConfig {
global_setup: Option<unsafe extern "C" fn(*mut TestCtx)>,
global_teardown: Option<unsafe extern "C" fn(TestCtx)>,
version: u64,
}
/// A named test body function
#[derive(Clone, Debug)]
pub struct TestBody {
pub name: String,
pub body_fn: TestBodyFn,
}
/// An individual test, with function pointers for each step
#[derive(Clone, Debug)]
pub struct Test {
pub name: String,
pub setup_fn: Option<TestSetupFn>,
pub bodies: Vec<TestBody>,
pub teardown_fn: Option<TestTeardownFn>,
}
/// Measurements for a "body" of a test
#[derive(Clone)]
pub struct TestBodySummary {
pub name: String,
pub summary: Summary,
}
/// The outcome of a test
#[derive(Clone)]
struct TestResult {
name: String,
grand_summary: Summary,
bodies_summary: Vec<TestBodySummary>,
}
/// The outcome of a test, without the name of the test
pub struct AnonymousTestResult {
pub grand_summary: Summary,
pub bodies_summary: Vec<TestBodySummary>,
}
impl Default for AnonymousTestResult {
fn default() -> Self {
Self {
grand_summary: Summary::new(&[0.0]),
bodies_summary: vec![],
}
}
}
impl From<TestResult> for AnonymousTestResult {
fn from(test_result: TestResult) -> Self {
AnonymousTestResult {
grand_summary: test_result.grand_summary,
bodies_summary: test_result.bodies_summary,
}
}
}
/// Environment for a single test
#[derive(Clone)]
struct TestBodiesBench {
precision: Precision,
ctx: TestCtx,
bodies: Vec<unsafe extern "C" fn(TestCtx)>,
}
#[derive(Default, Debug, Clone)]
pub struct Sample<T>(Vec<T>);
impl<T> Sample<T> {
pub fn empty() -> Self {
Sample(vec![])
}
}
pub trait Runnable<Ret> {
fn setup(&mut self) {}
fn teardown(&mut self) {}
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> Ret + 't>>;
}
impl TestBodiesBench {
#[inline]
fn body(&self, body_id: usize) {
unsafe { (self.bodies[body_id])(self.ctx) }
}
}
impl Runnable<()> for TestBodiesBench {
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> () + 't>> {
let mut fns: Vec<Box<dyn Fn(usize) -> () + 't>> = vec![];
for _ in 0..self.bodies.len() {
let this = self.clone();
fns.push(Box::new(move |body_id| this.body(body_id)))
}
fns
}
}
pub struct AdaptiveRunner {
round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: Precision,
}
#[derive(Clone)]
pub struct RunnerResult {
pub summaries: Vec<Summary>,
pub grand_summary: Summary,
}
impl AdaptiveRunner {
pub fn new(
initial_round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: &Precision,
) -> Self {
AdaptiveRunner {
round_size: initial_round_size,
min_sample_size,
min_run_time_ms,
max_run_time_ms,
precision: precision.clone(),
}
}
pub fn bench<Target, Ret>(&self, target: &mut Target) -> RunnerResult
where
Target: Runnable<Ret>,
{
let mut sample_for_all_bodies: Sample<Elapsed> = Sample::empty();
let mut samples: Vec<Sample<Elapsed>> = vec![];
let bodies = target.bodies();
samples.resize(bodies.len(), Sample::empty());
let mut round_size = self.round_size;
let ts_bench_start = self.precision.now();
let mut sample_id = 0;
loop {
let mut elapsed_vec: Vec<Elapsed> = vec![];
elapsed_vec.resize(bodies.len(), Elapsed::new());
for _ in 0..round_size {
for (body_id, body) in bodies.iter().enumerate() {
let ts_start = self.precision.now();
body(body_id);
let ts_end = self.precision.now();
elapsed_vec[body_id] += ts_end - ts_start;
}
}
let mut elapsed_for_all_bodies = Elapsed::new();
for (body_id, elapsed) in elapsed_vec.into_iter().enumerate() {
samples[body_id]
.0
.push(Elapsed::from_ticks(elapsed.ticks() / round_size as u64));
elapsed_for_all_bodies += elapsed;
}
sample_for_all_bodies.0.push(Elapsed::from_ticks(
elapsed_for_all_bodies.ticks() / round_size as u64,
));
let elapsed_total = (self.precision.now() - ts_bench_start).as_millis(&self.precision);
if elapsed_total < self.min_run_time_ms {
round_size = round_size.saturating_add(round_size);
continue;
}
if elapsed_total > self.max_run_time_ms {
break;
}
sample_id += 1;
if sample_id >= self.min_sample_size {
break;
}
}
let summaries: Vec<_> = samples
.into_iter()
.map(|sample| {
Summary::new(
sample
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
)
})
.collect();
let grand_summary = Summary::new(
sample_for_all_bodies
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
);
RunnerResult {
summaries,
grand_summary,
}
}
}
/// Run an individual test
fn run_test(
config: &Config,
precision: &Precision,
global_ctx: TestCtx,
test: &Test,
) -> Result<TestResult, BenchError> {
let mut ctx: TestCtx = ptr::null_mut();
if let Some(setup) = (*test).setup_fn {
unsafe { setup(global_ctx, &mut ctx) }
}
let bench_runner = AdaptiveRunner::new(
config
.initial_round_size
.unwrap_or(DEFAULT_INITIAL_ROUND_SIZE),
config.min_sample_size.unwrap_or(DEFAULT_MIN_SAMPLE_SIZE),
config.min_run_time_ms.unwrap_or(DEFAULT_MIN_RUN_TIME_MS),
config.max_run_time_ms.unwrap_or(DEFAULT_MAX_RUN_TIME_MS),
precision,
);
let mut test_bodies_bench = TestBodiesBench {
precision: precision.clone(),
ctx,
bodies: (*test)
.bodies
.clone()
.iter()
.map(|body| body.body_fn)
.collect(),
};
let bench_result = bench_runner.bench(&mut test_bodies_bench);
let mut bodies_summary = vec![];
for (body_id, body) in (*test).bodies.iter().enumerate() {
let test_body_summary = TestBodySummary {
name: body.name.clone(),
summary: bench_result.summaries[body_id].clone(),
};
bodies_summary.push(test_body_summary);
}
unsafe { (*test).teardown_fn.map(|teardown_fn| teardown_fn(ctx)) };
let grand_summary = bench_result.grand_summary;
let name = test.name.clone();
Ok(TestResult {
name,
grand_summary,
bodies_summary,
})
}
/// Run a sequence of tests
fn run_tests(
config: &Config,
global_ctx: TestCtx,
tests: Vec<Test>,
) -> Result<Vec<TestResult>, BenchError> {
let mut test_results: Vec<TestResult> = vec![];
let precision = Precision::new(precision::Config::default())?;
for test in tests {
eprintln!(" - {}", test.name);
let test_result = run_test(config, &precision, global_ctx, &test)?;
test_results.push(test_result);
}
Ok(test_results)
}
#[cfg(unix)]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
rtld_lazy: bool,
rtld_global: bool,
) -> Result<Library, BenchError> {
let mut flags = 0;
if rtld_lazy {
flags |= RTLD_LAZY;
} else {
flags |= RTLD_NOW;
}
if rtld_global {
flags |= RTLD_GLOBAL;
} else {
flags |= RTLD_LOCAL;
}
let library = libloading::os::unix::Library::open(Some(library_path), flags)?.into();
Ok(library)
}
#[cfg(not(unix))]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
_rtld_lazy: bool,
_rtld_global: bool,
) -> Result<Library, BenchError> {
Ok(Library::new(library_path)?)
}
/// Bench all functions contained in a shared library
fn bench_library(config: &Config, library_path: &Path) -> Result<Vec<TestResult>, BenchError> {
let tests_symbols = symbols::extract_tests_symbols(library_path)?;
let library = load_library(library_path, false, true)?;
let tests_runner: Symbol<'_, &TestsConfig> =
unsafe { library.get(TEST_LIBRARIES_TABLE_SYMBOL) }.map_err(BenchError::from)?;
if tests_runner.version!= TEST_ABI_VERSION {
return Err(BenchError::ABIError("Incompatible ABI version"));
}
let tests = symbols::resolve(&tests_symbols, &library);
let mut global_ctx: TestCtx = ptr::null_mut();
if let Some(global_setup) = tests_runner.global_setup |
let test_results = run_tests(config, global_ctx, tests)?;
if let Some(global_teardown) = tests_runner.global_teardown {
unsafe { global_teardown(global_ctx) }
}
Ok(test_results)
}
/// Run an optional guard command
/// Returns `false` on success (return code = `0`), `true` on failure
fn disabled_due_to_guard(guard: &[String]) -> bool {
match Command::new(&guard[0]).args(&guard[1..]).status() {
Err(e) => {
eprintln!(
"Cannot run the [{}] guard script: [{}]",
&guard[0],
e.to_string()
);
true
}
Ok(status) =>!status.success(),
}
}
/// Entry point to run benchmarks according to a given configuration
pub fn bench(config: &Config) -> Result<(), BenchError> {
let mut test_suites_results: HashMap<String, HashMap<String, AnonymousTestResult>> =
HashMap::new();
for test_suite in &config.test_suites {
if let Some(guard) = &test_suite.guard {
if!guard.is_empty() && disabled_due_to_guard(guard) {
continue;
}
}
eprintln!("{}:", test_suite.name);
let library_path = &test_suite.library_path;
let test_results = bench_library(&config, Path::new(library_path))?;
for test_result in test_results {
let test_name_key = test_result.name.clone();
let anonymous_test_result = test_result.into();
if!test_suites_results.contains_key(&test_name_key) {
test_suites_results.insert(test_name_key.clone(), HashMap::new());
}
let results_for_test_name = test_suites_results.get_mut(&test_name_key).unwrap();
results_for_test_name.insert(test_suite.name.clone(), anonymous_test_result);
}
}
out::Out::new(test_suites_results).out_vec(&config.output)
}
| {
unsafe { global_setup(&mut global_ctx) }
} | conditional_block |
bench.rs | use super::config::*;
use super::errors::*;
use super::out;
use super::symbols;
use bencher::stats::Summary;
use libc::c_void;
#[cfg(unix)]
use libc::{RTLD_GLOBAL, RTLD_LAZY, RTLD_LOCAL, RTLD_NOW};
#[cfg(unix)]
use libloading;
use libloading::{Library, Symbol};
use precision::{self, Elapsed, Precision};
use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::Path;
use std::process::Command;
use std::ptr;
const TEST_ABI_VERSION: u64 = 0x01;
const TEST_LIBRARIES_TABLE_SYMBOL: &[u8] = b"tests_config";
pub type TestCtx = *mut c_void;
pub type TestSetupFn = unsafe extern "C" fn(TestCtx, *mut TestCtx);
pub type TestBodyFn = unsafe extern "C" fn(TestCtx);
pub type TestTeardownFn = unsafe extern "C" fn(TestCtx);
/// C structure to set up a global context for the test suite
#[repr(C)]
struct TestsConfig {
global_setup: Option<unsafe extern "C" fn(*mut TestCtx)>,
global_teardown: Option<unsafe extern "C" fn(TestCtx)>,
version: u64,
}
/// A named test body function
#[derive(Clone, Debug)]
pub struct TestBody {
pub name: String,
pub body_fn: TestBodyFn,
}
/// An individual test, with function pointers for each step
#[derive(Clone, Debug)]
pub struct Test {
pub name: String,
pub setup_fn: Option<TestSetupFn>,
pub bodies: Vec<TestBody>,
pub teardown_fn: Option<TestTeardownFn>,
}
/// Measurements for a "body" of a test
#[derive(Clone)]
pub struct TestBodySummary {
pub name: String,
pub summary: Summary,
}
/// The outcome of a test
#[derive(Clone)]
struct TestResult {
name: String,
grand_summary: Summary,
bodies_summary: Vec<TestBodySummary>,
}
/// The outcome of a test, without the name of the test
pub struct | {
pub grand_summary: Summary,
pub bodies_summary: Vec<TestBodySummary>,
}
impl Default for AnonymousTestResult {
fn default() -> Self {
Self {
grand_summary: Summary::new(&[0.0]),
bodies_summary: vec![],
}
}
}
impl From<TestResult> for AnonymousTestResult {
fn from(test_result: TestResult) -> Self {
AnonymousTestResult {
grand_summary: test_result.grand_summary,
bodies_summary: test_result.bodies_summary,
}
}
}
/// Environment for a single test
#[derive(Clone)]
struct TestBodiesBench {
precision: Precision,
ctx: TestCtx,
bodies: Vec<unsafe extern "C" fn(TestCtx)>,
}
#[derive(Default, Debug, Clone)]
pub struct Sample<T>(Vec<T>);
impl<T> Sample<T> {
pub fn empty() -> Self {
Sample(vec![])
}
}
pub trait Runnable<Ret> {
fn setup(&mut self) {}
fn teardown(&mut self) {}
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> Ret + 't>>;
}
impl TestBodiesBench {
#[inline]
fn body(&self, body_id: usize) {
unsafe { (self.bodies[body_id])(self.ctx) }
}
}
impl Runnable<()> for TestBodiesBench {
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> () + 't>> {
let mut fns: Vec<Box<dyn Fn(usize) -> () + 't>> = vec![];
for _ in 0..self.bodies.len() {
let this = self.clone();
fns.push(Box::new(move |body_id| this.body(body_id)))
}
fns
}
}
pub struct AdaptiveRunner {
round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: Precision,
}
#[derive(Clone)]
pub struct RunnerResult {
pub summaries: Vec<Summary>,
pub grand_summary: Summary,
}
impl AdaptiveRunner {
pub fn new(
initial_round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: &Precision,
) -> Self {
AdaptiveRunner {
round_size: initial_round_size,
min_sample_size,
min_run_time_ms,
max_run_time_ms,
precision: precision.clone(),
}
}
pub fn bench<Target, Ret>(&self, target: &mut Target) -> RunnerResult
where
Target: Runnable<Ret>,
{
let mut sample_for_all_bodies: Sample<Elapsed> = Sample::empty();
let mut samples: Vec<Sample<Elapsed>> = vec![];
let bodies = target.bodies();
samples.resize(bodies.len(), Sample::empty());
let mut round_size = self.round_size;
let ts_bench_start = self.precision.now();
let mut sample_id = 0;
loop {
let mut elapsed_vec: Vec<Elapsed> = vec![];
elapsed_vec.resize(bodies.len(), Elapsed::new());
for _ in 0..round_size {
for (body_id, body) in bodies.iter().enumerate() {
let ts_start = self.precision.now();
body(body_id);
let ts_end = self.precision.now();
elapsed_vec[body_id] += ts_end - ts_start;
}
}
let mut elapsed_for_all_bodies = Elapsed::new();
for (body_id, elapsed) in elapsed_vec.into_iter().enumerate() {
samples[body_id]
.0
.push(Elapsed::from_ticks(elapsed.ticks() / round_size as u64));
elapsed_for_all_bodies += elapsed;
}
sample_for_all_bodies.0.push(Elapsed::from_ticks(
elapsed_for_all_bodies.ticks() / round_size as u64,
));
let elapsed_total = (self.precision.now() - ts_bench_start).as_millis(&self.precision);
if elapsed_total < self.min_run_time_ms {
round_size = round_size.saturating_add(round_size);
continue;
}
if elapsed_total > self.max_run_time_ms {
break;
}
sample_id += 1;
if sample_id >= self.min_sample_size {
break;
}
}
let summaries: Vec<_> = samples
.into_iter()
.map(|sample| {
Summary::new(
sample
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
)
})
.collect();
let grand_summary = Summary::new(
sample_for_all_bodies
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
);
RunnerResult {
summaries,
grand_summary,
}
}
}
/// Run an individual test
fn run_test(
config: &Config,
precision: &Precision,
global_ctx: TestCtx,
test: &Test,
) -> Result<TestResult, BenchError> {
let mut ctx: TestCtx = ptr::null_mut();
if let Some(setup) = (*test).setup_fn {
unsafe { setup(global_ctx, &mut ctx) }
}
let bench_runner = AdaptiveRunner::new(
config
.initial_round_size
.unwrap_or(DEFAULT_INITIAL_ROUND_SIZE),
config.min_sample_size.unwrap_or(DEFAULT_MIN_SAMPLE_SIZE),
config.min_run_time_ms.unwrap_or(DEFAULT_MIN_RUN_TIME_MS),
config.max_run_time_ms.unwrap_or(DEFAULT_MAX_RUN_TIME_MS),
precision,
);
let mut test_bodies_bench = TestBodiesBench {
precision: precision.clone(),
ctx,
bodies: (*test)
.bodies
.clone()
.iter()
.map(|body| body.body_fn)
.collect(),
};
let bench_result = bench_runner.bench(&mut test_bodies_bench);
let mut bodies_summary = vec![];
for (body_id, body) in (*test).bodies.iter().enumerate() {
let test_body_summary = TestBodySummary {
name: body.name.clone(),
summary: bench_result.summaries[body_id].clone(),
};
bodies_summary.push(test_body_summary);
}
unsafe { (*test).teardown_fn.map(|teardown_fn| teardown_fn(ctx)) };
let grand_summary = bench_result.grand_summary;
let name = test.name.clone();
Ok(TestResult {
name,
grand_summary,
bodies_summary,
})
}
/// Run a sequence of tests
fn run_tests(
config: &Config,
global_ctx: TestCtx,
tests: Vec<Test>,
) -> Result<Vec<TestResult>, BenchError> {
let mut test_results: Vec<TestResult> = vec![];
let precision = Precision::new(precision::Config::default())?;
for test in tests {
eprintln!(" - {}", test.name);
let test_result = run_test(config, &precision, global_ctx, &test)?;
test_results.push(test_result);
}
Ok(test_results)
}
#[cfg(unix)]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
rtld_lazy: bool,
rtld_global: bool,
) -> Result<Library, BenchError> {
let mut flags = 0;
if rtld_lazy {
flags |= RTLD_LAZY;
} else {
flags |= RTLD_NOW;
}
if rtld_global {
flags |= RTLD_GLOBAL;
} else {
flags |= RTLD_LOCAL;
}
let library = libloading::os::unix::Library::open(Some(library_path), flags)?.into();
Ok(library)
}
#[cfg(not(unix))]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
_rtld_lazy: bool,
_rtld_global: bool,
) -> Result<Library, BenchError> {
Ok(Library::new(library_path)?)
}
/// Bench all functions contained in a shared library
fn bench_library(config: &Config, library_path: &Path) -> Result<Vec<TestResult>, BenchError> {
let tests_symbols = symbols::extract_tests_symbols(library_path)?;
let library = load_library(library_path, false, true)?;
let tests_runner: Symbol<'_, &TestsConfig> =
unsafe { library.get(TEST_LIBRARIES_TABLE_SYMBOL) }.map_err(BenchError::from)?;
if tests_runner.version!= TEST_ABI_VERSION {
return Err(BenchError::ABIError("Incompatible ABI version"));
}
let tests = symbols::resolve(&tests_symbols, &library);
let mut global_ctx: TestCtx = ptr::null_mut();
if let Some(global_setup) = tests_runner.global_setup {
unsafe { global_setup(&mut global_ctx) }
}
let test_results = run_tests(config, global_ctx, tests)?;
if let Some(global_teardown) = tests_runner.global_teardown {
unsafe { global_teardown(global_ctx) }
}
Ok(test_results)
}
/// Run an optional guard command
/// Returns `false` on success (return code = `0`), `true` on failure
fn disabled_due_to_guard(guard: &[String]) -> bool {
match Command::new(&guard[0]).args(&guard[1..]).status() {
Err(e) => {
eprintln!(
"Cannot run the [{}] guard script: [{}]",
&guard[0],
e.to_string()
);
true
}
Ok(status) =>!status.success(),
}
}
/// Entry point to run benchmarks according to a given configuration
pub fn bench(config: &Config) -> Result<(), BenchError> {
let mut test_suites_results: HashMap<String, HashMap<String, AnonymousTestResult>> =
HashMap::new();
for test_suite in &config.test_suites {
if let Some(guard) = &test_suite.guard {
if!guard.is_empty() && disabled_due_to_guard(guard) {
continue;
}
}
eprintln!("{}:", test_suite.name);
let library_path = &test_suite.library_path;
let test_results = bench_library(&config, Path::new(library_path))?;
for test_result in test_results {
let test_name_key = test_result.name.clone();
let anonymous_test_result = test_result.into();
if!test_suites_results.contains_key(&test_name_key) {
test_suites_results.insert(test_name_key.clone(), HashMap::new());
}
let results_for_test_name = test_suites_results.get_mut(&test_name_key).unwrap();
results_for_test_name.insert(test_suite.name.clone(), anonymous_test_result);
}
}
out::Out::new(test_suites_results).out_vec(&config.output)
}
| AnonymousTestResult | identifier_name |
bench.rs | use super::config::*;
use super::errors::*;
use super::out;
use super::symbols;
use bencher::stats::Summary;
use libc::c_void;
#[cfg(unix)]
use libc::{RTLD_GLOBAL, RTLD_LAZY, RTLD_LOCAL, RTLD_NOW};
#[cfg(unix)]
use libloading;
use libloading::{Library, Symbol};
use precision::{self, Elapsed, Precision};
use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::Path;
use std::process::Command;
use std::ptr;
const TEST_ABI_VERSION: u64 = 0x01;
const TEST_LIBRARIES_TABLE_SYMBOL: &[u8] = b"tests_config";
pub type TestCtx = *mut c_void;
pub type TestSetupFn = unsafe extern "C" fn(TestCtx, *mut TestCtx);
pub type TestBodyFn = unsafe extern "C" fn(TestCtx);
pub type TestTeardownFn = unsafe extern "C" fn(TestCtx);
/// C structure to set up a global context for the test suite
#[repr(C)]
struct TestsConfig {
global_setup: Option<unsafe extern "C" fn(*mut TestCtx)>,
global_teardown: Option<unsafe extern "C" fn(TestCtx)>,
version: u64,
}
/// A named test body function
#[derive(Clone, Debug)]
pub struct TestBody {
pub name: String,
pub body_fn: TestBodyFn,
}
/// An individual test, with function pointers for each step
#[derive(Clone, Debug)]
pub struct Test {
pub name: String,
pub setup_fn: Option<TestSetupFn>,
pub bodies: Vec<TestBody>,
pub teardown_fn: Option<TestTeardownFn>,
}
/// Measurements for a "body" of a test
#[derive(Clone)]
pub struct TestBodySummary {
pub name: String,
pub summary: Summary,
}
/// The outcome of a test
#[derive(Clone)]
struct TestResult {
name: String,
grand_summary: Summary,
bodies_summary: Vec<TestBodySummary>,
}
/// The outcome of a test, without the name of the test
pub struct AnonymousTestResult {
pub grand_summary: Summary,
pub bodies_summary: Vec<TestBodySummary>,
}
impl Default for AnonymousTestResult {
fn default() -> Self {
Self {
grand_summary: Summary::new(&[0.0]),
bodies_summary: vec![],
}
}
}
impl From<TestResult> for AnonymousTestResult {
fn from(test_result: TestResult) -> Self {
AnonymousTestResult {
grand_summary: test_result.grand_summary,
bodies_summary: test_result.bodies_summary,
}
}
}
/// Environment for a single test
#[derive(Clone)]
struct TestBodiesBench {
precision: Precision,
ctx: TestCtx,
bodies: Vec<unsafe extern "C" fn(TestCtx)>,
}
#[derive(Default, Debug, Clone)]
pub struct Sample<T>(Vec<T>);
impl<T> Sample<T> {
pub fn empty() -> Self {
Sample(vec![])
}
}
pub trait Runnable<Ret> {
fn setup(&mut self) {}
fn teardown(&mut self) {}
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> Ret + 't>>;
}
impl TestBodiesBench {
#[inline]
fn body(&self, body_id: usize) {
unsafe { (self.bodies[body_id])(self.ctx) }
}
}
impl Runnable<()> for TestBodiesBench {
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> () + 't>> {
let mut fns: Vec<Box<dyn Fn(usize) -> () + 't>> = vec![];
for _ in 0..self.bodies.len() {
let this = self.clone();
fns.push(Box::new(move |body_id| this.body(body_id)))
}
fns
}
}
pub struct AdaptiveRunner {
round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: Precision,
}
#[derive(Clone)]
pub struct RunnerResult {
pub summaries: Vec<Summary>,
pub grand_summary: Summary,
}
impl AdaptiveRunner {
pub fn new(
initial_round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: &Precision,
) -> Self {
AdaptiveRunner {
round_size: initial_round_size,
min_sample_size,
min_run_time_ms,
max_run_time_ms,
precision: precision.clone(),
}
}
pub fn bench<Target, Ret>(&self, target: &mut Target) -> RunnerResult
where
Target: Runnable<Ret>,
{
let mut sample_for_all_bodies: Sample<Elapsed> = Sample::empty();
let mut samples: Vec<Sample<Elapsed>> = vec![];
let bodies = target.bodies();
samples.resize(bodies.len(), Sample::empty());
let mut round_size = self.round_size;
let ts_bench_start = self.precision.now();
let mut sample_id = 0;
loop {
let mut elapsed_vec: Vec<Elapsed> = vec![];
elapsed_vec.resize(bodies.len(), Elapsed::new());
for _ in 0..round_size {
for (body_id, body) in bodies.iter().enumerate() {
let ts_start = self.precision.now();
body(body_id);
let ts_end = self.precision.now();
elapsed_vec[body_id] += ts_end - ts_start;
}
}
let mut elapsed_for_all_bodies = Elapsed::new();
for (body_id, elapsed) in elapsed_vec.into_iter().enumerate() {
samples[body_id]
.0
.push(Elapsed::from_ticks(elapsed.ticks() / round_size as u64));
elapsed_for_all_bodies += elapsed;
}
sample_for_all_bodies.0.push(Elapsed::from_ticks(
elapsed_for_all_bodies.ticks() / round_size as u64,
));
let elapsed_total = (self.precision.now() - ts_bench_start).as_millis(&self.precision);
if elapsed_total < self.min_run_time_ms {
round_size = round_size.saturating_add(round_size);
continue;
}
if elapsed_total > self.max_run_time_ms {
break;
}
sample_id += 1;
if sample_id >= self.min_sample_size {
break;
}
}
let summaries: Vec<_> = samples
.into_iter()
.map(|sample| {
Summary::new(
sample
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
)
})
.collect();
let grand_summary = Summary::new(
sample_for_all_bodies
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
);
RunnerResult {
summaries,
grand_summary,
}
}
}
/// Run an individual test
fn run_test(
config: &Config,
precision: &Precision,
global_ctx: TestCtx,
test: &Test,
) -> Result<TestResult, BenchError> {
let mut ctx: TestCtx = ptr::null_mut();
if let Some(setup) = (*test).setup_fn {
unsafe { setup(global_ctx, &mut ctx) }
}
let bench_runner = AdaptiveRunner::new(
config
.initial_round_size
.unwrap_or(DEFAULT_INITIAL_ROUND_SIZE),
config.min_sample_size.unwrap_or(DEFAULT_MIN_SAMPLE_SIZE),
config.min_run_time_ms.unwrap_or(DEFAULT_MIN_RUN_TIME_MS),
config.max_run_time_ms.unwrap_or(DEFAULT_MAX_RUN_TIME_MS),
precision,
);
let mut test_bodies_bench = TestBodiesBench {
precision: precision.clone(),
ctx,
bodies: (*test)
.bodies
.clone()
.iter()
.map(|body| body.body_fn)
.collect(),
};
let bench_result = bench_runner.bench(&mut test_bodies_bench);
let mut bodies_summary = vec![];
for (body_id, body) in (*test).bodies.iter().enumerate() {
let test_body_summary = TestBodySummary {
name: body.name.clone(),
summary: bench_result.summaries[body_id].clone(),
};
bodies_summary.push(test_body_summary);
}
unsafe { (*test).teardown_fn.map(|teardown_fn| teardown_fn(ctx)) };
let grand_summary = bench_result.grand_summary;
let name = test.name.clone();
Ok(TestResult {
name,
grand_summary,
bodies_summary,
})
}
/// Run a sequence of tests
fn run_tests(
config: &Config,
global_ctx: TestCtx,
tests: Vec<Test>,
) -> Result<Vec<TestResult>, BenchError> {
let mut test_results: Vec<TestResult> = vec![];
let precision = Precision::new(precision::Config::default())?;
for test in tests {
eprintln!(" - {}", test.name);
let test_result = run_test(config, &precision, global_ctx, &test)?;
test_results.push(test_result);
}
Ok(test_results)
}
#[cfg(unix)]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
rtld_lazy: bool,
rtld_global: bool,
) -> Result<Library, BenchError> {
let mut flags = 0;
if rtld_lazy {
flags |= RTLD_LAZY;
} else {
flags |= RTLD_NOW;
}
if rtld_global {
flags |= RTLD_GLOBAL;
} else {
flags |= RTLD_LOCAL;
}
let library = libloading::os::unix::Library::open(Some(library_path), flags)?.into();
Ok(library)
}
#[cfg(not(unix))]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
_rtld_lazy: bool,
_rtld_global: bool,
) -> Result<Library, BenchError> {
Ok(Library::new(library_path)?)
}
/// Bench all functions contained in a shared library
fn bench_library(config: &Config, library_path: &Path) -> Result<Vec<TestResult>, BenchError> {
let tests_symbols = symbols::extract_tests_symbols(library_path)?;
let library = load_library(library_path, false, true)?;
let tests_runner: Symbol<'_, &TestsConfig> =
unsafe { library.get(TEST_LIBRARIES_TABLE_SYMBOL) }.map_err(BenchError::from)?;
if tests_runner.version!= TEST_ABI_VERSION {
return Err(BenchError::ABIError("Incompatible ABI version"));
}
let tests = symbols::resolve(&tests_symbols, &library);
let mut global_ctx: TestCtx = ptr::null_mut();
if let Some(global_setup) = tests_runner.global_setup {
unsafe { global_setup(&mut global_ctx) }
}
let test_results = run_tests(config, global_ctx, tests)?;
if let Some(global_teardown) = tests_runner.global_teardown {
unsafe { global_teardown(global_ctx) }
}
Ok(test_results)
}
/// Run an optional guard command
/// Returns `false` on success (return code = `0`), `true` on failure
fn disabled_due_to_guard(guard: &[String]) -> bool {
match Command::new(&guard[0]).args(&guard[1..]).status() {
Err(e) => {
eprintln!(
"Cannot run the [{}] guard script: [{}]",
&guard[0],
e.to_string()
);
true
}
Ok(status) =>!status.success(),
}
}
/// Entry point to run benchmarks according to a given configuration
pub fn bench(config: &Config) -> Result<(), BenchError> {
let mut test_suites_results: HashMap<String, HashMap<String, AnonymousTestResult>> =
HashMap::new();
for test_suite in &config.test_suites {
if let Some(guard) = &test_suite.guard {
if!guard.is_empty() && disabled_due_to_guard(guard) {
continue;
}
}
eprintln!("{}:", test_suite.name); | let test_results = bench_library(&config, Path::new(library_path))?;
for test_result in test_results {
let test_name_key = test_result.name.clone();
let anonymous_test_result = test_result.into();
if!test_suites_results.contains_key(&test_name_key) {
test_suites_results.insert(test_name_key.clone(), HashMap::new());
}
let results_for_test_name = test_suites_results.get_mut(&test_name_key).unwrap();
results_for_test_name.insert(test_suite.name.clone(), anonymous_test_result);
}
}
out::Out::new(test_suites_results).out_vec(&config.output)
} | let library_path = &test_suite.library_path; | random_line_split |
bench.rs | use super::config::*;
use super::errors::*;
use super::out;
use super::symbols;
use bencher::stats::Summary;
use libc::c_void;
#[cfg(unix)]
use libc::{RTLD_GLOBAL, RTLD_LAZY, RTLD_LOCAL, RTLD_NOW};
#[cfg(unix)]
use libloading;
use libloading::{Library, Symbol};
use precision::{self, Elapsed, Precision};
use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::Path;
use std::process::Command;
use std::ptr;
const TEST_ABI_VERSION: u64 = 0x01;
const TEST_LIBRARIES_TABLE_SYMBOL: &[u8] = b"tests_config";
pub type TestCtx = *mut c_void;
pub type TestSetupFn = unsafe extern "C" fn(TestCtx, *mut TestCtx);
pub type TestBodyFn = unsafe extern "C" fn(TestCtx);
pub type TestTeardownFn = unsafe extern "C" fn(TestCtx);
/// C structure to set up a global context for the test suite
#[repr(C)]
struct TestsConfig {
global_setup: Option<unsafe extern "C" fn(*mut TestCtx)>,
global_teardown: Option<unsafe extern "C" fn(TestCtx)>,
version: u64,
}
/// A named test body function
#[derive(Clone, Debug)]
pub struct TestBody {
pub name: String,
pub body_fn: TestBodyFn,
}
/// An individual test, with function pointers for each step
#[derive(Clone, Debug)]
pub struct Test {
pub name: String,
pub setup_fn: Option<TestSetupFn>,
pub bodies: Vec<TestBody>,
pub teardown_fn: Option<TestTeardownFn>,
}
/// Measurements for a "body" of a test
#[derive(Clone)]
pub struct TestBodySummary {
pub name: String,
pub summary: Summary,
}
/// The outcome of a test
#[derive(Clone)]
struct TestResult {
name: String,
grand_summary: Summary,
bodies_summary: Vec<TestBodySummary>,
}
/// The outcome of a test, without the name of the test
pub struct AnonymousTestResult {
pub grand_summary: Summary,
pub bodies_summary: Vec<TestBodySummary>,
}
impl Default for AnonymousTestResult {
fn default() -> Self {
Self {
grand_summary: Summary::new(&[0.0]),
bodies_summary: vec![],
}
}
}
impl From<TestResult> for AnonymousTestResult {
fn from(test_result: TestResult) -> Self {
AnonymousTestResult {
grand_summary: test_result.grand_summary,
bodies_summary: test_result.bodies_summary,
}
}
}
/// Environment for a single test
#[derive(Clone)]
struct TestBodiesBench {
precision: Precision,
ctx: TestCtx,
bodies: Vec<unsafe extern "C" fn(TestCtx)>,
}
#[derive(Default, Debug, Clone)]
pub struct Sample<T>(Vec<T>);
impl<T> Sample<T> {
pub fn empty() -> Self {
Sample(vec![])
}
}
pub trait Runnable<Ret> {
fn setup(&mut self) {}
fn teardown(&mut self) {}
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> Ret + 't>>;
}
impl TestBodiesBench {
#[inline]
fn body(&self, body_id: usize) {
unsafe { (self.bodies[body_id])(self.ctx) }
}
}
impl Runnable<()> for TestBodiesBench {
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> () + 't>> {
let mut fns: Vec<Box<dyn Fn(usize) -> () + 't>> = vec![];
for _ in 0..self.bodies.len() {
let this = self.clone();
fns.push(Box::new(move |body_id| this.body(body_id)))
}
fns
}
}
pub struct AdaptiveRunner {
round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: Precision,
}
#[derive(Clone)]
pub struct RunnerResult {
pub summaries: Vec<Summary>,
pub grand_summary: Summary,
}
impl AdaptiveRunner {
pub fn new(
initial_round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: &Precision,
) -> Self {
AdaptiveRunner {
round_size: initial_round_size,
min_sample_size,
min_run_time_ms,
max_run_time_ms,
precision: precision.clone(),
}
}
pub fn bench<Target, Ret>(&self, target: &mut Target) -> RunnerResult
where
Target: Runnable<Ret>,
{
let mut sample_for_all_bodies: Sample<Elapsed> = Sample::empty();
let mut samples: Vec<Sample<Elapsed>> = vec![];
let bodies = target.bodies();
samples.resize(bodies.len(), Sample::empty());
let mut round_size = self.round_size;
let ts_bench_start = self.precision.now();
let mut sample_id = 0;
loop {
let mut elapsed_vec: Vec<Elapsed> = vec![];
elapsed_vec.resize(bodies.len(), Elapsed::new());
for _ in 0..round_size {
for (body_id, body) in bodies.iter().enumerate() {
let ts_start = self.precision.now();
body(body_id);
let ts_end = self.precision.now();
elapsed_vec[body_id] += ts_end - ts_start;
}
}
let mut elapsed_for_all_bodies = Elapsed::new();
for (body_id, elapsed) in elapsed_vec.into_iter().enumerate() {
samples[body_id]
.0
.push(Elapsed::from_ticks(elapsed.ticks() / round_size as u64));
elapsed_for_all_bodies += elapsed;
}
sample_for_all_bodies.0.push(Elapsed::from_ticks(
elapsed_for_all_bodies.ticks() / round_size as u64,
));
let elapsed_total = (self.precision.now() - ts_bench_start).as_millis(&self.precision);
if elapsed_total < self.min_run_time_ms {
round_size = round_size.saturating_add(round_size);
continue;
}
if elapsed_total > self.max_run_time_ms {
break;
}
sample_id += 1;
if sample_id >= self.min_sample_size {
break;
}
}
let summaries: Vec<_> = samples
.into_iter()
.map(|sample| {
Summary::new(
sample
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
)
})
.collect();
let grand_summary = Summary::new(
sample_for_all_bodies
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
);
RunnerResult {
summaries,
grand_summary,
}
}
}
/// Run an individual test
fn run_test(
config: &Config,
precision: &Precision,
global_ctx: TestCtx,
test: &Test,
) -> Result<TestResult, BenchError> | .clone()
.iter()
.map(|body| body.body_fn)
.collect(),
};
let bench_result = bench_runner.bench(&mut test_bodies_bench);
let mut bodies_summary = vec![];
for (body_id, body) in (*test).bodies.iter().enumerate() {
let test_body_summary = TestBodySummary {
name: body.name.clone(),
summary: bench_result.summaries[body_id].clone(),
};
bodies_summary.push(test_body_summary);
}
unsafe { (*test).teardown_fn.map(|teardown_fn| teardown_fn(ctx)) };
let grand_summary = bench_result.grand_summary;
let name = test.name.clone();
Ok(TestResult {
name,
grand_summary,
bodies_summary,
})
}
/// Run a sequence of tests
fn run_tests(
config: &Config,
global_ctx: TestCtx,
tests: Vec<Test>,
) -> Result<Vec<TestResult>, BenchError> {
let mut test_results: Vec<TestResult> = vec![];
let precision = Precision::new(precision::Config::default())?;
for test in tests {
eprintln!(" - {}", test.name);
let test_result = run_test(config, &precision, global_ctx, &test)?;
test_results.push(test_result);
}
Ok(test_results)
}
#[cfg(unix)]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
rtld_lazy: bool,
rtld_global: bool,
) -> Result<Library, BenchError> {
let mut flags = 0;
if rtld_lazy {
flags |= RTLD_LAZY;
} else {
flags |= RTLD_NOW;
}
if rtld_global {
flags |= RTLD_GLOBAL;
} else {
flags |= RTLD_LOCAL;
}
let library = libloading::os::unix::Library::open(Some(library_path), flags)?.into();
Ok(library)
}
#[cfg(not(unix))]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
_rtld_lazy: bool,
_rtld_global: bool,
) -> Result<Library, BenchError> {
Ok(Library::new(library_path)?)
}
/// Bench all functions contained in a shared library
fn bench_library(config: &Config, library_path: &Path) -> Result<Vec<TestResult>, BenchError> {
let tests_symbols = symbols::extract_tests_symbols(library_path)?;
let library = load_library(library_path, false, true)?;
let tests_runner: Symbol<'_, &TestsConfig> =
unsafe { library.get(TEST_LIBRARIES_TABLE_SYMBOL) }.map_err(BenchError::from)?;
if tests_runner.version!= TEST_ABI_VERSION {
return Err(BenchError::ABIError("Incompatible ABI version"));
}
let tests = symbols::resolve(&tests_symbols, &library);
let mut global_ctx: TestCtx = ptr::null_mut();
if let Some(global_setup) = tests_runner.global_setup {
unsafe { global_setup(&mut global_ctx) }
}
let test_results = run_tests(config, global_ctx, tests)?;
if let Some(global_teardown) = tests_runner.global_teardown {
unsafe { global_teardown(global_ctx) }
}
Ok(test_results)
}
/// Run an optional guard command
/// Returns `false` on success (return code = `0`), `true` on failure
fn disabled_due_to_guard(guard: &[String]) -> bool {
match Command::new(&guard[0]).args(&guard[1..]).status() {
Err(e) => {
eprintln!(
"Cannot run the [{}] guard script: [{}]",
&guard[0],
e.to_string()
);
true
}
Ok(status) =>!status.success(),
}
}
/// Entry point to run benchmarks according to a given configuration
pub fn bench(config: &Config) -> Result<(), BenchError> {
let mut test_suites_results: HashMap<String, HashMap<String, AnonymousTestResult>> =
HashMap::new();
for test_suite in &config.test_suites {
if let Some(guard) = &test_suite.guard {
if!guard.is_empty() && disabled_due_to_guard(guard) {
continue;
}
}
eprintln!("{}:", test_suite.name);
let library_path = &test_suite.library_path;
let test_results = bench_library(&config, Path::new(library_path))?;
for test_result in test_results {
let test_name_key = test_result.name.clone();
let anonymous_test_result = test_result.into();
if!test_suites_results.contains_key(&test_name_key) {
test_suites_results.insert(test_name_key.clone(), HashMap::new());
}
let results_for_test_name = test_suites_results.get_mut(&test_name_key).unwrap();
results_for_test_name.insert(test_suite.name.clone(), anonymous_test_result);
}
}
out::Out::new(test_suites_results).out_vec(&config.output)
}
| {
let mut ctx: TestCtx = ptr::null_mut();
if let Some(setup) = (*test).setup_fn {
unsafe { setup(global_ctx, &mut ctx) }
}
let bench_runner = AdaptiveRunner::new(
config
.initial_round_size
.unwrap_or(DEFAULT_INITIAL_ROUND_SIZE),
config.min_sample_size.unwrap_or(DEFAULT_MIN_SAMPLE_SIZE),
config.min_run_time_ms.unwrap_or(DEFAULT_MIN_RUN_TIME_MS),
config.max_run_time_ms.unwrap_or(DEFAULT_MAX_RUN_TIME_MS),
precision,
);
let mut test_bodies_bench = TestBodiesBench {
precision: precision.clone(),
ctx,
bodies: (*test)
.bodies | identifier_body |
opmapi.rs | // Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use shared::basetsd::UINT64;
use shared::d3d9::IDirect3DDevice9;
use shared::d3d9types::D3DFORMAT;
use shared::guiddef::GUID;
use shared::minwindef::{BYTE, DWORD, ULONG};
use shared::windef::HMONITOR;
use um::dxva2api::DXVA2_SampleFormat;
use um::unknwnbase::{IUnknown, IUnknownVtbl};
use um::winnt::{HRESULT, LUID};
DEFINE_GUID!{OPM_GET_CURRENT_HDCP_SRM_VERSION,
0x99c5ceff, 0x5f1d, 0x4879, 0x81, 0xc1, 0xc5, 0x24, 0x43, 0xc9, 0x48, 0x2b}
DEFINE_GUID!{OPM_GET_CONNECTED_HDCP_DEVICE_INFORMATION,
0x0db59d74, 0xa992, 0x492e, 0xa0, 0xbd, 0xc2, 0x3f, 0xda, 0x56, 0x4e, 0x00}
DEFINE_GUID!{OPM_GET_ACP_AND_CGMSA_SIGNALING,
0x6629a591, 0x3b79, 0x4cf3, 0x92, 0x4a, 0x11, 0xe8, 0xe7, 0x81, 0x16, 0x71}
DEFINE_GUID!{OPM_GET_CONNECTOR_TYPE,
0x81d0bfd5, 0x6afe, 0x48c2, 0x99, 0xc0, 0x95, 0xa0, 0x8f, 0x97, 0xc5, 0xda}
DEFINE_GUID!{OPM_GET_SUPPORTED_PROTECTION_TYPES,
0x38f2a801, 0x9a6c, 0x48bb, 0x91, 0x07, 0xb6, 0x69, 0x6e, 0x6f, 0x17, 0x97}
DEFINE_GUID!{OPM_GET_VIRTUAL_PROTECTION_LEVEL,
0xb2075857, 0x3eda, 0x4d5d, 0x88, 0xdb, 0x74, 0x8f, 0x8c, 0x1a, 0x05, 0x49}
DEFINE_GUID!{OPM_GET_ACTUAL_PROTECTION_LEVEL,
0x1957210a, 0x7766, 0x452a, 0xb9, 0x9a, 0xd2, 0x7a, 0xed, 0x54, 0xf0, 0x3a}
DEFINE_GUID!{OPM_GET_ACTUAL_OUTPUT_FORMAT,
0xd7bf1ba3, 0xad13, 0x4f8e, 0xaf, 0x98, 0x0d, 0xcb, 0x3c, 0xa2, 0x04, 0xcc}
DEFINE_GUID!{OPM_GET_ADAPTER_BUS_TYPE,
0xc6f4d673, 0x6174, 0x4184, 0x8e, 0x35, 0xf6, 0xdb, 0x52, 0x0, 0xbc, 0xba}
DEFINE_GUID!{OPM_GET_OUTPUT_ID,
0x72cb6df3, 0x244f, 0x40ce, 0xb0, 0x9e, 0x20, 0x50, 0x6a, 0xf6, 0x30, 0x2f}
DEFINE_GUID!{OPM_GET_DVI_CHARACTERISTICS,
0xa470b3bb, 0x5dd7, 0x4172, 0x83, 0x9c, 0x3d, 0x37, 0x76, 0xe0, 0xeb, 0xf5}
DEFINE_GUID!{OPM_GET_CODEC_INFO,
0x4f374491, 0x8f5f, 0x4445, 0x9d, 0xba, 0x95, 0x58, 0x8f, 0x6b, 0x58, 0xb4}
DEFINE_GUID!{OPM_GET_OUTPUT_HARDWARE_PROTECTION_SUPPORT,
0x3b129589, 0x2af8, 0x4ef0, 0x96, 0xa2, 0x70, 0x4a, 0x84, 0x5a, 0x21, 0x8e}
DEFINE_GUID!{OPM_SET_PROTECTION_LEVEL,
0x9bb9327c, 0x4eb5, 0x4727, 0x9f, 0x00, 0xb4, 0x2b, 0x09, 0x19, 0xc0, 0xda}
DEFINE_GUID!{OPM_SET_ACP_AND_CGMSA_SIGNALING,
0x09a631a5, 0xd684, 0x4c60, 0x8e, 0x4d, 0xd3, 0xbb, 0x0f, 0x0b, 0xe3, 0xee}
DEFINE_GUID!{OPM_SET_HDCP_SRM,
0x8b5ef5d1, 0xc30d, 0x44ff, 0x84, 0xa5, 0xea, 0x71, 0xdc, 0xe7, 0x8f, 0x13}
DEFINE_GUID!{OPM_SET_PROTECTION_LEVEL_ACCORDING_TO_CSS_DVD,
0x39ce333e, 0x4cc0, 0x44ae, 0xbf, 0xcc, 0xda, 0x50, 0xb5, 0xf8, 0x2e, 0x72}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0001 {
OPM_OMAC_SIZE = 16,
OPM_128_BIT_RANDOM_NUMBER_SIZE = 16,
OPM_ENCRYPTED_INITIALIZATION_PARAMETERS_SIZE = 256,
OPM_CONFIGURE_SETTING_DATA_SIZE = 4056,
OPM_GET_INFORMATION_PARAMETERS_SIZE = 4056,
OPM_REQUESTED_INFORMATION_SIZE = 4076,
OPM_HDCP_KEY_SELECTION_VECTOR_SIZE = 5,
OPM_PROTECTION_TYPE_SIZE = 4,
OPM_BUS_TYPE_MASK = 0xffff,
OPM_BUS_IMPLEMENTATION_MODIFIER_MASK = 0x7fff,
}}
ENUM!{enum OPM_VIDEO_OUTPUT_SEMANTICS {
OPM_VOS_COPP_SEMANTICS = 0,
OPM_VOS_OPM_SEMANTICS = 1,
OPM_VOS_OPM_INDIRECT_DISPLAY = 2,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0002 {
OPM_HDCP_FLAG_NONE = 0,
OPM_HDCP_FLAG_REPEATER = 0x1,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0003 {
OPM_STATUS_NORMAL = 0,
OPM_STATUS_LINK_LOST = 0x1,
OPM_STATUS_RENEGOTIATION_REQUIRED = 0x2,
OPM_STATUS_TAMPERING_DETECTED = 0x4,
OPM_STATUS_REVOKED_HDCP_DEVICE_ATTACHED = 0x8,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0004 {
OPM_CONNECTOR_TYPE_OTHER = -1i32 as u32,
OPM_CONNECTOR_TYPE_VGA = 0,
OPM_CONNECTOR_TYPE_SVIDEO = 1,
OPM_CONNECTOR_TYPE_COMPOSITE_VIDEO = 2,
OPM_CONNECTOR_TYPE_COMPONENT_VIDEO = 3,
OPM_CONNECTOR_TYPE_DVI = 4,
OPM_CONNECTOR_TYPE_HDMI = 5,
OPM_CONNECTOR_TYPE_LVDS = 6,
OPM_CONNECTOR_TYPE_D_JPN = 8,
OPM_CONNECTOR_TYPE_SDI = 9,
OPM_CONNECTOR_TYPE_DISPLAYPORT_EXTERNAL = 10,
OPM_CONNECTOR_TYPE_DISPLAYPORT_EMBEDDED = 11,
OPM_CONNECTOR_TYPE_UDI_EXTERNAL = 12,
OPM_CONNECTOR_TYPE_UDI_EMBEDDED = 13,
OPM_CONNECTOR_TYPE_RESERVED = 14,
OPM_CONNECTOR_TYPE_MIRACAST = 15,
OPM_CONNECTOR_TYPE_TRANSPORT_AGNOSTIC_DIGITAL_MODE_A = 16,
OPM_CONNECTOR_TYPE_TRANSPORT_AGNOSTIC_DIGITAL_MODE_B = 17,
OPM_COPP_COMPATIBLE_CONNECTOR_TYPE_INTERNAL = 0x80000000,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0005 {
OPM_DVI_CHARACTERISTIC_1_0 = 1,
OPM_DVI_CHARACTERISTIC_1_1_OR_ABOVE = 2,
}}
ENUM!{enum OPM_OUTPUT_HARDWARE_PROTECTION {
OPM_OUTPUT_HARDWARE_PROTECTION_NOT_SUPPORTED = 0,
OPM_OUTPUT_HARDWARE_PROTECTION_SUPPORTED = 0x1,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0006 {
OPM_BUS_TYPE_OTHER = 0,
OPM_BUS_TYPE_PCI = 0x1,
OPM_BUS_TYPE_PCIX = 0x2,
OPM_BUS_TYPE_PCIEXPRESS = 0x3,
OPM_BUS_TYPE_AGP = 0x4,
OPM_BUS_IMPLEMENTATION_MODIFIER_INSIDE_OF_CHIPSET = 0x10000,
OPM_BUS_IMPLEMENTATION_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_CHIP = 0x20000,
OPM_BUS_IMPLEMENTATION_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_SOCKET = 0x30000,
OPM_BUS_IMPLEMENTATION_MODIFIER_DAUGHTER_BOARD_CONNECTOR = 0x40000,
OPM_BUS_IMPLEMENTATION_MODIFIER_DAUGHTER_BOARD_CONNECTOR_INSIDE_OF_NUAE = 0x50000,
OPM_BUS_IMPLEMENTATION_MODIFIER_NON_STANDARD = 0x80000000,
OPM_COPP_COMPATIBLE_BUS_TYPE_INTEGRATED = 0x80000000,
}}
ENUM!{enum OPM_DPCP_PROTECTION_LEVEL {
OPM_DPCP_OFF = 0,
OPM_DPCP_ON = 1,
OPM_DPCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum OPM_HDCP_PROTECTION_LEVEL {
OPM_HDCP_OFF = 0,
OPM_HDCP_ON = 1,
OPM_HDCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum OPM_TYPE_ENFORCEMENT_HDCP_PROTECTION_LEVEL {
OPM_TYPE_ENFORCEMENT_HDCP_OFF = OPM_HDCP_OFF,
OPM_TYPE_ENFORCEMENT_HDCP_ON_WITH_NO_TYPE_RESTRICTION = OPM_HDCP_ON,
OPM_TYPE_ENFORCEMENT_HDCP_ON_WITH_TYPE1_RESTRICTION = OPM_HDCP_ON + 1,
OPM_TYPE_ENFORCEMENT_HDCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0007 {
OPM_CGMSA_OFF = 0,
OPM_CGMSA_COPY_FREELY = 0x1,
OPM_CGMSA_COPY_NO_MORE = 0x2,
OPM_CGMSA_COPY_ONE_GENERATION = 0x3,
OPM_CGMSA_COPY_NEVER = 0x4,
OPM_CGMSA_REDISTRIBUTION_CONTROL_REQUIRED = 0x8,
}}
ENUM!{enum OPM_ACP_PROTECTION_LEVEL {
OPM_ACP_OFF = 0,
OPM_ACP_LEVEL_ONE = 1,
OPM_ACP_LEVEL_TWO = 2,
OPM_ACP_LEVEL_THREE = 3,
OPM_ACP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0008 {
OPM_PROTECTION_TYPE_OTHER = 0x80000000,
OPM_PROTECTION_TYPE_NONE = 0,
OPM_PROTECTION_TYPE_COPP_COMPATIBLE_HDCP = 0x1,
OPM_PROTECTION_TYPE_ACP = 0x2,
OPM_PROTECTION_TYPE_CGMSA = 0x4,
OPM_PROTECTION_TYPE_HDCP = 0x8,
OPM_PROTECTION_TYPE_DPCP = 0x10,
OPM_PROTECTION_TYPE_TYPE_ENFORCEMENT_HDCP = 0x20,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0009 {
OPM_PROTECTION_STANDARD_OTHER = 0x80000000,
OPM_PROTECTION_STANDARD_NONE = 0,
OPM_PROTECTION_STANDARD_IEC61880_525I = 0x1,
OPM_PROTECTION_STANDARD_IEC61880_2_525I = 0x2,
OPM_PROTECTION_STANDARD_IEC62375_625P = 0x4,
OPM_PROTECTION_STANDARD_EIA608B_525 = 0x8,
OPM_PROTECTION_STANDARD_EN300294_625I = 0x10,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_525P = 0x20,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_750P = 0x40,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_1125I = 0x80,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_525P = 0x100,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_750P = 0x200,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_1125I = 0x400,
OPM_PROTECTION_STANDARD_ARIBTRB15_525I = 0x800,
OPM_PROTECTION_STANDARD_ARIBTRB15_525P = 0x1000,
OPM_PROTECTION_STANDARD_ARIBTRB15_750P = 0x2000,
OPM_PROTECTION_STANDARD_ARIBTRB15_1125I = 0x4000,
}}
ENUM!{enum OPM_IMAGE_ASPECT_RATIO_EN300294 {
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3 = 0,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_CENTER = 1,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_TOP = 2,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_CENTER = 3,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_TOP = 4,
OPM_ASPECT_RATIO_EN300294_BOX_GT_16_BY_9_CENTER = 5,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3_PROTECTED_CENTER = 6,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_16_BY_9_ANAMORPHIC = 7,
OPM_ASPECT_RATIO_FORCE_ULONG = 0x7fffffff,
}}
STRUCT!{#[repr(packed)] struct OPM_RANDOM_NUMBER {
abRandomNumber: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_OMAC {
abOMAC: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_ENCRYPTED_INITIALIZATION_PARAMETERS {
abEncryptedInitializationParameters: [BYTE; 256],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_INFO_PARAMETERS {
omac: OPM_OMAC,
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS {
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_HDCP_KEY_SELECTION_VECTOR {
abKeySelectionVector: [BYTE; 5],
}}
STRUCT!{#[repr(packed)] struct OPM_CONNECTED_HDCP_DEVICE_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulHDCPFlags: ULONG,
ksvB: OPM_HDCP_KEY_SELECTION_VECTOR,
Reserved: [BYTE; 11],
Reserved2: [BYTE; 16],
Reserved3: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_REQUESTED_INFORMATION {
omac: OPM_OMAC,
cbRequestedInformationSize: ULONG,
abRequestedInformation: [BYTE; 4076],
}}
STRUCT!{#[repr(packed)] struct OPM_STANDARD_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulInformation: ULONG,
ulReserved: ULONG,
ulReserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACTUAL_OUTPUT_FORMAT {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulDisplayWidth: ULONG,
ulDisplayHeight: ULONG,
dsfSampleInterleaveFormat: DXVA2_SampleFormat,
d3dFormat: D3DFORMAT,
ulFrequencyNumerator: ULONG,
ulFrequencyDenominator: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACP_AND_CGMSA_SIGNALING {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulAvailableTVProtectionStandards: ULONG,
ulActiveTVProtectionStandard: ULONG,
ulReserved: ULONG,
ulAspectRatioValidMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioValidMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioValidMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved2: [ULONG; 4],
ulReserved3: [ULONG; 4],
}}
STRUCT!{#[repr(packed)] struct OPM_OUTPUT_ID_DATA {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
OutputId: UINT64,
}}
STRUCT!{#[repr(packed)] struct OPM_CONFIGURE_PARAMETERS {
omac: OPM_OMAC,
guidSetting: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_SET_PROTECTION_LEVEL_PARAMETERS {
ulProtectionType: ULONG,
ulProtectionLevel: ULONG,
Reserved: ULONG,
Reserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_ACP_AND_CGMSA_SIGNALING_PARAMETERS {
ulNewTVProtectionStandard: ULONG,
ulAspectRatioChangeMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioChangeMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioChangeMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved: [ULONG; 4],
ulReserved2: [ULONG; 4],
ulReserved3: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_HDCP_SRM_PARAMETERS {
ulSRMVersion: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_PARAMETERS {
cbVerifier: DWORD,
Verifier: [BYTE; 4052],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
Merit: DWORD,
}}
DEFINE_GUID!{IID_IOPMVideoOutput,
0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d}
RIDL!{#[uuid(0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d)]
interface IOPMVideoOutput(IOPMVideoOutputVtbl): IUnknown(IUnknownVtbl) {
fn StartInitialization(
prnRandomNumber: *mut OPM_RANDOM_NUMBER,
ppbCertificate: *mut *mut BYTE,
pulCertificateLength: *mut ULONG,
) -> HRESULT,
fn FinishInitialization(
pParameters: *const OPM_ENCRYPTED_INITIALIZATION_PARAMETERS,
) -> HRESULT,
fn GetInformation(
pParameters: *const OPM_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn COPPCompatibleGetInformation(
pParameters: *const OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn Configure(
pParameters: *const OPM_CONFIGURE_PARAMETERS,
ulAdditionalParametersSize: ULONG,
pbAdditionalParameters: *const BYTE,
) -> HRESULT,
}}
#[inline]
pub fn GetBusType(ulBusTypeAndImplementation: ULONG) -> ULONG {
ulBusTypeAndImplementation & OPM_BUS_TYPE_MASK
}
#[inline]
pub fn GetBusImplementation(ulBusTypeAndImplementation: ULONG) -> ULONG {
(ulBusTypeAndImplementation & OPM_BUS_IMPLEMENTATION_MODIFIER_MASK) >> 16
}
#[inline]
pub fn IsNonStandardBusImplementation(ulBusTypeAndImplementation: ULONG) -> ULONG |
extern "system" {
pub fn OPMGetVideoOutputsFromHMONITOR(
hMonitor: HMONITOR,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
pulNumVideoOutputs: *mut ULONG,
pppOPMVideoOutputArray: *mut *mut *mut IOPMVideoOutput,
) -> HRESULT;
pub fn OPMGetVideoOutputForTarget(
pAdapterLuid: *mut LUID,
VidPnTarget: ULONG,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
ppOPMVideoOutput: *mut *mut IOPMVideoOutput,
) -> HRESULT;
pub fn OPMGetVideoOutputsFromIDirect3DDevice9Object(
pDirect3DDevice9: *mut IDirect3DDevice9,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
pulNumVideoOutputs: *mut ULONG,
pppOPMVideoOutputArray: *mut *mut *mut IOPMVideoOutput,
) -> HRESULT;
}
| {
ulBusTypeAndImplementation & OPM_BUS_IMPLEMENTATION_MODIFIER_NON_STANDARD
} | identifier_body |
opmapi.rs | // Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use shared::basetsd::UINT64;
use shared::d3d9::IDirect3DDevice9;
use shared::d3d9types::D3DFORMAT;
use shared::guiddef::GUID;
use shared::minwindef::{BYTE, DWORD, ULONG};
use shared::windef::HMONITOR;
use um::dxva2api::DXVA2_SampleFormat;
use um::unknwnbase::{IUnknown, IUnknownVtbl};
use um::winnt::{HRESULT, LUID};
DEFINE_GUID!{OPM_GET_CURRENT_HDCP_SRM_VERSION,
0x99c5ceff, 0x5f1d, 0x4879, 0x81, 0xc1, 0xc5, 0x24, 0x43, 0xc9, 0x48, 0x2b}
DEFINE_GUID!{OPM_GET_CONNECTED_HDCP_DEVICE_INFORMATION,
0x0db59d74, 0xa992, 0x492e, 0xa0, 0xbd, 0xc2, 0x3f, 0xda, 0x56, 0x4e, 0x00}
DEFINE_GUID!{OPM_GET_ACP_AND_CGMSA_SIGNALING,
0x6629a591, 0x3b79, 0x4cf3, 0x92, 0x4a, 0x11, 0xe8, 0xe7, 0x81, 0x16, 0x71}
DEFINE_GUID!{OPM_GET_CONNECTOR_TYPE,
0x81d0bfd5, 0x6afe, 0x48c2, 0x99, 0xc0, 0x95, 0xa0, 0x8f, 0x97, 0xc5, 0xda}
DEFINE_GUID!{OPM_GET_SUPPORTED_PROTECTION_TYPES,
0x38f2a801, 0x9a6c, 0x48bb, 0x91, 0x07, 0xb6, 0x69, 0x6e, 0x6f, 0x17, 0x97}
DEFINE_GUID!{OPM_GET_VIRTUAL_PROTECTION_LEVEL,
0xb2075857, 0x3eda, 0x4d5d, 0x88, 0xdb, 0x74, 0x8f, 0x8c, 0x1a, 0x05, 0x49}
DEFINE_GUID!{OPM_GET_ACTUAL_PROTECTION_LEVEL,
0x1957210a, 0x7766, 0x452a, 0xb9, 0x9a, 0xd2, 0x7a, 0xed, 0x54, 0xf0, 0x3a}
DEFINE_GUID!{OPM_GET_ACTUAL_OUTPUT_FORMAT,
0xd7bf1ba3, 0xad13, 0x4f8e, 0xaf, 0x98, 0x0d, 0xcb, 0x3c, 0xa2, 0x04, 0xcc}
DEFINE_GUID!{OPM_GET_ADAPTER_BUS_TYPE,
0xc6f4d673, 0x6174, 0x4184, 0x8e, 0x35, 0xf6, 0xdb, 0x52, 0x0, 0xbc, 0xba}
DEFINE_GUID!{OPM_GET_OUTPUT_ID,
0x72cb6df3, 0x244f, 0x40ce, 0xb0, 0x9e, 0x20, 0x50, 0x6a, 0xf6, 0x30, 0x2f}
DEFINE_GUID!{OPM_GET_DVI_CHARACTERISTICS,
0xa470b3bb, 0x5dd7, 0x4172, 0x83, 0x9c, 0x3d, 0x37, 0x76, 0xe0, 0xeb, 0xf5}
DEFINE_GUID!{OPM_GET_CODEC_INFO,
0x4f374491, 0x8f5f, 0x4445, 0x9d, 0xba, 0x95, 0x58, 0x8f, 0x6b, 0x58, 0xb4}
DEFINE_GUID!{OPM_GET_OUTPUT_HARDWARE_PROTECTION_SUPPORT,
0x3b129589, 0x2af8, 0x4ef0, 0x96, 0xa2, 0x70, 0x4a, 0x84, 0x5a, 0x21, 0x8e}
DEFINE_GUID!{OPM_SET_PROTECTION_LEVEL,
0x9bb9327c, 0x4eb5, 0x4727, 0x9f, 0x00, 0xb4, 0x2b, 0x09, 0x19, 0xc0, 0xda}
DEFINE_GUID!{OPM_SET_ACP_AND_CGMSA_SIGNALING,
0x09a631a5, 0xd684, 0x4c60, 0x8e, 0x4d, 0xd3, 0xbb, 0x0f, 0x0b, 0xe3, 0xee}
DEFINE_GUID!{OPM_SET_HDCP_SRM,
0x8b5ef5d1, 0xc30d, 0x44ff, 0x84, 0xa5, 0xea, 0x71, 0xdc, 0xe7, 0x8f, 0x13}
DEFINE_GUID!{OPM_SET_PROTECTION_LEVEL_ACCORDING_TO_CSS_DVD,
0x39ce333e, 0x4cc0, 0x44ae, 0xbf, 0xcc, 0xda, 0x50, 0xb5, 0xf8, 0x2e, 0x72}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0001 {
OPM_OMAC_SIZE = 16,
OPM_128_BIT_RANDOM_NUMBER_SIZE = 16,
OPM_ENCRYPTED_INITIALIZATION_PARAMETERS_SIZE = 256,
OPM_CONFIGURE_SETTING_DATA_SIZE = 4056,
OPM_GET_INFORMATION_PARAMETERS_SIZE = 4056,
OPM_REQUESTED_INFORMATION_SIZE = 4076,
OPM_HDCP_KEY_SELECTION_VECTOR_SIZE = 5,
OPM_PROTECTION_TYPE_SIZE = 4,
OPM_BUS_TYPE_MASK = 0xffff,
OPM_BUS_IMPLEMENTATION_MODIFIER_MASK = 0x7fff,
}}
ENUM!{enum OPM_VIDEO_OUTPUT_SEMANTICS {
OPM_VOS_COPP_SEMANTICS = 0,
OPM_VOS_OPM_SEMANTICS = 1,
OPM_VOS_OPM_INDIRECT_DISPLAY = 2,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0002 {
OPM_HDCP_FLAG_NONE = 0,
OPM_HDCP_FLAG_REPEATER = 0x1,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0003 {
OPM_STATUS_NORMAL = 0,
OPM_STATUS_LINK_LOST = 0x1,
OPM_STATUS_RENEGOTIATION_REQUIRED = 0x2,
OPM_STATUS_TAMPERING_DETECTED = 0x4,
OPM_STATUS_REVOKED_HDCP_DEVICE_ATTACHED = 0x8,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0004 {
OPM_CONNECTOR_TYPE_OTHER = -1i32 as u32,
OPM_CONNECTOR_TYPE_VGA = 0,
OPM_CONNECTOR_TYPE_SVIDEO = 1,
OPM_CONNECTOR_TYPE_COMPOSITE_VIDEO = 2,
OPM_CONNECTOR_TYPE_COMPONENT_VIDEO = 3,
OPM_CONNECTOR_TYPE_DVI = 4,
OPM_CONNECTOR_TYPE_HDMI = 5,
OPM_CONNECTOR_TYPE_LVDS = 6,
OPM_CONNECTOR_TYPE_D_JPN = 8,
OPM_CONNECTOR_TYPE_SDI = 9,
OPM_CONNECTOR_TYPE_DISPLAYPORT_EXTERNAL = 10,
OPM_CONNECTOR_TYPE_DISPLAYPORT_EMBEDDED = 11,
OPM_CONNECTOR_TYPE_UDI_EXTERNAL = 12,
OPM_CONNECTOR_TYPE_UDI_EMBEDDED = 13,
OPM_CONNECTOR_TYPE_RESERVED = 14,
OPM_CONNECTOR_TYPE_MIRACAST = 15,
OPM_CONNECTOR_TYPE_TRANSPORT_AGNOSTIC_DIGITAL_MODE_A = 16,
OPM_CONNECTOR_TYPE_TRANSPORT_AGNOSTIC_DIGITAL_MODE_B = 17,
OPM_COPP_COMPATIBLE_CONNECTOR_TYPE_INTERNAL = 0x80000000,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0005 {
OPM_DVI_CHARACTERISTIC_1_0 = 1,
OPM_DVI_CHARACTERISTIC_1_1_OR_ABOVE = 2,
}}
ENUM!{enum OPM_OUTPUT_HARDWARE_PROTECTION {
OPM_OUTPUT_HARDWARE_PROTECTION_NOT_SUPPORTED = 0,
OPM_OUTPUT_HARDWARE_PROTECTION_SUPPORTED = 0x1,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0006 {
OPM_BUS_TYPE_OTHER = 0,
OPM_BUS_TYPE_PCI = 0x1,
OPM_BUS_TYPE_PCIX = 0x2,
OPM_BUS_TYPE_PCIEXPRESS = 0x3,
OPM_BUS_TYPE_AGP = 0x4,
OPM_BUS_IMPLEMENTATION_MODIFIER_INSIDE_OF_CHIPSET = 0x10000,
OPM_BUS_IMPLEMENTATION_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_CHIP = 0x20000,
OPM_BUS_IMPLEMENTATION_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_SOCKET = 0x30000,
OPM_BUS_IMPLEMENTATION_MODIFIER_DAUGHTER_BOARD_CONNECTOR = 0x40000,
OPM_BUS_IMPLEMENTATION_MODIFIER_DAUGHTER_BOARD_CONNECTOR_INSIDE_OF_NUAE = 0x50000,
OPM_BUS_IMPLEMENTATION_MODIFIER_NON_STANDARD = 0x80000000,
OPM_COPP_COMPATIBLE_BUS_TYPE_INTEGRATED = 0x80000000,
}}
ENUM!{enum OPM_DPCP_PROTECTION_LEVEL {
OPM_DPCP_OFF = 0,
OPM_DPCP_ON = 1,
OPM_DPCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum OPM_HDCP_PROTECTION_LEVEL {
OPM_HDCP_OFF = 0,
OPM_HDCP_ON = 1,
OPM_HDCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum OPM_TYPE_ENFORCEMENT_HDCP_PROTECTION_LEVEL {
OPM_TYPE_ENFORCEMENT_HDCP_OFF = OPM_HDCP_OFF,
OPM_TYPE_ENFORCEMENT_HDCP_ON_WITH_NO_TYPE_RESTRICTION = OPM_HDCP_ON,
OPM_TYPE_ENFORCEMENT_HDCP_ON_WITH_TYPE1_RESTRICTION = OPM_HDCP_ON + 1,
OPM_TYPE_ENFORCEMENT_HDCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0007 {
OPM_CGMSA_OFF = 0,
OPM_CGMSA_COPY_FREELY = 0x1,
OPM_CGMSA_COPY_NO_MORE = 0x2,
OPM_CGMSA_COPY_ONE_GENERATION = 0x3,
OPM_CGMSA_COPY_NEVER = 0x4,
OPM_CGMSA_REDISTRIBUTION_CONTROL_REQUIRED = 0x8,
}}
ENUM!{enum OPM_ACP_PROTECTION_LEVEL {
OPM_ACP_OFF = 0,
OPM_ACP_LEVEL_ONE = 1,
OPM_ACP_LEVEL_TWO = 2,
OPM_ACP_LEVEL_THREE = 3,
OPM_ACP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0008 {
OPM_PROTECTION_TYPE_OTHER = 0x80000000,
OPM_PROTECTION_TYPE_NONE = 0,
OPM_PROTECTION_TYPE_COPP_COMPATIBLE_HDCP = 0x1,
OPM_PROTECTION_TYPE_ACP = 0x2,
OPM_PROTECTION_TYPE_CGMSA = 0x4,
OPM_PROTECTION_TYPE_HDCP = 0x8,
OPM_PROTECTION_TYPE_DPCP = 0x10,
OPM_PROTECTION_TYPE_TYPE_ENFORCEMENT_HDCP = 0x20,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0009 {
OPM_PROTECTION_STANDARD_OTHER = 0x80000000,
OPM_PROTECTION_STANDARD_NONE = 0,
OPM_PROTECTION_STANDARD_IEC61880_525I = 0x1,
OPM_PROTECTION_STANDARD_IEC61880_2_525I = 0x2,
OPM_PROTECTION_STANDARD_IEC62375_625P = 0x4,
OPM_PROTECTION_STANDARD_EIA608B_525 = 0x8,
OPM_PROTECTION_STANDARD_EN300294_625I = 0x10,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_525P = 0x20,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_750P = 0x40,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_1125I = 0x80,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_525P = 0x100,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_750P = 0x200,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_1125I = 0x400,
OPM_PROTECTION_STANDARD_ARIBTRB15_525I = 0x800,
OPM_PROTECTION_STANDARD_ARIBTRB15_525P = 0x1000,
OPM_PROTECTION_STANDARD_ARIBTRB15_750P = 0x2000,
OPM_PROTECTION_STANDARD_ARIBTRB15_1125I = 0x4000,
}}
ENUM!{enum OPM_IMAGE_ASPECT_RATIO_EN300294 {
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3 = 0,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_CENTER = 1,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_TOP = 2,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_CENTER = 3,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_TOP = 4,
OPM_ASPECT_RATIO_EN300294_BOX_GT_16_BY_9_CENTER = 5,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3_PROTECTED_CENTER = 6,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_16_BY_9_ANAMORPHIC = 7,
OPM_ASPECT_RATIO_FORCE_ULONG = 0x7fffffff,
}}
STRUCT!{#[repr(packed)] struct OPM_RANDOM_NUMBER {
abRandomNumber: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_OMAC {
abOMAC: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_ENCRYPTED_INITIALIZATION_PARAMETERS {
abEncryptedInitializationParameters: [BYTE; 256],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_INFO_PARAMETERS {
omac: OPM_OMAC,
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS {
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_HDCP_KEY_SELECTION_VECTOR {
abKeySelectionVector: [BYTE; 5],
}}
STRUCT!{#[repr(packed)] struct OPM_CONNECTED_HDCP_DEVICE_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulHDCPFlags: ULONG,
ksvB: OPM_HDCP_KEY_SELECTION_VECTOR,
Reserved: [BYTE; 11],
Reserved2: [BYTE; 16],
Reserved3: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_REQUESTED_INFORMATION {
omac: OPM_OMAC,
cbRequestedInformationSize: ULONG,
abRequestedInformation: [BYTE; 4076],
}}
STRUCT!{#[repr(packed)] struct OPM_STANDARD_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulInformation: ULONG,
ulReserved: ULONG,
ulReserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACTUAL_OUTPUT_FORMAT {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulDisplayWidth: ULONG,
ulDisplayHeight: ULONG,
dsfSampleInterleaveFormat: DXVA2_SampleFormat,
d3dFormat: D3DFORMAT,
ulFrequencyNumerator: ULONG,
ulFrequencyDenominator: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACP_AND_CGMSA_SIGNALING {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulAvailableTVProtectionStandards: ULONG,
ulActiveTVProtectionStandard: ULONG,
ulReserved: ULONG,
ulAspectRatioValidMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioValidMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioValidMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved2: [ULONG; 4],
ulReserved3: [ULONG; 4],
}}
STRUCT!{#[repr(packed)] struct OPM_OUTPUT_ID_DATA {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
OutputId: UINT64,
}}
STRUCT!{#[repr(packed)] struct OPM_CONFIGURE_PARAMETERS {
omac: OPM_OMAC,
guidSetting: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_SET_PROTECTION_LEVEL_PARAMETERS {
ulProtectionType: ULONG,
ulProtectionLevel: ULONG,
Reserved: ULONG,
Reserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_ACP_AND_CGMSA_SIGNALING_PARAMETERS {
ulNewTVProtectionStandard: ULONG,
ulAspectRatioChangeMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioChangeMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioChangeMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved: [ULONG; 4],
ulReserved2: [ULONG; 4],
ulReserved3: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_HDCP_SRM_PARAMETERS {
ulSRMVersion: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_PARAMETERS {
cbVerifier: DWORD,
Verifier: [BYTE; 4052],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
Merit: DWORD,
}}
DEFINE_GUID!{IID_IOPMVideoOutput,
0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d}
RIDL!{#[uuid(0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d)]
interface IOPMVideoOutput(IOPMVideoOutputVtbl): IUnknown(IUnknownVtbl) {
fn StartInitialization(
prnRandomNumber: *mut OPM_RANDOM_NUMBER,
ppbCertificate: *mut *mut BYTE,
pulCertificateLength: *mut ULONG,
) -> HRESULT,
fn FinishInitialization(
pParameters: *const OPM_ENCRYPTED_INITIALIZATION_PARAMETERS,
) -> HRESULT,
fn GetInformation(
pParameters: *const OPM_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn COPPCompatibleGetInformation(
pParameters: *const OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn Configure(
pParameters: *const OPM_CONFIGURE_PARAMETERS,
ulAdditionalParametersSize: ULONG,
pbAdditionalParameters: *const BYTE,
) -> HRESULT,
}}
#[inline]
pub fn GetBusType(ulBusTypeAndImplementation: ULONG) -> ULONG {
ulBusTypeAndImplementation & OPM_BUS_TYPE_MASK
}
#[inline]
pub fn | (ulBusTypeAndImplementation: ULONG) -> ULONG {
(ulBusTypeAndImplementation & OPM_BUS_IMPLEMENTATION_MODIFIER_MASK) >> 16
}
#[inline]
pub fn IsNonStandardBusImplementation(ulBusTypeAndImplementation: ULONG) -> ULONG {
ulBusTypeAndImplementation & OPM_BUS_IMPLEMENTATION_MODIFIER_NON_STANDARD
}
extern "system" {
pub fn OPMGetVideoOutputsFromHMONITOR(
hMonitor: HMONITOR,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
pulNumVideoOutputs: *mut ULONG,
pppOPMVideoOutputArray: *mut *mut *mut IOPMVideoOutput,
) -> HRESULT;
pub fn OPMGetVideoOutputForTarget(
pAdapterLuid: *mut LUID,
VidPnTarget: ULONG,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
ppOPMVideoOutput: *mut *mut IOPMVideoOutput,
) -> HRESULT;
pub fn OPMGetVideoOutputsFromIDirect3DDevice9Object(
pDirect3DDevice9: *mut IDirect3DDevice9,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
pulNumVideoOutputs: *mut ULONG,
pppOPMVideoOutputArray: *mut *mut *mut IOPMVideoOutput,
) -> HRESULT;
}
| GetBusImplementation | identifier_name |
opmapi.rs | // Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use shared::basetsd::UINT64;
use shared::d3d9::IDirect3DDevice9;
use shared::d3d9types::D3DFORMAT;
use shared::guiddef::GUID;
use shared::minwindef::{BYTE, DWORD, ULONG};
use shared::windef::HMONITOR;
use um::dxva2api::DXVA2_SampleFormat;
use um::unknwnbase::{IUnknown, IUnknownVtbl};
use um::winnt::{HRESULT, LUID};
DEFINE_GUID!{OPM_GET_CURRENT_HDCP_SRM_VERSION,
0x99c5ceff, 0x5f1d, 0x4879, 0x81, 0xc1, 0xc5, 0x24, 0x43, 0xc9, 0x48, 0x2b}
DEFINE_GUID!{OPM_GET_CONNECTED_HDCP_DEVICE_INFORMATION,
0x0db59d74, 0xa992, 0x492e, 0xa0, 0xbd, 0xc2, 0x3f, 0xda, 0x56, 0x4e, 0x00}
DEFINE_GUID!{OPM_GET_ACP_AND_CGMSA_SIGNALING,
0x6629a591, 0x3b79, 0x4cf3, 0x92, 0x4a, 0x11, 0xe8, 0xe7, 0x81, 0x16, 0x71}
DEFINE_GUID!{OPM_GET_CONNECTOR_TYPE,
0x81d0bfd5, 0x6afe, 0x48c2, 0x99, 0xc0, 0x95, 0xa0, 0x8f, 0x97, 0xc5, 0xda}
DEFINE_GUID!{OPM_GET_SUPPORTED_PROTECTION_TYPES,
0x38f2a801, 0x9a6c, 0x48bb, 0x91, 0x07, 0xb6, 0x69, 0x6e, 0x6f, 0x17, 0x97}
DEFINE_GUID!{OPM_GET_VIRTUAL_PROTECTION_LEVEL,
0xb2075857, 0x3eda, 0x4d5d, 0x88, 0xdb, 0x74, 0x8f, 0x8c, 0x1a, 0x05, 0x49}
DEFINE_GUID!{OPM_GET_ACTUAL_PROTECTION_LEVEL,
0x1957210a, 0x7766, 0x452a, 0xb9, 0x9a, 0xd2, 0x7a, 0xed, 0x54, 0xf0, 0x3a}
DEFINE_GUID!{OPM_GET_ACTUAL_OUTPUT_FORMAT,
0xd7bf1ba3, 0xad13, 0x4f8e, 0xaf, 0x98, 0x0d, 0xcb, 0x3c, 0xa2, 0x04, 0xcc}
DEFINE_GUID!{OPM_GET_ADAPTER_BUS_TYPE,
0xc6f4d673, 0x6174, 0x4184, 0x8e, 0x35, 0xf6, 0xdb, 0x52, 0x0, 0xbc, 0xba}
DEFINE_GUID!{OPM_GET_OUTPUT_ID,
0x72cb6df3, 0x244f, 0x40ce, 0xb0, 0x9e, 0x20, 0x50, 0x6a, 0xf6, 0x30, 0x2f}
DEFINE_GUID!{OPM_GET_DVI_CHARACTERISTICS,
0xa470b3bb, 0x5dd7, 0x4172, 0x83, 0x9c, 0x3d, 0x37, 0x76, 0xe0, 0xeb, 0xf5}
DEFINE_GUID!{OPM_GET_CODEC_INFO,
0x4f374491, 0x8f5f, 0x4445, 0x9d, 0xba, 0x95, 0x58, 0x8f, 0x6b, 0x58, 0xb4}
DEFINE_GUID!{OPM_GET_OUTPUT_HARDWARE_PROTECTION_SUPPORT,
0x3b129589, 0x2af8, 0x4ef0, 0x96, 0xa2, 0x70, 0x4a, 0x84, 0x5a, 0x21, 0x8e}
DEFINE_GUID!{OPM_SET_PROTECTION_LEVEL,
0x9bb9327c, 0x4eb5, 0x4727, 0x9f, 0x00, 0xb4, 0x2b, 0x09, 0x19, 0xc0, 0xda}
DEFINE_GUID!{OPM_SET_ACP_AND_CGMSA_SIGNALING,
0x09a631a5, 0xd684, 0x4c60, 0x8e, 0x4d, 0xd3, 0xbb, 0x0f, 0x0b, 0xe3, 0xee}
DEFINE_GUID!{OPM_SET_HDCP_SRM,
0x8b5ef5d1, 0xc30d, 0x44ff, 0x84, 0xa5, 0xea, 0x71, 0xdc, 0xe7, 0x8f, 0x13}
DEFINE_GUID!{OPM_SET_PROTECTION_LEVEL_ACCORDING_TO_CSS_DVD,
0x39ce333e, 0x4cc0, 0x44ae, 0xbf, 0xcc, 0xda, 0x50, 0xb5, 0xf8, 0x2e, 0x72}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0001 {
OPM_OMAC_SIZE = 16,
OPM_128_BIT_RANDOM_NUMBER_SIZE = 16,
OPM_ENCRYPTED_INITIALIZATION_PARAMETERS_SIZE = 256,
OPM_CONFIGURE_SETTING_DATA_SIZE = 4056,
OPM_GET_INFORMATION_PARAMETERS_SIZE = 4056,
OPM_REQUESTED_INFORMATION_SIZE = 4076,
OPM_HDCP_KEY_SELECTION_VECTOR_SIZE = 5,
OPM_PROTECTION_TYPE_SIZE = 4,
OPM_BUS_TYPE_MASK = 0xffff,
OPM_BUS_IMPLEMENTATION_MODIFIER_MASK = 0x7fff,
}}
ENUM!{enum OPM_VIDEO_OUTPUT_SEMANTICS {
OPM_VOS_COPP_SEMANTICS = 0,
OPM_VOS_OPM_SEMANTICS = 1,
OPM_VOS_OPM_INDIRECT_DISPLAY = 2,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0002 {
OPM_HDCP_FLAG_NONE = 0,
OPM_HDCP_FLAG_REPEATER = 0x1,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0003 {
OPM_STATUS_NORMAL = 0,
OPM_STATUS_LINK_LOST = 0x1,
OPM_STATUS_RENEGOTIATION_REQUIRED = 0x2,
OPM_STATUS_TAMPERING_DETECTED = 0x4,
OPM_STATUS_REVOKED_HDCP_DEVICE_ATTACHED = 0x8,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0004 {
OPM_CONNECTOR_TYPE_OTHER = -1i32 as u32,
OPM_CONNECTOR_TYPE_VGA = 0,
OPM_CONNECTOR_TYPE_SVIDEO = 1,
OPM_CONNECTOR_TYPE_COMPOSITE_VIDEO = 2,
OPM_CONNECTOR_TYPE_COMPONENT_VIDEO = 3,
OPM_CONNECTOR_TYPE_DVI = 4,
OPM_CONNECTOR_TYPE_HDMI = 5,
OPM_CONNECTOR_TYPE_LVDS = 6,
OPM_CONNECTOR_TYPE_D_JPN = 8,
OPM_CONNECTOR_TYPE_SDI = 9,
OPM_CONNECTOR_TYPE_DISPLAYPORT_EXTERNAL = 10,
OPM_CONNECTOR_TYPE_DISPLAYPORT_EMBEDDED = 11,
OPM_CONNECTOR_TYPE_UDI_EXTERNAL = 12,
OPM_CONNECTOR_TYPE_UDI_EMBEDDED = 13,
OPM_CONNECTOR_TYPE_RESERVED = 14,
OPM_CONNECTOR_TYPE_MIRACAST = 15,
OPM_CONNECTOR_TYPE_TRANSPORT_AGNOSTIC_DIGITAL_MODE_A = 16,
OPM_CONNECTOR_TYPE_TRANSPORT_AGNOSTIC_DIGITAL_MODE_B = 17,
OPM_COPP_COMPATIBLE_CONNECTOR_TYPE_INTERNAL = 0x80000000,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0005 {
OPM_DVI_CHARACTERISTIC_1_0 = 1,
OPM_DVI_CHARACTERISTIC_1_1_OR_ABOVE = 2,
}}
ENUM!{enum OPM_OUTPUT_HARDWARE_PROTECTION {
OPM_OUTPUT_HARDWARE_PROTECTION_NOT_SUPPORTED = 0,
OPM_OUTPUT_HARDWARE_PROTECTION_SUPPORTED = 0x1,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0006 {
OPM_BUS_TYPE_OTHER = 0,
OPM_BUS_TYPE_PCI = 0x1,
OPM_BUS_TYPE_PCIX = 0x2,
OPM_BUS_TYPE_PCIEXPRESS = 0x3,
OPM_BUS_TYPE_AGP = 0x4,
OPM_BUS_IMPLEMENTATION_MODIFIER_INSIDE_OF_CHIPSET = 0x10000,
OPM_BUS_IMPLEMENTATION_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_CHIP = 0x20000,
OPM_BUS_IMPLEMENTATION_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_SOCKET = 0x30000,
OPM_BUS_IMPLEMENTATION_MODIFIER_DAUGHTER_BOARD_CONNECTOR = 0x40000,
OPM_BUS_IMPLEMENTATION_MODIFIER_DAUGHTER_BOARD_CONNECTOR_INSIDE_OF_NUAE = 0x50000,
OPM_BUS_IMPLEMENTATION_MODIFIER_NON_STANDARD = 0x80000000,
OPM_COPP_COMPATIBLE_BUS_TYPE_INTEGRATED = 0x80000000,
}}
ENUM!{enum OPM_DPCP_PROTECTION_LEVEL {
OPM_DPCP_OFF = 0,
OPM_DPCP_ON = 1,
OPM_DPCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum OPM_HDCP_PROTECTION_LEVEL {
OPM_HDCP_OFF = 0,
OPM_HDCP_ON = 1,
OPM_HDCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum OPM_TYPE_ENFORCEMENT_HDCP_PROTECTION_LEVEL {
OPM_TYPE_ENFORCEMENT_HDCP_OFF = OPM_HDCP_OFF,
OPM_TYPE_ENFORCEMENT_HDCP_ON_WITH_NO_TYPE_RESTRICTION = OPM_HDCP_ON,
OPM_TYPE_ENFORCEMENT_HDCP_ON_WITH_TYPE1_RESTRICTION = OPM_HDCP_ON + 1,
OPM_TYPE_ENFORCEMENT_HDCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0007 {
OPM_CGMSA_OFF = 0,
OPM_CGMSA_COPY_FREELY = 0x1,
OPM_CGMSA_COPY_NO_MORE = 0x2,
OPM_CGMSA_COPY_ONE_GENERATION = 0x3,
OPM_CGMSA_COPY_NEVER = 0x4,
OPM_CGMSA_REDISTRIBUTION_CONTROL_REQUIRED = 0x8,
}}
ENUM!{enum OPM_ACP_PROTECTION_LEVEL {
OPM_ACP_OFF = 0,
OPM_ACP_LEVEL_ONE = 1,
OPM_ACP_LEVEL_TWO = 2,
OPM_ACP_LEVEL_THREE = 3,
OPM_ACP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0008 {
OPM_PROTECTION_TYPE_OTHER = 0x80000000,
OPM_PROTECTION_TYPE_NONE = 0,
OPM_PROTECTION_TYPE_COPP_COMPATIBLE_HDCP = 0x1,
OPM_PROTECTION_TYPE_ACP = 0x2,
OPM_PROTECTION_TYPE_CGMSA = 0x4,
OPM_PROTECTION_TYPE_HDCP = 0x8,
OPM_PROTECTION_TYPE_DPCP = 0x10,
OPM_PROTECTION_TYPE_TYPE_ENFORCEMENT_HDCP = 0x20,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0009 {
OPM_PROTECTION_STANDARD_OTHER = 0x80000000,
OPM_PROTECTION_STANDARD_NONE = 0,
OPM_PROTECTION_STANDARD_IEC61880_525I = 0x1,
OPM_PROTECTION_STANDARD_IEC61880_2_525I = 0x2,
OPM_PROTECTION_STANDARD_IEC62375_625P = 0x4,
OPM_PROTECTION_STANDARD_EIA608B_525 = 0x8,
OPM_PROTECTION_STANDARD_EN300294_625I = 0x10, | OPM_PROTECTION_STANDARD_CEA805A_TYPEA_1125I = 0x80,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_525P = 0x100,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_750P = 0x200,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_1125I = 0x400,
OPM_PROTECTION_STANDARD_ARIBTRB15_525I = 0x800,
OPM_PROTECTION_STANDARD_ARIBTRB15_525P = 0x1000,
OPM_PROTECTION_STANDARD_ARIBTRB15_750P = 0x2000,
OPM_PROTECTION_STANDARD_ARIBTRB15_1125I = 0x4000,
}}
ENUM!{enum OPM_IMAGE_ASPECT_RATIO_EN300294 {
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3 = 0,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_CENTER = 1,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_TOP = 2,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_CENTER = 3,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_TOP = 4,
OPM_ASPECT_RATIO_EN300294_BOX_GT_16_BY_9_CENTER = 5,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3_PROTECTED_CENTER = 6,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_16_BY_9_ANAMORPHIC = 7,
OPM_ASPECT_RATIO_FORCE_ULONG = 0x7fffffff,
}}
STRUCT!{#[repr(packed)] struct OPM_RANDOM_NUMBER {
abRandomNumber: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_OMAC {
abOMAC: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_ENCRYPTED_INITIALIZATION_PARAMETERS {
abEncryptedInitializationParameters: [BYTE; 256],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_INFO_PARAMETERS {
omac: OPM_OMAC,
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS {
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_HDCP_KEY_SELECTION_VECTOR {
abKeySelectionVector: [BYTE; 5],
}}
STRUCT!{#[repr(packed)] struct OPM_CONNECTED_HDCP_DEVICE_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulHDCPFlags: ULONG,
ksvB: OPM_HDCP_KEY_SELECTION_VECTOR,
Reserved: [BYTE; 11],
Reserved2: [BYTE; 16],
Reserved3: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_REQUESTED_INFORMATION {
omac: OPM_OMAC,
cbRequestedInformationSize: ULONG,
abRequestedInformation: [BYTE; 4076],
}}
STRUCT!{#[repr(packed)] struct OPM_STANDARD_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulInformation: ULONG,
ulReserved: ULONG,
ulReserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACTUAL_OUTPUT_FORMAT {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulDisplayWidth: ULONG,
ulDisplayHeight: ULONG,
dsfSampleInterleaveFormat: DXVA2_SampleFormat,
d3dFormat: D3DFORMAT,
ulFrequencyNumerator: ULONG,
ulFrequencyDenominator: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACP_AND_CGMSA_SIGNALING {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulAvailableTVProtectionStandards: ULONG,
ulActiveTVProtectionStandard: ULONG,
ulReserved: ULONG,
ulAspectRatioValidMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioValidMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioValidMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved2: [ULONG; 4],
ulReserved3: [ULONG; 4],
}}
STRUCT!{#[repr(packed)] struct OPM_OUTPUT_ID_DATA {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
OutputId: UINT64,
}}
STRUCT!{#[repr(packed)] struct OPM_CONFIGURE_PARAMETERS {
omac: OPM_OMAC,
guidSetting: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_SET_PROTECTION_LEVEL_PARAMETERS {
ulProtectionType: ULONG,
ulProtectionLevel: ULONG,
Reserved: ULONG,
Reserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_ACP_AND_CGMSA_SIGNALING_PARAMETERS {
ulNewTVProtectionStandard: ULONG,
ulAspectRatioChangeMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioChangeMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioChangeMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved: [ULONG; 4],
ulReserved2: [ULONG; 4],
ulReserved3: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_HDCP_SRM_PARAMETERS {
ulSRMVersion: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_PARAMETERS {
cbVerifier: DWORD,
Verifier: [BYTE; 4052],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
Merit: DWORD,
}}
DEFINE_GUID!{IID_IOPMVideoOutput,
0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d}
RIDL!{#[uuid(0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d)]
interface IOPMVideoOutput(IOPMVideoOutputVtbl): IUnknown(IUnknownVtbl) {
fn StartInitialization(
prnRandomNumber: *mut OPM_RANDOM_NUMBER,
ppbCertificate: *mut *mut BYTE,
pulCertificateLength: *mut ULONG,
) -> HRESULT,
fn FinishInitialization(
pParameters: *const OPM_ENCRYPTED_INITIALIZATION_PARAMETERS,
) -> HRESULT,
fn GetInformation(
pParameters: *const OPM_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn COPPCompatibleGetInformation(
pParameters: *const OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn Configure(
pParameters: *const OPM_CONFIGURE_PARAMETERS,
ulAdditionalParametersSize: ULONG,
pbAdditionalParameters: *const BYTE,
) -> HRESULT,
}}
#[inline]
pub fn GetBusType(ulBusTypeAndImplementation: ULONG) -> ULONG {
ulBusTypeAndImplementation & OPM_BUS_TYPE_MASK
}
#[inline]
pub fn GetBusImplementation(ulBusTypeAndImplementation: ULONG) -> ULONG {
(ulBusTypeAndImplementation & OPM_BUS_IMPLEMENTATION_MODIFIER_MASK) >> 16
}
#[inline]
pub fn IsNonStandardBusImplementation(ulBusTypeAndImplementation: ULONG) -> ULONG {
ulBusTypeAndImplementation & OPM_BUS_IMPLEMENTATION_MODIFIER_NON_STANDARD
}
extern "system" {
pub fn OPMGetVideoOutputsFromHMONITOR(
hMonitor: HMONITOR,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
pulNumVideoOutputs: *mut ULONG,
pppOPMVideoOutputArray: *mut *mut *mut IOPMVideoOutput,
) -> HRESULT;
pub fn OPMGetVideoOutputForTarget(
pAdapterLuid: *mut LUID,
VidPnTarget: ULONG,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
ppOPMVideoOutput: *mut *mut IOPMVideoOutput,
) -> HRESULT;
pub fn OPMGetVideoOutputsFromIDirect3DDevice9Object(
pDirect3DDevice9: *mut IDirect3DDevice9,
vos: OPM_VIDEO_OUTPUT_SEMANTICS,
pulNumVideoOutputs: *mut ULONG,
pppOPMVideoOutputArray: *mut *mut *mut IOPMVideoOutput,
) -> HRESULT;
} | OPM_PROTECTION_STANDARD_CEA805A_TYPEA_525P = 0x20,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_750P = 0x40, | random_line_split |
oauth.rs | extern crate rand;
//Concurrency stuff
use std::io::Read;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use tiny_http::{Response, Server};
use serde::{Deserialize, Serialize};
use curl::easy::{Easy, List};
#[derive(Debug)]
enum OAuthState {
IDLE,
AUTHORIZED,
ERROR,
}
#[derive(Serialize, Deserialize, Debug)]
struct OAuthToken {
access_token: String,
token_type: String,
expires_in: usize,
scope: String,
refresh_token: String,
}
#[derive(Debug)]
pub struct OAuthClient {
client_id: String,
client_state: String,
authorization_link: String,
auth_state: OAuthState,
oauth_token: Option<OAuthToken>,
pub error_state: String,
pub code: String,
}
struct AuthBox {
has_error: bool,
error_msg: String,
code: String,
state: String,
}
impl OAuthClient {
pub fn new() -> OAuthClient {
build_oauth_client()
}
pub fn get_access_token(&self) -> String {
if let token = self.oauth_token.as_ref().unwrap() {
return token.access_token.clone();
}
"".to_string()
}
}
fn build_oauth_client() -> OAuthClient {
let client_state = generate_random_string(10);
let client_id = "7tMofTv8Ip3-Ig".to_string();
let authorization_link = format!( "https://www.reddit.com/api/v1/authorize?client_id={}&response_type=code&state={}&redirect_uri=http%3A%2F%2F127.0.0.1:8000&duration=permanent&scope=identity",client_id,client_state);
OAuthClient {
client_id,
client_state,
oauth_token: None,
authorization_link,
auth_state: OAuthState::IDLE,
error_state: "Initialized".to_string(),
code: "".to_string(),
}
}
pub fn curl_site(subreddit: &str, amount: usize, before: &str, after: &str) -> String {
let mut limit = amount;
if limit == 0 {
limit = 1;
}
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
let reddit_base_url = format!(
"https://www.reddit.com{}/.json?limit={}&after={}&before={}",
subreddit, limit, after, before
);
easy.url(&reddit_base_url).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut return_data: Vec<String> = Vec::new();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
return_data.push(html.clone());
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
return_data.join("")
}
pub fn request_site(token: String, url: String) {
println!("token:{} url:{}", token, url);
let mut list = List::new();
let data_header = format!("Authorization: bearer {}", token);
println!("data header: {}", data_header);
list.append(&data_header.to_string()).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url(&url).unwrap();
easy.http_headers(list).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
}
pub fn authorize_user(wait_time: usize) -> OAuthClient {
println!("Logging in...");
let mut oauth_client = build_oauth_client();
if does_access_token_exist() {
println!("Client already authorized");
use std::fs;
let access_token_serialized: String = fs::read_to_string("./access_token.rvp").unwrap();
let access_token: OAuthToken = serde_json::from_str(&access_token_serialized).unwrap();
oauth_client = OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(access_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: "".to_string(),
}
} else {
oauth_client = authorize_client(oauth_client, wait_time);
println!("Done!");
}
oauth_client
}
fn authorize_client(oauth_client: OAuthClient, wait_time: usize) -> OAuthClient {
if!webbrowser::open(&oauth_client.authorization_link).is_ok() {
println!("Could not open web browser");
}
let final_response =
Response::from_string("Authentication complete. You may close this window.");
let (tx_authentication, rx) = mpsc::channel();
let tx_countdown = mpsc::Sender::clone(&tx_authentication);
thread::spawn(move || {
let server = Server::http("127.0.0.1:8000").unwrap();
for request in server.incoming_requests() {
let request_url = request.url().to_string().clone();
let parameter_string: Vec<&str> = request_url.split("/?").collect();
if parameter_string.len() <= 1 {
continue;
};
let parameters: Vec<&str> = parameter_string[1].split('&').collect();
// Expect state and code parameters
if parameters.len()!= 2 {
let auth_box = AuthBox {
has_error: true,
error_msg: "Unexpected response from reddit api".to_string(),
code: "".to_string(),
state: "".to_string(),
};
tx_authentication.send(auth_box);
} else {
let state: Vec<&str> = parameters[0].split('=').collect();
let code: Vec<&str> = parameters[1].split('=').collect();
let auth_box = AuthBox {
has_error: false,
error_msg: "".to_string(),
code: code[1].to_string(),
state: state[1].to_string(),
};
tx_authentication.send(auth_box).unwrap();
}
}
drop(server);
});
thread::spawn(move || {
for passed_seconds in 0..wait_time {
thread::sleep(Duration::from_secs(1));
}
let auth_box = AuthBox {
has_error: true,
error_msg: "Reached timeout. User did not authorize usage of RPV in time".to_string(),
code: "".to_string(),
state: "".to_string(),
};
println!("Timeout during authentication");
tx_countdown.send(auth_box).unwrap();
});
//print!("{}[2J", 27 as char);
let auth_box = rx.recv().unwrap();
println!("Now waiting for access token.");
let data_field_string = format!(
"grant_type=authorization_code&code={}&redirect_uri=http://127.0.0.1:8000",
auth_box.code
);
println!("Datafield: {}", data_field_string);
let mut data_field = data_field_string.as_bytes();
let mut list = List::new();
let data_header = "Authorization: Basic N3RNb2ZUdjhJcDMtSWc6";
list.append(data_header).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url("https://www.reddit.com/api/v1/access_token")
.unwrap();
easy.http_headers(list).unwrap();
easy.post(true).unwrap();
easy.useragent(user_agent_header).unwrap();
easy.post_field_size(data_field.len() as u64).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.read_function(|buf| Ok(data_field.read(buf).unwrap_or(0)))
.unwrap();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
let oauth_token: OAuthToken = serde_json::from_str(&html).unwrap();
// Handle authentication response
if!auth_box.has_error {
if auth_box.state == oauth_client.client_state {
save_token(&oauth_token);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(oauth_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: auth_box.code,
}
} else {
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: "Return code is not the same. There is some tampering happening."
.to_string(),
code: auth_box.code,
}
}
} else {
println!("Error: {}", auth_box.error_msg);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: auth_box.error_msg,
code: oauth_client.code,
}
}
}
fn does_access_token_exist() -> bool {
use std::path::Path;
Path::new("./access_token.rvp").exists()
}
fn save_token(token: &OAuthToken) {
let serialized_token = serde_json::to_string(&token).unwrap();
use std::fs;
use std::fs::File;
use std::io::prelude::*;
if does_access_token_exist() |
let mut file = File::create("access_token.rvp").expect("Unable to create file");
file.write_all(serialized_token.as_bytes())
.expect("Unable to write access token");
}
fn generate_random_string(n: usize) -> String {
use rand::Rng;
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
let mut rng = rand::thread_rng();
let random_state: String = (0..n)
.map(|_| {
let idx = rng.gen_range(0, CHARSET.len());
CHARSET[idx] as char
})
.collect();
random_state
}
| {
fs::remove_file("access_token.rvp").expect("Could not remove file");
} | conditional_block |
oauth.rs | extern crate rand;
//Concurrency stuff
use std::io::Read;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use tiny_http::{Response, Server};
use serde::{Deserialize, Serialize};
use curl::easy::{Easy, List};
#[derive(Debug)]
enum OAuthState {
IDLE,
AUTHORIZED,
ERROR,
}
#[derive(Serialize, Deserialize, Debug)]
struct OAuthToken {
access_token: String,
token_type: String,
expires_in: usize,
scope: String,
refresh_token: String,
}
#[derive(Debug)]
pub struct | {
client_id: String,
client_state: String,
authorization_link: String,
auth_state: OAuthState,
oauth_token: Option<OAuthToken>,
pub error_state: String,
pub code: String,
}
struct AuthBox {
has_error: bool,
error_msg: String,
code: String,
state: String,
}
impl OAuthClient {
pub fn new() -> OAuthClient {
build_oauth_client()
}
pub fn get_access_token(&self) -> String {
if let token = self.oauth_token.as_ref().unwrap() {
return token.access_token.clone();
}
"".to_string()
}
}
fn build_oauth_client() -> OAuthClient {
let client_state = generate_random_string(10);
let client_id = "7tMofTv8Ip3-Ig".to_string();
let authorization_link = format!( "https://www.reddit.com/api/v1/authorize?client_id={}&response_type=code&state={}&redirect_uri=http%3A%2F%2F127.0.0.1:8000&duration=permanent&scope=identity",client_id,client_state);
OAuthClient {
client_id,
client_state,
oauth_token: None,
authorization_link,
auth_state: OAuthState::IDLE,
error_state: "Initialized".to_string(),
code: "".to_string(),
}
}
pub fn curl_site(subreddit: &str, amount: usize, before: &str, after: &str) -> String {
let mut limit = amount;
if limit == 0 {
limit = 1;
}
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
let reddit_base_url = format!(
"https://www.reddit.com{}/.json?limit={}&after={}&before={}",
subreddit, limit, after, before
);
easy.url(&reddit_base_url).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut return_data: Vec<String> = Vec::new();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
return_data.push(html.clone());
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
return_data.join("")
}
pub fn request_site(token: String, url: String) {
println!("token:{} url:{}", token, url);
let mut list = List::new();
let data_header = format!("Authorization: bearer {}", token);
println!("data header: {}", data_header);
list.append(&data_header.to_string()).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url(&url).unwrap();
easy.http_headers(list).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
}
pub fn authorize_user(wait_time: usize) -> OAuthClient {
println!("Logging in...");
let mut oauth_client = build_oauth_client();
if does_access_token_exist() {
println!("Client already authorized");
use std::fs;
let access_token_serialized: String = fs::read_to_string("./access_token.rvp").unwrap();
let access_token: OAuthToken = serde_json::from_str(&access_token_serialized).unwrap();
oauth_client = OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(access_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: "".to_string(),
}
} else {
oauth_client = authorize_client(oauth_client, wait_time);
println!("Done!");
}
oauth_client
}
fn authorize_client(oauth_client: OAuthClient, wait_time: usize) -> OAuthClient {
if!webbrowser::open(&oauth_client.authorization_link).is_ok() {
println!("Could not open web browser");
}
let final_response =
Response::from_string("Authentication complete. You may close this window.");
let (tx_authentication, rx) = mpsc::channel();
let tx_countdown = mpsc::Sender::clone(&tx_authentication);
thread::spawn(move || {
let server = Server::http("127.0.0.1:8000").unwrap();
for request in server.incoming_requests() {
let request_url = request.url().to_string().clone();
let parameter_string: Vec<&str> = request_url.split("/?").collect();
if parameter_string.len() <= 1 {
continue;
};
let parameters: Vec<&str> = parameter_string[1].split('&').collect();
// Expect state and code parameters
if parameters.len()!= 2 {
let auth_box = AuthBox {
has_error: true,
error_msg: "Unexpected response from reddit api".to_string(),
code: "".to_string(),
state: "".to_string(),
};
tx_authentication.send(auth_box);
} else {
let state: Vec<&str> = parameters[0].split('=').collect();
let code: Vec<&str> = parameters[1].split('=').collect();
let auth_box = AuthBox {
has_error: false,
error_msg: "".to_string(),
code: code[1].to_string(),
state: state[1].to_string(),
};
tx_authentication.send(auth_box).unwrap();
}
}
drop(server);
});
thread::spawn(move || {
for passed_seconds in 0..wait_time {
thread::sleep(Duration::from_secs(1));
}
let auth_box = AuthBox {
has_error: true,
error_msg: "Reached timeout. User did not authorize usage of RPV in time".to_string(),
code: "".to_string(),
state: "".to_string(),
};
println!("Timeout during authentication");
tx_countdown.send(auth_box).unwrap();
});
//print!("{}[2J", 27 as char);
let auth_box = rx.recv().unwrap();
println!("Now waiting for access token.");
let data_field_string = format!(
"grant_type=authorization_code&code={}&redirect_uri=http://127.0.0.1:8000",
auth_box.code
);
println!("Datafield: {}", data_field_string);
let mut data_field = data_field_string.as_bytes();
let mut list = List::new();
let data_header = "Authorization: Basic N3RNb2ZUdjhJcDMtSWc6";
list.append(data_header).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url("https://www.reddit.com/api/v1/access_token")
.unwrap();
easy.http_headers(list).unwrap();
easy.post(true).unwrap();
easy.useragent(user_agent_header).unwrap();
easy.post_field_size(data_field.len() as u64).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.read_function(|buf| Ok(data_field.read(buf).unwrap_or(0)))
.unwrap();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
let oauth_token: OAuthToken = serde_json::from_str(&html).unwrap();
// Handle authentication response
if!auth_box.has_error {
if auth_box.state == oauth_client.client_state {
save_token(&oauth_token);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(oauth_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: auth_box.code,
}
} else {
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: "Return code is not the same. There is some tampering happening."
.to_string(),
code: auth_box.code,
}
}
} else {
println!("Error: {}", auth_box.error_msg);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: auth_box.error_msg,
code: oauth_client.code,
}
}
}
fn does_access_token_exist() -> bool {
use std::path::Path;
Path::new("./access_token.rvp").exists()
}
fn save_token(token: &OAuthToken) {
let serialized_token = serde_json::to_string(&token).unwrap();
use std::fs;
use std::fs::File;
use std::io::prelude::*;
if does_access_token_exist() {
fs::remove_file("access_token.rvp").expect("Could not remove file");
}
let mut file = File::create("access_token.rvp").expect("Unable to create file");
file.write_all(serialized_token.as_bytes())
.expect("Unable to write access token");
}
fn generate_random_string(n: usize) -> String {
use rand::Rng;
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
let mut rng = rand::thread_rng();
let random_state: String = (0..n)
.map(|_| {
let idx = rng.gen_range(0, CHARSET.len());
CHARSET[idx] as char
})
.collect();
random_state
}
| OAuthClient | identifier_name |
oauth.rs | extern crate rand;
//Concurrency stuff
use std::io::Read;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use tiny_http::{Response, Server};
use serde::{Deserialize, Serialize};
use curl::easy::{Easy, List};
#[derive(Debug)]
enum OAuthState {
IDLE,
AUTHORIZED,
ERROR,
}
#[derive(Serialize, Deserialize, Debug)]
struct OAuthToken {
access_token: String,
token_type: String,
expires_in: usize,
scope: String,
refresh_token: String,
}
#[derive(Debug)]
pub struct OAuthClient {
client_id: String,
client_state: String,
authorization_link: String,
auth_state: OAuthState,
oauth_token: Option<OAuthToken>,
pub error_state: String,
pub code: String,
}
struct AuthBox {
has_error: bool,
error_msg: String,
code: String,
state: String,
}
impl OAuthClient {
pub fn new() -> OAuthClient {
build_oauth_client()
}
pub fn get_access_token(&self) -> String {
if let token = self.oauth_token.as_ref().unwrap() {
return token.access_token.clone();
}
"".to_string()
}
}
fn build_oauth_client() -> OAuthClient {
let client_state = generate_random_string(10);
let client_id = "7tMofTv8Ip3-Ig".to_string();
let authorization_link = format!( "https://www.reddit.com/api/v1/authorize?client_id={}&response_type=code&state={}&redirect_uri=http%3A%2F%2F127.0.0.1:8000&duration=permanent&scope=identity",client_id,client_state);
OAuthClient {
client_id,
client_state,
oauth_token: None,
authorization_link,
auth_state: OAuthState::IDLE,
error_state: "Initialized".to_string(),
code: "".to_string(),
}
}
pub fn curl_site(subreddit: &str, amount: usize, before: &str, after: &str) -> String {
let mut limit = amount;
if limit == 0 {
limit = 1;
}
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
let reddit_base_url = format!(
"https://www.reddit.com{}/.json?limit={}&after={}&before={}",
subreddit, limit, after, before
);
easy.url(&reddit_base_url).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut return_data: Vec<String> = Vec::new();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
return_data.push(html.clone());
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
return_data.join("")
}
pub fn request_site(token: String, url: String) {
println!("token:{} url:{}", token, url);
let mut list = List::new();
let data_header = format!("Authorization: bearer {}", token);
println!("data header: {}", data_header);
list.append(&data_header.to_string()).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url(&url).unwrap();
easy.http_headers(list).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
}
pub fn authorize_user(wait_time: usize) -> OAuthClient {
println!("Logging in...");
let mut oauth_client = build_oauth_client();
if does_access_token_exist() {
println!("Client already authorized");
use std::fs;
let access_token_serialized: String = fs::read_to_string("./access_token.rvp").unwrap();
let access_token: OAuthToken = serde_json::from_str(&access_token_serialized).unwrap();
oauth_client = OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(access_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: "".to_string(),
}
} else {
oauth_client = authorize_client(oauth_client, wait_time);
println!("Done!");
}
oauth_client
}
fn authorize_client(oauth_client: OAuthClient, wait_time: usize) -> OAuthClient {
if!webbrowser::open(&oauth_client.authorization_link).is_ok() {
println!("Could not open web browser");
}
let final_response =
Response::from_string("Authentication complete. You may close this window.");
let (tx_authentication, rx) = mpsc::channel();
let tx_countdown = mpsc::Sender::clone(&tx_authentication);
thread::spawn(move || {
let server = Server::http("127.0.0.1:8000").unwrap();
for request in server.incoming_requests() {
let request_url = request.url().to_string().clone();
let parameter_string: Vec<&str> = request_url.split("/?").collect();
if parameter_string.len() <= 1 {
continue;
};
let parameters: Vec<&str> = parameter_string[1].split('&').collect();
// Expect state and code parameters
if parameters.len()!= 2 {
let auth_box = AuthBox {
has_error: true,
error_msg: "Unexpected response from reddit api".to_string(),
code: "".to_string(),
state: "".to_string(),
};
tx_authentication.send(auth_box);
} else {
let state: Vec<&str> = parameters[0].split('=').collect();
let code: Vec<&str> = parameters[1].split('=').collect();
let auth_box = AuthBox {
has_error: false,
error_msg: "".to_string(),
code: code[1].to_string(),
state: state[1].to_string(),
};
tx_authentication.send(auth_box).unwrap();
}
}
drop(server);
});
thread::spawn(move || { | }
let auth_box = AuthBox {
has_error: true,
error_msg: "Reached timeout. User did not authorize usage of RPV in time".to_string(),
code: "".to_string(),
state: "".to_string(),
};
println!("Timeout during authentication");
tx_countdown.send(auth_box).unwrap();
});
//print!("{}[2J", 27 as char);
let auth_box = rx.recv().unwrap();
println!("Now waiting for access token.");
let data_field_string = format!(
"grant_type=authorization_code&code={}&redirect_uri=http://127.0.0.1:8000",
auth_box.code
);
println!("Datafield: {}", data_field_string);
let mut data_field = data_field_string.as_bytes();
let mut list = List::new();
let data_header = "Authorization: Basic N3RNb2ZUdjhJcDMtSWc6";
list.append(data_header).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url("https://www.reddit.com/api/v1/access_token")
.unwrap();
easy.http_headers(list).unwrap();
easy.post(true).unwrap();
easy.useragent(user_agent_header).unwrap();
easy.post_field_size(data_field.len() as u64).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.read_function(|buf| Ok(data_field.read(buf).unwrap_or(0)))
.unwrap();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
let oauth_token: OAuthToken = serde_json::from_str(&html).unwrap();
// Handle authentication response
if!auth_box.has_error {
if auth_box.state == oauth_client.client_state {
save_token(&oauth_token);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(oauth_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: auth_box.code,
}
} else {
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: "Return code is not the same. There is some tampering happening."
.to_string(),
code: auth_box.code,
}
}
} else {
println!("Error: {}", auth_box.error_msg);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: auth_box.error_msg,
code: oauth_client.code,
}
}
}
fn does_access_token_exist() -> bool {
use std::path::Path;
Path::new("./access_token.rvp").exists()
}
fn save_token(token: &OAuthToken) {
let serialized_token = serde_json::to_string(&token).unwrap();
use std::fs;
use std::fs::File;
use std::io::prelude::*;
if does_access_token_exist() {
fs::remove_file("access_token.rvp").expect("Could not remove file");
}
let mut file = File::create("access_token.rvp").expect("Unable to create file");
file.write_all(serialized_token.as_bytes())
.expect("Unable to write access token");
}
fn generate_random_string(n: usize) -> String {
use rand::Rng;
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
let mut rng = rand::thread_rng();
let random_state: String = (0..n)
.map(|_| {
let idx = rng.gen_range(0, CHARSET.len());
CHARSET[idx] as char
})
.collect();
random_state
} | for passed_seconds in 0..wait_time {
thread::sleep(Duration::from_secs(1)); | random_line_split |
connection.rs |
enum ReceiverStatus {
Udp(Vec<u8>),
Websocket(VoiceEvent),
}
#[allow(dead_code)]
struct ThreadItems {
rx: MpscReceiver<ReceiverStatus>,
udp_close_sender: MpscSender<i32>,
udp_thread: JoinHandle<()>,
ws_close_sender: MpscSender<i32>,
ws_thread: JoinHandle<()>,
}
#[allow(dead_code)]
pub struct Connection {
audio_timer: Timer,
client: Arc<Mutex<Client>>,
decoder_map: HashMap<(u32, Channels), OpusDecoder>,
destination: SocketAddr,
encoder: OpusEncoder,
encoder_stereo: bool,
keepalive_timer: Timer,
key: Key,
sequence: u16,
silence_frames: u8,
soft_clip: SoftClip,
speaking: bool,
ssrc: u32,
thread_items: ThreadItems,
timestamp: u32,
udp: UdpSocket,
user_id: UserId,
}
impl Connection {
pub fn new(mut info: ConnectionInfo) -> Result<Connection> {
let url = generate_url(&mut info.endpoint)?;
let mut client = ClientBuilder::from_url(&url).connect_secure(None)?;
client.send_json(&payload::build_identify(&info))?;
let hello = loop {
let value = match client.recv_json()? {
Some(value) => value,
None => continue,
};
match VoiceEvent::deserialize(value)? {
VoiceEvent::Hello(received_hello) => {
break received_hello;
},
VoiceEvent::Heartbeat(_) => continue,
other => {
debug!("[Voice] Expected hello/heartbeat; got: {:?}", other);
return Err(Error::Voice(VoiceError::ExpectedHandshake));
},
}
};
if!has_valid_mode(&hello.modes) {
return Err(Error::Voice(VoiceError::VoiceModeUnavailable));
}
let destination = (&info.endpoint[..], hello.port)
.to_socket_addrs()?
.next()
.ok_or(Error::Voice(VoiceError::HostnameResolve))?;
// Important to note here: the length of the packet can be of either 4
// or 70 bytes. If it is 4 bytes, then we need to send a 70-byte packet
// to determine the IP.
//
// Past the initial 4 bytes, the packet _must_ be completely empty data.
//
// The returned packet will be a null-terminated string of the IP, and
// the port encoded in LE in the last two bytes of the packet.
let udp = UdpSocket::bind("0.0.0.0:0")?;
{
let mut bytes = [0; 70];
(&mut bytes[..]).write_u32::<BigEndian>(hello.ssrc)?;
udp.send_to(&bytes, destination)?;
let mut bytes = [0; 256];
let (len, _addr) = udp.recv_from(&mut bytes)?;
// Find the position in the bytes that contains the first byte of 0,
// indicating the "end of the address".
let index = bytes
.iter()
.skip(4)
.position(|&x| x == 0)
.ok_or(Error::Voice(VoiceError::FindingByte))?;
let pos = 4 + index;
let addr = String::from_utf8_lossy(&bytes[4..pos]);
let port_pos = len - 2;
let port = (&bytes[port_pos..]).read_u16::<LittleEndian>()?;
client
.send_json(&payload::build_select_protocol(addr, port))?;
}
let key = encryption_key(&mut client)?;
let _ = client
.stream_ref()
.as_tcp()
.set_read_timeout(Some(Duration::from_millis(25)));
let mutexed_client = Arc::new(Mutex::new(client));
let thread_items = start_threads(Arc::clone(&mutexed_client), &udp)?;
info!("[Voice] Connected to: {}", info.endpoint);
let encoder = OpusEncoder::new(SAMPLE_RATE, Channels::Mono, CodingMode::Audio)?;
let soft_clip = SoftClip::new(Channels::Stereo);
// Per discord dev team's current recommendations:
// (https://discordapp.com/developers/docs/topics/voice-connections#heartbeating)
let temp_heartbeat = (hello.heartbeat_interval as f64 * 0.75) as u64;
Ok(Connection {
audio_timer: Timer::new(1000 * 60 * 4),
client: mutexed_client,
decoder_map: HashMap::new(),
destination,
encoder,
encoder_stereo: false,
key,
keepalive_timer: Timer::new(temp_heartbeat),
udp,
sequence: 0,
silence_frames: 0,
soft_clip,
speaking: false,
ssrc: hello.ssrc,
thread_items,
timestamp: 0,
user_id: info.user_id,
})
}
#[allow(unused_variables)]
pub fn cycle(&mut self,
sources: &mut Vec<LockedAudio>,
receiver: &mut Option<Box<AudioReceiver>>,
audio_timer: &mut Timer)
-> Result<()> {
let mut buffer = [0i16; 960 * 2];
let mut mix_buffer = [0f32; 960 * 2];
let mut packet = [0u8; 512];
let mut nonce = secretbox::Nonce([0; 24]);
if let Some(receiver) = receiver.as_mut() {
while let Ok(status) = self.thread_items.rx.try_recv() {
match status {
ReceiverStatus::Udp(packet) => {
let mut handle = &packet[2..];
let seq = handle.read_u16::<BigEndian>()?;
let timestamp = handle.read_u32::<BigEndian>()?;
let ssrc = handle.read_u32::<BigEndian>()?;
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
if let Ok(mut decrypted) =
secretbox::open(&packet[HEADER_LEN..], &nonce, &self.key) {
let channels = opus_packet::get_nb_channels(&decrypted)?;
let entry =
self.decoder_map.entry((ssrc, channels)).or_insert_with(
|| OpusDecoder::new(SAMPLE_RATE, channels).unwrap(),
);
// Strip RTP Header Extensions (one-byte)
if decrypted[0] == 0xBE && decrypted[1] == 0xDE {
// Read the length bytes as a big-endian u16.
let header_extension_len = BigEndian::read_u16(&decrypted[2..4]);
let mut offset = 4;
for _ in 0..header_extension_len {
let byte = decrypted[offset];
offset += 1;
if byte == 0 {
continue;
}
offset += 1 + (0b1111 & (byte >> 4)) as usize;
}
while decrypted[offset] == 0 {
offset += 1;
}
decrypted = decrypted.split_off(offset);
}
let len = entry.decode(&decrypted, &mut buffer, false)?;
let is_stereo = channels == Channels::Stereo;
let b = if is_stereo { len * 2 } else { len };
receiver
.voice_packet(ssrc, seq, timestamp, is_stereo, &buffer[..b]);
}
},
ReceiverStatus::Websocket(VoiceEvent::Speaking(ev)) => {
receiver.speaking_update(ev.ssrc, ev.user_id.0, ev.speaking);
},
ReceiverStatus::Websocket(other) => {
info!("[Voice] Received other websocket data: {:?}", other);
},
}
}
} else {
loop {
if self.thread_items.rx.try_recv().is_err() {
break;
}
}
}
// Send the voice websocket keepalive if it's time
if self.keepalive_timer.check() {
self.client.lock().send_json(&payload::build_keepalive())?;
}
// Send UDP keepalive if it's time
if self.audio_timer.check() {
let mut bytes = [0; 4];
(&mut bytes[..]).write_u32::<BigEndian>(self.ssrc)?;
self.udp.send_to(&bytes, self.destination)?;
}
let mut opus_frame = Vec::new();
let mut len = 0;
// Walk over all the audio files, removing those which have finished.
// For this purpose, we need a while loop in Rust.
let mut i = 0;
while i < sources.len() {
let mut finished = false;
let aud_lock = (&sources[i]).clone();
let mut aud = aud_lock.lock();
let vol = aud.volume;
let skip =!aud.playing;
{
let stream = &mut aud.source;
if skip {
i += 1;
continue;
}
// Assume this for now, at least.
// We'll be fusing streams, so we can either keep
// as stereo or downmix to mono.
let is_stereo = true;
let source_stereo = stream.is_stereo();
if is_stereo!= self.encoder_stereo {
let channels = if is_stereo {
Channels::Stereo
} else {
Channels::Mono
};
self.encoder = OpusEncoder::new(SAMPLE_RATE, channels, CodingMode::Audio)?;
self.encoder_stereo = is_stereo;
}
let temp_len = match stream.get_type() {
// TODO: decode back to raw, then include.
AudioType::Opus => match stream.read_opus_frame() {
Some(frame) => {
opus_frame = frame;
opus_frame.len()
},
None => 0,
},
AudioType::Pcm => {
let buffer_len = if source_stereo { 960 * 2 } else { 960 };
match stream.read_pcm_frame(&mut buffer[..buffer_len]) {
Some(len) => len,
None => 0,
}
},
};
// May need to force interleave/copy.
combine_audio(buffer, &mut mix_buffer, source_stereo, vol);
len = len.max(temp_len);
i += if temp_len > 0 {
1
} else {
sources.remove(i);
finished = true;
0
};
}
aud.finished = finished;
if!finished {
aud.step_frame();
}
};
self.soft_clip.apply(&mut mix_buffer);
if len == 0 {
if self.silence_frames > 0 {
self.silence_frames -= 1;
// Explicit "Silence" frame.
opus_frame.extend_from_slice(&[0xf8, 0xff, 0xfe]);
} else {
// Per official guidelines, send 5x silence BEFORE we stop speaking.
self.set_speaking(false)?;
audio_timer.await();
return Ok(());
}
} else {
self.silence_frames = 5;
for value in &mut buffer[len..] {
*value = 0;
}
}
self.set_speaking(true)?;
let index = self.prep_packet(&mut packet, mix_buffer, &opus_frame, nonce)?;
audio_timer.await();
self.udp.send_to(&packet[..index], self.destination)?;
self.audio_timer.reset();
Ok(())
}
fn prep_packet(&mut self,
packet: &mut [u8; 512],
buffer: [f32; 1920],
opus_frame: &[u8],
mut nonce: Nonce)
-> Result<usize> {
{
let mut cursor = &mut packet[..HEADER_LEN];
cursor.write_all(&[0x80, 0x78])?;
cursor.write_u16::<BigEndian>(self.sequence)?;
cursor.write_u32::<BigEndian>(self.timestamp)?;
cursor.write_u32::<BigEndian>(self.ssrc)?;
}
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
let sl_index = packet.len() - 16;
let buffer_len = if self.encoder_stereo { 960 * 2 } else { 960 };
let len = if opus_frame.is_empty() {
self.encoder
.encode_float(&buffer[..buffer_len], &mut packet[HEADER_LEN..sl_index])?
} else {
let len = opus_frame.len();
packet[HEADER_LEN..HEADER_LEN + len]
.clone_from_slice(opus_frame);
len
};
| let index = HEADER_LEN + crypted.len();
packet[HEADER_LEN..index].clone_from_slice(&crypted);
self.sequence = self.sequence.wrapping_add(1);
self.timestamp = self.timestamp.wrapping_add(960);
Ok(HEADER_LEN + crypted.len())
}
fn set_speaking(&mut self, speaking: bool) -> Result<()> {
if self.speaking == speaking {
return Ok(());
}
self.speaking = speaking;
self.client.lock().send_json(&payload::build_speaking(speaking))
}
}
impl Drop for Connection {
fn drop(&mut self) {
let _ = self.thread_items.udp_close_sender.send(0);
let _ = self.thread_items.ws_close_sender.send(0);
info!("[Voice] Disconnected");
}
}
#[inline]
fn combine_audio(
raw_buffer: [i16; 1920],
float_buffer: &mut [f32; 1920],
true_stereo: bool,
volume: f32,
) {
for i in 0..1920 {
let sample_index = if true_stereo { i } else { i/2 };
let sample = (raw_buffer[sample_index] as f32) / 32768.0;
float_buffer[i] = float_buffer[i] + sample * volume;
}
}
fn generate_url(endpoint: &mut String) -> Result<WebsocketUrl> {
if endpoint.ends_with(":80") {
let len = endpoint.len();
endpoint.truncate(len - 3);
}
WebsocketUrl::parse(&format!("wss://{}/?v={}", endpoint, VOICE_GATEWAY_VERSION))
.or(Err(Error::Voice(VoiceError::EndpointUrl)))
}
#[inline]
fn encryption_key(client: &mut Client) -> Result<Key> {
loop {
let value = match client.recv_json()? {
Some(value) => value,
None => continue,
};
match VoiceEvent::deserialize(value)? {
VoiceEvent::Ready(ready) => {
if ready.mode!= CRYPTO_MODE {
return Err(Error::Voice(VoiceError::VoiceModeInvalid));
}
return Key::from_slice(&ready.secret_key)
.ok_or(Error::Voice(VoiceError::KeyGen));
},
VoiceEvent::Unknown(op, value) => {
debug!(
| let crypted = {
let slice = &packet[HEADER_LEN..HEADER_LEN + len];
secretbox::seal(slice, &nonce, &self.key)
}; | random_line_split |
connection.rs | enum ReceiverStatus {
Udp(Vec<u8>),
Websocket(VoiceEvent),
}
#[allow(dead_code)]
struct ThreadItems {
rx: MpscReceiver<ReceiverStatus>,
udp_close_sender: MpscSender<i32>,
udp_thread: JoinHandle<()>,
ws_close_sender: MpscSender<i32>,
ws_thread: JoinHandle<()>,
}
#[allow(dead_code)]
pub struct Connection {
audio_timer: Timer,
client: Arc<Mutex<Client>>,
decoder_map: HashMap<(u32, Channels), OpusDecoder>,
destination: SocketAddr,
encoder: OpusEncoder,
encoder_stereo: bool,
keepalive_timer: Timer,
key: Key,
sequence: u16,
silence_frames: u8,
soft_clip: SoftClip,
speaking: bool,
ssrc: u32,
thread_items: ThreadItems,
timestamp: u32,
udp: UdpSocket,
user_id: UserId,
}
impl Connection {
pub fn new(mut info: ConnectionInfo) -> Result<Connection> {
let url = generate_url(&mut info.endpoint)?;
let mut client = ClientBuilder::from_url(&url).connect_secure(None)?;
client.send_json(&payload::build_identify(&info))?;
let hello = loop {
let value = match client.recv_json()? {
Some(value) => value,
None => continue,
};
match VoiceEvent::deserialize(value)? {
VoiceEvent::Hello(received_hello) => {
break received_hello;
},
VoiceEvent::Heartbeat(_) => continue,
other => {
debug!("[Voice] Expected hello/heartbeat; got: {:?}", other);
return Err(Error::Voice(VoiceError::ExpectedHandshake));
},
}
};
if!has_valid_mode(&hello.modes) {
return Err(Error::Voice(VoiceError::VoiceModeUnavailable));
}
let destination = (&info.endpoint[..], hello.port)
.to_socket_addrs()?
.next()
.ok_or(Error::Voice(VoiceError::HostnameResolve))?;
// Important to note here: the length of the packet can be of either 4
// or 70 bytes. If it is 4 bytes, then we need to send a 70-byte packet
// to determine the IP.
//
// Past the initial 4 bytes, the packet _must_ be completely empty data.
//
// The returned packet will be a null-terminated string of the IP, and
// the port encoded in LE in the last two bytes of the packet.
let udp = UdpSocket::bind("0.0.0.0:0")?;
{
let mut bytes = [0; 70];
(&mut bytes[..]).write_u32::<BigEndian>(hello.ssrc)?;
udp.send_to(&bytes, destination)?;
let mut bytes = [0; 256];
let (len, _addr) = udp.recv_from(&mut bytes)?;
// Find the position in the bytes that contains the first byte of 0,
// indicating the "end of the address".
let index = bytes
.iter()
.skip(4)
.position(|&x| x == 0)
.ok_or(Error::Voice(VoiceError::FindingByte))?;
let pos = 4 + index;
let addr = String::from_utf8_lossy(&bytes[4..pos]);
let port_pos = len - 2;
let port = (&bytes[port_pos..]).read_u16::<LittleEndian>()?;
client
.send_json(&payload::build_select_protocol(addr, port))?;
}
let key = encryption_key(&mut client)?;
let _ = client
.stream_ref()
.as_tcp()
.set_read_timeout(Some(Duration::from_millis(25)));
let mutexed_client = Arc::new(Mutex::new(client));
let thread_items = start_threads(Arc::clone(&mutexed_client), &udp)?;
info!("[Voice] Connected to: {}", info.endpoint);
let encoder = OpusEncoder::new(SAMPLE_RATE, Channels::Mono, CodingMode::Audio)?;
let soft_clip = SoftClip::new(Channels::Stereo);
// Per discord dev team's current recommendations:
// (https://discordapp.com/developers/docs/topics/voice-connections#heartbeating)
let temp_heartbeat = (hello.heartbeat_interval as f64 * 0.75) as u64;
Ok(Connection {
audio_timer: Timer::new(1000 * 60 * 4),
client: mutexed_client,
decoder_map: HashMap::new(),
destination,
encoder,
encoder_stereo: false,
key,
keepalive_timer: Timer::new(temp_heartbeat),
udp,
sequence: 0,
silence_frames: 0,
soft_clip,
speaking: false,
ssrc: hello.ssrc,
thread_items,
timestamp: 0,
user_id: info.user_id,
})
}
#[allow(unused_variables)]
pub fn cycle(&mut self,
sources: &mut Vec<LockedAudio>,
receiver: &mut Option<Box<AudioReceiver>>,
audio_timer: &mut Timer)
-> Result<()> {
let mut buffer = [0i16; 960 * 2];
let mut mix_buffer = [0f32; 960 * 2];
let mut packet = [0u8; 512];
let mut nonce = secretbox::Nonce([0; 24]);
if let Some(receiver) = receiver.as_mut() {
while let Ok(status) = self.thread_items.rx.try_recv() {
match status {
ReceiverStatus::Udp(packet) => {
let mut handle = &packet[2..];
let seq = handle.read_u16::<BigEndian>()?;
let timestamp = handle.read_u32::<BigEndian>()?;
let ssrc = handle.read_u32::<BigEndian>()?;
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
if let Ok(mut decrypted) =
secretbox::open(&packet[HEADER_LEN..], &nonce, &self.key) {
let channels = opus_packet::get_nb_channels(&decrypted)?;
let entry =
self.decoder_map.entry((ssrc, channels)).or_insert_with(
|| OpusDecoder::new(SAMPLE_RATE, channels).unwrap(),
);
// Strip RTP Header Extensions (one-byte)
if decrypted[0] == 0xBE && decrypted[1] == 0xDE {
// Read the length bytes as a big-endian u16.
let header_extension_len = BigEndian::read_u16(&decrypted[2..4]);
let mut offset = 4;
for _ in 0..header_extension_len {
let byte = decrypted[offset];
offset += 1;
if byte == 0 {
continue;
}
offset += 1 + (0b1111 & (byte >> 4)) as usize;
}
while decrypted[offset] == 0 {
offset += 1;
}
decrypted = decrypted.split_off(offset);
}
let len = entry.decode(&decrypted, &mut buffer, false)?;
let is_stereo = channels == Channels::Stereo;
let b = if is_stereo { len * 2 } else { len };
receiver
.voice_packet(ssrc, seq, timestamp, is_stereo, &buffer[..b]);
}
},
ReceiverStatus::Websocket(VoiceEvent::Speaking(ev)) => {
receiver.speaking_update(ev.ssrc, ev.user_id.0, ev.speaking);
},
ReceiverStatus::Websocket(other) => {
info!("[Voice] Received other websocket data: {:?}", other);
},
}
}
} else {
loop {
if self.thread_items.rx.try_recv().is_err() {
break;
}
}
}
// Send the voice websocket keepalive if it's time
if self.keepalive_timer.check() {
self.client.lock().send_json(&payload::build_keepalive())?;
}
// Send UDP keepalive if it's time
if self.audio_timer.check() {
let mut bytes = [0; 4];
(&mut bytes[..]).write_u32::<BigEndian>(self.ssrc)?;
self.udp.send_to(&bytes, self.destination)?;
}
let mut opus_frame = Vec::new();
let mut len = 0;
// Walk over all the audio files, removing those which have finished.
// For this purpose, we need a while loop in Rust.
let mut i = 0;
while i < sources.len() {
let mut finished = false;
let aud_lock = (&sources[i]).clone();
let mut aud = aud_lock.lock();
let vol = aud.volume;
let skip =!aud.playing;
{
let stream = &mut aud.source;
if skip {
i += 1;
continue;
}
// Assume this for now, at least.
// We'll be fusing streams, so we can either keep
// as stereo or downmix to mono.
let is_stereo = true;
let source_stereo = stream.is_stereo();
if is_stereo!= self.encoder_stereo {
let channels = if is_stereo {
Channels::Stereo
} else {
Channels::Mono
};
self.encoder = OpusEncoder::new(SAMPLE_RATE, channels, CodingMode::Audio)?;
self.encoder_stereo = is_stereo;
}
let temp_len = match stream.get_type() {
// TODO: decode back to raw, then include.
AudioType::Opus => match stream.read_opus_frame() {
Some(frame) => {
opus_frame = frame;
opus_frame.len()
},
None => 0,
},
AudioType::Pcm => {
let buffer_len = if source_stereo { 960 * 2 } else { 960 };
match stream.read_pcm_frame(&mut buffer[..buffer_len]) {
Some(len) => len,
None => 0,
}
},
};
// May need to force interleave/copy.
combine_audio(buffer, &mut mix_buffer, source_stereo, vol);
len = len.max(temp_len);
i += if temp_len > 0 {
1
} else {
sources.remove(i);
finished = true;
0
};
}
aud.finished = finished;
if!finished {
aud.step_frame();
}
};
self.soft_clip.apply(&mut mix_buffer);
if len == 0 {
if self.silence_frames > 0 {
self.silence_frames -= 1;
// Explicit "Silence" frame.
opus_frame.extend_from_slice(&[0xf8, 0xff, 0xfe]);
} else {
// Per official guidelines, send 5x silence BEFORE we stop speaking.
self.set_speaking(false)?;
audio_timer.await();
return Ok(());
}
} else {
self.silence_frames = 5;
for value in &mut buffer[len..] {
*value = 0;
}
}
self.set_speaking(true)?;
let index = self.prep_packet(&mut packet, mix_buffer, &opus_frame, nonce)?;
audio_timer.await();
self.udp.send_to(&packet[..index], self.destination)?;
self.audio_timer.reset();
Ok(())
}
fn prep_packet(&mut self,
packet: &mut [u8; 512],
buffer: [f32; 1920],
opus_frame: &[u8],
mut nonce: Nonce)
-> Result<usize> {
{
let mut cursor = &mut packet[..HEADER_LEN];
cursor.write_all(&[0x80, 0x78])?;
cursor.write_u16::<BigEndian>(self.sequence)?;
cursor.write_u32::<BigEndian>(self.timestamp)?;
cursor.write_u32::<BigEndian>(self.ssrc)?;
}
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
let sl_index = packet.len() - 16;
let buffer_len = if self.encoder_stereo { 960 * 2 } else { 960 };
let len = if opus_frame.is_empty() {
self.encoder
.encode_float(&buffer[..buffer_len], &mut packet[HEADER_LEN..sl_index])?
} else {
let len = opus_frame.len();
packet[HEADER_LEN..HEADER_LEN + len]
.clone_from_slice(opus_frame);
len
};
let crypted = {
let slice = &packet[HEADER_LEN..HEADER_LEN + len];
secretbox::seal(slice, &nonce, &self.key)
};
let index = HEADER_LEN + crypted.len();
packet[HEADER_LEN..index].clone_from_slice(&crypted);
self.sequence = self.sequence.wrapping_add(1);
self.timestamp = self.timestamp.wrapping_add(960);
Ok(HEADER_LEN + crypted.len())
}
fn | (&mut self, speaking: bool) -> Result<()> {
if self.speaking == speaking {
return Ok(());
}
self.speaking = speaking;
self.client.lock().send_json(&payload::build_speaking(speaking))
}
}
impl Drop for Connection {
fn drop(&mut self) {
let _ = self.thread_items.udp_close_sender.send(0);
let _ = self.thread_items.ws_close_sender.send(0);
info!("[Voice] Disconnected");
}
}
#[inline]
fn combine_audio(
raw_buffer: [i16; 1920],
float_buffer: &mut [f32; 1920],
true_stereo: bool,
volume: f32,
) {
for i in 0..1920 {
let sample_index = if true_stereo { i } else { i/2 };
let sample = (raw_buffer[sample_index] as f32) / 32768.0;
float_buffer[i] = float_buffer[i] + sample * volume;
}
}
fn generate_url(endpoint: &mut String) -> Result<WebsocketUrl> {
if endpoint.ends_with(":80") {
let len = endpoint.len();
endpoint.truncate(len - 3);
}
WebsocketUrl::parse(&format!("wss://{}/?v={}", endpoint, VOICE_GATEWAY_VERSION))
.or(Err(Error::Voice(VoiceError::EndpointUrl)))
}
#[inline]
fn encryption_key(client: &mut Client) -> Result<Key> {
loop {
let value = match client.recv_json()? {
Some(value) => value,
None => continue,
};
match VoiceEvent::deserialize(value)? {
VoiceEvent::Ready(ready) => {
if ready.mode!= CRYPTO_MODE {
return Err(Error::Voice(VoiceError::VoiceModeInvalid));
}
return Key::from_slice(&ready.secret_key)
.ok_or(Error::Voice(VoiceError::KeyGen));
},
VoiceEvent::Unknown(op, value) => {
debug!(
| set_speaking | identifier_name |
connection.rs | enum ReceiverStatus {
Udp(Vec<u8>),
Websocket(VoiceEvent),
}
#[allow(dead_code)]
struct ThreadItems {
rx: MpscReceiver<ReceiverStatus>,
udp_close_sender: MpscSender<i32>,
udp_thread: JoinHandle<()>,
ws_close_sender: MpscSender<i32>,
ws_thread: JoinHandle<()>,
}
#[allow(dead_code)]
pub struct Connection {
audio_timer: Timer,
client: Arc<Mutex<Client>>,
decoder_map: HashMap<(u32, Channels), OpusDecoder>,
destination: SocketAddr,
encoder: OpusEncoder,
encoder_stereo: bool,
keepalive_timer: Timer,
key: Key,
sequence: u16,
silence_frames: u8,
soft_clip: SoftClip,
speaking: bool,
ssrc: u32,
thread_items: ThreadItems,
timestamp: u32,
udp: UdpSocket,
user_id: UserId,
}
impl Connection {
pub fn new(mut info: ConnectionInfo) -> Result<Connection> {
let url = generate_url(&mut info.endpoint)?;
let mut client = ClientBuilder::from_url(&url).connect_secure(None)?;
client.send_json(&payload::build_identify(&info))?;
let hello = loop {
let value = match client.recv_json()? {
Some(value) => value,
None => continue,
};
match VoiceEvent::deserialize(value)? {
VoiceEvent::Hello(received_hello) => {
break received_hello;
},
VoiceEvent::Heartbeat(_) => continue,
other => {
debug!("[Voice] Expected hello/heartbeat; got: {:?}", other);
return Err(Error::Voice(VoiceError::ExpectedHandshake));
},
}
};
if!has_valid_mode(&hello.modes) {
return Err(Error::Voice(VoiceError::VoiceModeUnavailable));
}
let destination = (&info.endpoint[..], hello.port)
.to_socket_addrs()?
.next()
.ok_or(Error::Voice(VoiceError::HostnameResolve))?;
// Important to note here: the length of the packet can be of either 4
// or 70 bytes. If it is 4 bytes, then we need to send a 70-byte packet
// to determine the IP.
//
// Past the initial 4 bytes, the packet _must_ be completely empty data.
//
// The returned packet will be a null-terminated string of the IP, and
// the port encoded in LE in the last two bytes of the packet.
let udp = UdpSocket::bind("0.0.0.0:0")?;
{
let mut bytes = [0; 70];
(&mut bytes[..]).write_u32::<BigEndian>(hello.ssrc)?;
udp.send_to(&bytes, destination)?;
let mut bytes = [0; 256];
let (len, _addr) = udp.recv_from(&mut bytes)?;
// Find the position in the bytes that contains the first byte of 0,
// indicating the "end of the address".
let index = bytes
.iter()
.skip(4)
.position(|&x| x == 0)
.ok_or(Error::Voice(VoiceError::FindingByte))?;
let pos = 4 + index;
let addr = String::from_utf8_lossy(&bytes[4..pos]);
let port_pos = len - 2;
let port = (&bytes[port_pos..]).read_u16::<LittleEndian>()?;
client
.send_json(&payload::build_select_protocol(addr, port))?;
}
let key = encryption_key(&mut client)?;
let _ = client
.stream_ref()
.as_tcp()
.set_read_timeout(Some(Duration::from_millis(25)));
let mutexed_client = Arc::new(Mutex::new(client));
let thread_items = start_threads(Arc::clone(&mutexed_client), &udp)?;
info!("[Voice] Connected to: {}", info.endpoint);
let encoder = OpusEncoder::new(SAMPLE_RATE, Channels::Mono, CodingMode::Audio)?;
let soft_clip = SoftClip::new(Channels::Stereo);
// Per discord dev team's current recommendations:
// (https://discordapp.com/developers/docs/topics/voice-connections#heartbeating)
let temp_heartbeat = (hello.heartbeat_interval as f64 * 0.75) as u64;
Ok(Connection {
audio_timer: Timer::new(1000 * 60 * 4),
client: mutexed_client,
decoder_map: HashMap::new(),
destination,
encoder,
encoder_stereo: false,
key,
keepalive_timer: Timer::new(temp_heartbeat),
udp,
sequence: 0,
silence_frames: 0,
soft_clip,
speaking: false,
ssrc: hello.ssrc,
thread_items,
timestamp: 0,
user_id: info.user_id,
})
}
#[allow(unused_variables)]
pub fn cycle(&mut self,
sources: &mut Vec<LockedAudio>,
receiver: &mut Option<Box<AudioReceiver>>,
audio_timer: &mut Timer)
-> Result<()> {
let mut buffer = [0i16; 960 * 2];
let mut mix_buffer = [0f32; 960 * 2];
let mut packet = [0u8; 512];
let mut nonce = secretbox::Nonce([0; 24]);
if let Some(receiver) = receiver.as_mut() {
while let Ok(status) = self.thread_items.rx.try_recv() {
match status {
ReceiverStatus::Udp(packet) => {
let mut handle = &packet[2..];
let seq = handle.read_u16::<BigEndian>()?;
let timestamp = handle.read_u32::<BigEndian>()?;
let ssrc = handle.read_u32::<BigEndian>()?;
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
if let Ok(mut decrypted) =
secretbox::open(&packet[HEADER_LEN..], &nonce, &self.key) {
let channels = opus_packet::get_nb_channels(&decrypted)?;
let entry =
self.decoder_map.entry((ssrc, channels)).or_insert_with(
|| OpusDecoder::new(SAMPLE_RATE, channels).unwrap(),
);
// Strip RTP Header Extensions (one-byte)
if decrypted[0] == 0xBE && decrypted[1] == 0xDE {
// Read the length bytes as a big-endian u16.
let header_extension_len = BigEndian::read_u16(&decrypted[2..4]);
let mut offset = 4;
for _ in 0..header_extension_len {
let byte = decrypted[offset];
offset += 1;
if byte == 0 {
continue;
}
offset += 1 + (0b1111 & (byte >> 4)) as usize;
}
while decrypted[offset] == 0 {
offset += 1;
}
decrypted = decrypted.split_off(offset);
}
let len = entry.decode(&decrypted, &mut buffer, false)?;
let is_stereo = channels == Channels::Stereo;
let b = if is_stereo { len * 2 } else { len };
receiver
.voice_packet(ssrc, seq, timestamp, is_stereo, &buffer[..b]);
}
},
ReceiverStatus::Websocket(VoiceEvent::Speaking(ev)) => {
receiver.speaking_update(ev.ssrc, ev.user_id.0, ev.speaking);
},
ReceiverStatus::Websocket(other) => {
info!("[Voice] Received other websocket data: {:?}", other);
},
}
}
} else {
loop {
if self.thread_items.rx.try_recv().is_err() {
break;
}
}
}
// Send the voice websocket keepalive if it's time
if self.keepalive_timer.check() {
self.client.lock().send_json(&payload::build_keepalive())?;
}
// Send UDP keepalive if it's time
if self.audio_timer.check() {
let mut bytes = [0; 4];
(&mut bytes[..]).write_u32::<BigEndian>(self.ssrc)?;
self.udp.send_to(&bytes, self.destination)?;
}
let mut opus_frame = Vec::new();
let mut len = 0;
// Walk over all the audio files, removing those which have finished.
// For this purpose, we need a while loop in Rust.
let mut i = 0;
while i < sources.len() {
let mut finished = false;
let aud_lock = (&sources[i]).clone();
let mut aud = aud_lock.lock();
let vol = aud.volume;
let skip =!aud.playing;
{
let stream = &mut aud.source;
if skip {
i += 1;
continue;
}
// Assume this for now, at least.
// We'll be fusing streams, so we can either keep
// as stereo or downmix to mono.
let is_stereo = true;
let source_stereo = stream.is_stereo();
if is_stereo!= self.encoder_stereo {
let channels = if is_stereo {
Channels::Stereo
} else {
Channels::Mono
};
self.encoder = OpusEncoder::new(SAMPLE_RATE, channels, CodingMode::Audio)?;
self.encoder_stereo = is_stereo;
}
let temp_len = match stream.get_type() {
// TODO: decode back to raw, then include.
AudioType::Opus => match stream.read_opus_frame() {
Some(frame) => {
opus_frame = frame;
opus_frame.len()
},
None => 0,
},
AudioType::Pcm => | ,
};
// May need to force interleave/copy.
combine_audio(buffer, &mut mix_buffer, source_stereo, vol);
len = len.max(temp_len);
i += if temp_len > 0 {
1
} else {
sources.remove(i);
finished = true;
0
};
}
aud.finished = finished;
if!finished {
aud.step_frame();
}
};
self.soft_clip.apply(&mut mix_buffer);
if len == 0 {
if self.silence_frames > 0 {
self.silence_frames -= 1;
// Explicit "Silence" frame.
opus_frame.extend_from_slice(&[0xf8, 0xff, 0xfe]);
} else {
// Per official guidelines, send 5x silence BEFORE we stop speaking.
self.set_speaking(false)?;
audio_timer.await();
return Ok(());
}
} else {
self.silence_frames = 5;
for value in &mut buffer[len..] {
*value = 0;
}
}
self.set_speaking(true)?;
let index = self.prep_packet(&mut packet, mix_buffer, &opus_frame, nonce)?;
audio_timer.await();
self.udp.send_to(&packet[..index], self.destination)?;
self.audio_timer.reset();
Ok(())
}
fn prep_packet(&mut self,
packet: &mut [u8; 512],
buffer: [f32; 1920],
opus_frame: &[u8],
mut nonce: Nonce)
-> Result<usize> {
{
let mut cursor = &mut packet[..HEADER_LEN];
cursor.write_all(&[0x80, 0x78])?;
cursor.write_u16::<BigEndian>(self.sequence)?;
cursor.write_u32::<BigEndian>(self.timestamp)?;
cursor.write_u32::<BigEndian>(self.ssrc)?;
}
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
let sl_index = packet.len() - 16;
let buffer_len = if self.encoder_stereo { 960 * 2 } else { 960 };
let len = if opus_frame.is_empty() {
self.encoder
.encode_float(&buffer[..buffer_len], &mut packet[HEADER_LEN..sl_index])?
} else {
let len = opus_frame.len();
packet[HEADER_LEN..HEADER_LEN + len]
.clone_from_slice(opus_frame);
len
};
let crypted = {
let slice = &packet[HEADER_LEN..HEADER_LEN + len];
secretbox::seal(slice, &nonce, &self.key)
};
let index = HEADER_LEN + crypted.len();
packet[HEADER_LEN..index].clone_from_slice(&crypted);
self.sequence = self.sequence.wrapping_add(1);
self.timestamp = self.timestamp.wrapping_add(960);
Ok(HEADER_LEN + crypted.len())
}
fn set_speaking(&mut self, speaking: bool) -> Result<()> {
if self.speaking == speaking {
return Ok(());
}
self.speaking = speaking;
self.client.lock().send_json(&payload::build_speaking(speaking))
}
}
impl Drop for Connection {
fn drop(&mut self) {
let _ = self.thread_items.udp_close_sender.send(0);
let _ = self.thread_items.ws_close_sender.send(0);
info!("[Voice] Disconnected");
}
}
#[inline]
fn combine_audio(
raw_buffer: [i16; 1920],
float_buffer: &mut [f32; 1920],
true_stereo: bool,
volume: f32,
) {
for i in 0..1920 {
let sample_index = if true_stereo { i } else { i/2 };
let sample = (raw_buffer[sample_index] as f32) / 32768.0;
float_buffer[i] = float_buffer[i] + sample * volume;
}
}
fn generate_url(endpoint: &mut String) -> Result<WebsocketUrl> {
if endpoint.ends_with(":80") {
let len = endpoint.len();
endpoint.truncate(len - 3);
}
WebsocketUrl::parse(&format!("wss://{}/?v={}", endpoint, VOICE_GATEWAY_VERSION))
.or(Err(Error::Voice(VoiceError::EndpointUrl)))
}
#[inline]
fn encryption_key(client: &mut Client) -> Result<Key> {
loop {
let value = match client.recv_json()? {
Some(value) => value,
None => continue,
};
match VoiceEvent::deserialize(value)? {
VoiceEvent::Ready(ready) => {
if ready.mode!= CRYPTO_MODE {
return Err(Error::Voice(VoiceError::VoiceModeInvalid));
}
return Key::from_slice(&ready.secret_key)
.ok_or(Error::Voice(VoiceError::KeyGen));
},
VoiceEvent::Unknown(op, value) => {
debug!(
| {
let buffer_len = if source_stereo { 960 * 2 } else { 960 };
match stream.read_pcm_frame(&mut buffer[..buffer_len]) {
Some(len) => len,
None => 0,
}
} | conditional_block |
connection.rs | enum ReceiverStatus {
Udp(Vec<u8>),
Websocket(VoiceEvent),
}
#[allow(dead_code)]
struct ThreadItems {
rx: MpscReceiver<ReceiverStatus>,
udp_close_sender: MpscSender<i32>,
udp_thread: JoinHandle<()>,
ws_close_sender: MpscSender<i32>,
ws_thread: JoinHandle<()>,
}
#[allow(dead_code)]
pub struct Connection {
audio_timer: Timer,
client: Arc<Mutex<Client>>,
decoder_map: HashMap<(u32, Channels), OpusDecoder>,
destination: SocketAddr,
encoder: OpusEncoder,
encoder_stereo: bool,
keepalive_timer: Timer,
key: Key,
sequence: u16,
silence_frames: u8,
soft_clip: SoftClip,
speaking: bool,
ssrc: u32,
thread_items: ThreadItems,
timestamp: u32,
udp: UdpSocket,
user_id: UserId,
}
impl Connection {
pub fn new(mut info: ConnectionInfo) -> Result<Connection> {
let url = generate_url(&mut info.endpoint)?;
let mut client = ClientBuilder::from_url(&url).connect_secure(None)?;
client.send_json(&payload::build_identify(&info))?;
let hello = loop {
let value = match client.recv_json()? {
Some(value) => value,
None => continue,
};
match VoiceEvent::deserialize(value)? {
VoiceEvent::Hello(received_hello) => {
break received_hello;
},
VoiceEvent::Heartbeat(_) => continue,
other => {
debug!("[Voice] Expected hello/heartbeat; got: {:?}", other);
return Err(Error::Voice(VoiceError::ExpectedHandshake));
},
}
};
if!has_valid_mode(&hello.modes) {
return Err(Error::Voice(VoiceError::VoiceModeUnavailable));
}
let destination = (&info.endpoint[..], hello.port)
.to_socket_addrs()?
.next()
.ok_or(Error::Voice(VoiceError::HostnameResolve))?;
// Important to note here: the length of the packet can be of either 4
// or 70 bytes. If it is 4 bytes, then we need to send a 70-byte packet
// to determine the IP.
//
// Past the initial 4 bytes, the packet _must_ be completely empty data.
//
// The returned packet will be a null-terminated string of the IP, and
// the port encoded in LE in the last two bytes of the packet.
let udp = UdpSocket::bind("0.0.0.0:0")?;
{
let mut bytes = [0; 70];
(&mut bytes[..]).write_u32::<BigEndian>(hello.ssrc)?;
udp.send_to(&bytes, destination)?;
let mut bytes = [0; 256];
let (len, _addr) = udp.recv_from(&mut bytes)?;
// Find the position in the bytes that contains the first byte of 0,
// indicating the "end of the address".
let index = bytes
.iter()
.skip(4)
.position(|&x| x == 0)
.ok_or(Error::Voice(VoiceError::FindingByte))?;
let pos = 4 + index;
let addr = String::from_utf8_lossy(&bytes[4..pos]);
let port_pos = len - 2;
let port = (&bytes[port_pos..]).read_u16::<LittleEndian>()?;
client
.send_json(&payload::build_select_protocol(addr, port))?;
}
let key = encryption_key(&mut client)?;
let _ = client
.stream_ref()
.as_tcp()
.set_read_timeout(Some(Duration::from_millis(25)));
let mutexed_client = Arc::new(Mutex::new(client));
let thread_items = start_threads(Arc::clone(&mutexed_client), &udp)?;
info!("[Voice] Connected to: {}", info.endpoint);
let encoder = OpusEncoder::new(SAMPLE_RATE, Channels::Mono, CodingMode::Audio)?;
let soft_clip = SoftClip::new(Channels::Stereo);
// Per discord dev team's current recommendations:
// (https://discordapp.com/developers/docs/topics/voice-connections#heartbeating)
let temp_heartbeat = (hello.heartbeat_interval as f64 * 0.75) as u64;
Ok(Connection {
audio_timer: Timer::new(1000 * 60 * 4),
client: mutexed_client,
decoder_map: HashMap::new(),
destination,
encoder,
encoder_stereo: false,
key,
keepalive_timer: Timer::new(temp_heartbeat),
udp,
sequence: 0,
silence_frames: 0,
soft_clip,
speaking: false,
ssrc: hello.ssrc,
thread_items,
timestamp: 0,
user_id: info.user_id,
})
}
#[allow(unused_variables)]
pub fn cycle(&mut self,
sources: &mut Vec<LockedAudio>,
receiver: &mut Option<Box<AudioReceiver>>,
audio_timer: &mut Timer)
-> Result<()> {
let mut buffer = [0i16; 960 * 2];
let mut mix_buffer = [0f32; 960 * 2];
let mut packet = [0u8; 512];
let mut nonce = secretbox::Nonce([0; 24]);
if let Some(receiver) = receiver.as_mut() {
while let Ok(status) = self.thread_items.rx.try_recv() {
match status {
ReceiverStatus::Udp(packet) => {
let mut handle = &packet[2..];
let seq = handle.read_u16::<BigEndian>()?;
let timestamp = handle.read_u32::<BigEndian>()?;
let ssrc = handle.read_u32::<BigEndian>()?;
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
if let Ok(mut decrypted) =
secretbox::open(&packet[HEADER_LEN..], &nonce, &self.key) {
let channels = opus_packet::get_nb_channels(&decrypted)?;
let entry =
self.decoder_map.entry((ssrc, channels)).or_insert_with(
|| OpusDecoder::new(SAMPLE_RATE, channels).unwrap(),
);
// Strip RTP Header Extensions (one-byte)
if decrypted[0] == 0xBE && decrypted[1] == 0xDE {
// Read the length bytes as a big-endian u16.
let header_extension_len = BigEndian::read_u16(&decrypted[2..4]);
let mut offset = 4;
for _ in 0..header_extension_len {
let byte = decrypted[offset];
offset += 1;
if byte == 0 {
continue;
}
offset += 1 + (0b1111 & (byte >> 4)) as usize;
}
while decrypted[offset] == 0 {
offset += 1;
}
decrypted = decrypted.split_off(offset);
}
let len = entry.decode(&decrypted, &mut buffer, false)?;
let is_stereo = channels == Channels::Stereo;
let b = if is_stereo { len * 2 } else { len };
receiver
.voice_packet(ssrc, seq, timestamp, is_stereo, &buffer[..b]);
}
},
ReceiverStatus::Websocket(VoiceEvent::Speaking(ev)) => {
receiver.speaking_update(ev.ssrc, ev.user_id.0, ev.speaking);
},
ReceiverStatus::Websocket(other) => {
info!("[Voice] Received other websocket data: {:?}", other);
},
}
}
} else {
loop {
if self.thread_items.rx.try_recv().is_err() {
break;
}
}
}
// Send the voice websocket keepalive if it's time
if self.keepalive_timer.check() {
self.client.lock().send_json(&payload::build_keepalive())?;
}
// Send UDP keepalive if it's time
if self.audio_timer.check() {
let mut bytes = [0; 4];
(&mut bytes[..]).write_u32::<BigEndian>(self.ssrc)?;
self.udp.send_to(&bytes, self.destination)?;
}
let mut opus_frame = Vec::new();
let mut len = 0;
// Walk over all the audio files, removing those which have finished.
// For this purpose, we need a while loop in Rust.
let mut i = 0;
while i < sources.len() {
let mut finished = false;
let aud_lock = (&sources[i]).clone();
let mut aud = aud_lock.lock();
let vol = aud.volume;
let skip =!aud.playing;
{
let stream = &mut aud.source;
if skip {
i += 1;
continue;
}
// Assume this for now, at least.
// We'll be fusing streams, so we can either keep
// as stereo or downmix to mono.
let is_stereo = true;
let source_stereo = stream.is_stereo();
if is_stereo!= self.encoder_stereo {
let channels = if is_stereo {
Channels::Stereo
} else {
Channels::Mono
};
self.encoder = OpusEncoder::new(SAMPLE_RATE, channels, CodingMode::Audio)?;
self.encoder_stereo = is_stereo;
}
let temp_len = match stream.get_type() {
// TODO: decode back to raw, then include.
AudioType::Opus => match stream.read_opus_frame() {
Some(frame) => {
opus_frame = frame;
opus_frame.len()
},
None => 0,
},
AudioType::Pcm => {
let buffer_len = if source_stereo { 960 * 2 } else { 960 };
match stream.read_pcm_frame(&mut buffer[..buffer_len]) {
Some(len) => len,
None => 0,
}
},
};
// May need to force interleave/copy.
combine_audio(buffer, &mut mix_buffer, source_stereo, vol);
len = len.max(temp_len);
i += if temp_len > 0 {
1
} else {
sources.remove(i);
finished = true;
0
};
}
aud.finished = finished;
if!finished {
aud.step_frame();
}
};
self.soft_clip.apply(&mut mix_buffer);
if len == 0 {
if self.silence_frames > 0 {
self.silence_frames -= 1;
// Explicit "Silence" frame.
opus_frame.extend_from_slice(&[0xf8, 0xff, 0xfe]);
} else {
// Per official guidelines, send 5x silence BEFORE we stop speaking.
self.set_speaking(false)?;
audio_timer.await();
return Ok(());
}
} else {
self.silence_frames = 5;
for value in &mut buffer[len..] {
*value = 0;
}
}
self.set_speaking(true)?;
let index = self.prep_packet(&mut packet, mix_buffer, &opus_frame, nonce)?;
audio_timer.await();
self.udp.send_to(&packet[..index], self.destination)?;
self.audio_timer.reset();
Ok(())
}
fn prep_packet(&mut self,
packet: &mut [u8; 512],
buffer: [f32; 1920],
opus_frame: &[u8],
mut nonce: Nonce)
-> Result<usize> {
{
let mut cursor = &mut packet[..HEADER_LEN];
cursor.write_all(&[0x80, 0x78])?;
cursor.write_u16::<BigEndian>(self.sequence)?;
cursor.write_u32::<BigEndian>(self.timestamp)?;
cursor.write_u32::<BigEndian>(self.ssrc)?;
}
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
let sl_index = packet.len() - 16;
let buffer_len = if self.encoder_stereo { 960 * 2 } else { 960 };
let len = if opus_frame.is_empty() {
self.encoder
.encode_float(&buffer[..buffer_len], &mut packet[HEADER_LEN..sl_index])?
} else {
let len = opus_frame.len();
packet[HEADER_LEN..HEADER_LEN + len]
.clone_from_slice(opus_frame);
len
};
let crypted = {
let slice = &packet[HEADER_LEN..HEADER_LEN + len];
secretbox::seal(slice, &nonce, &self.key)
};
let index = HEADER_LEN + crypted.len();
packet[HEADER_LEN..index].clone_from_slice(&crypted);
self.sequence = self.sequence.wrapping_add(1);
self.timestamp = self.timestamp.wrapping_add(960);
Ok(HEADER_LEN + crypted.len())
}
fn set_speaking(&mut self, speaking: bool) -> Result<()> {
if self.speaking == speaking {
return Ok(());
}
self.speaking = speaking;
self.client.lock().send_json(&payload::build_speaking(speaking))
}
}
impl Drop for Connection {
fn drop(&mut self) {
let _ = self.thread_items.udp_close_sender.send(0);
let _ = self.thread_items.ws_close_sender.send(0);
info!("[Voice] Disconnected");
}
}
#[inline]
fn combine_audio(
raw_buffer: [i16; 1920],
float_buffer: &mut [f32; 1920],
true_stereo: bool,
volume: f32,
) |
fn generate_url(endpoint: &mut String) -> Result<WebsocketUrl> {
if endpoint.ends_with(":80") {
let len = endpoint.len();
endpoint.truncate(len - 3);
}
WebsocketUrl::parse(&format!("wss://{}/?v={}", endpoint, VOICE_GATEWAY_VERSION))
.or(Err(Error::Voice(VoiceError::EndpointUrl)))
}
#[inline]
fn encryption_key(client: &mut Client) -> Result<Key> {
loop {
let value = match client.recv_json()? {
Some(value) => value,
None => continue,
};
match VoiceEvent::deserialize(value)? {
VoiceEvent::Ready(ready) => {
if ready.mode!= CRYPTO_MODE {
return Err(Error::Voice(VoiceError::VoiceModeInvalid));
}
return Key::from_slice(&ready.secret_key)
.ok_or(Error::Voice(VoiceError::KeyGen));
},
VoiceEvent::Unknown(op, value) => {
debug!(
| {
for i in 0..1920 {
let sample_index = if true_stereo { i } else { i/2 };
let sample = (raw_buffer[sample_index] as f32) / 32768.0;
float_buffer[i] = float_buffer[i] + sample * volume;
}
} | identifier_body |
initializer.rs | //! Reactor used to initialize a node.
use std::fmt::{self, Display, Formatter};
use datasize::DataSize;
use derive_more::From;
use prometheus::Registry;
use reactor::ReactorEvent;
use serde::Serialize;
use thiserror::Error;
use tracing::info;
use crate::{
components::{
chainspec_loader::{self, ChainspecLoader},
contract_runtime::{self, ContractRuntime},
gossiper,
network::NetworkIdentity,
small_network::{GossipedAddress, SmallNetworkIdentity, SmallNetworkIdentityError},
storage::{self, Storage},
Component,
},
effect::{
announcements::{
ChainspecLoaderAnnouncement, ContractRuntimeAnnouncement, ControlAnnouncement,
},
requests::{
ConsensusRequest, ContractRuntimeRequest, LinearChainRequest, NetworkRequest,
RestRequest, StateStoreRequest, StorageRequest,
},
EffectBuilder, Effects,
},
protocol::Message,
reactor::{self, participating, EventQueueHandle, ReactorExit},
types::{chainspec, NodeId},
utils::WithDir,
NodeRng,
};
/// Top-level event for the reactor.
#[derive(Debug, From, Serialize)]
#[must_use]
pub enum Event {
/// Chainspec handler event.
#[from]
Chainspec(chainspec_loader::Event),
/// Storage event.
#[from]
Storage(#[serde(skip_serializing)] storage::Event),
/// Contract runtime event.
#[from]
ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event),
/// Request for state storage.
#[from]
StateStoreRequest(StateStoreRequest),
/// Control announcement
#[from]
ControlAnnouncement(ControlAnnouncement),
}
impl ReactorEvent for Event {
fn as_control(&self) -> Option<&ControlAnnouncement> {
if let Self::ControlAnnouncement(ref ctrl_ann) = self {
Some(ctrl_ann)
} else {
None
}
}
}
impl From<StorageRequest> for Event {
fn from(request: StorageRequest) -> Self {
Event::Storage(storage::Event::StorageRequest(request))
}
}
impl From<ContractRuntimeRequest> for Event {
fn from(request: ContractRuntimeRequest) -> Self {
Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request)))
}
}
impl From<NetworkRequest<NodeId, Message>> for Event {
fn from(_request: NetworkRequest<NodeId, Message>) -> Self {
unreachable!("no network traffic happens during initialization")
}
}
impl From<ChainspecLoaderAnnouncement> for Event {
fn from(_announcement: ChainspecLoaderAnnouncement) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<LinearChainRequest<NodeId>> for Event {
fn from(_req: LinearChainRequest<NodeId>) -> Self {
unreachable!("no linear chain events happen during initialization")
}
}
impl From<NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>> for Event {
fn from(_request: NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>) -> Self {
unreachable!("no gossiper events happen during initialization")
}
}
impl From<ConsensusRequest> for Event {
fn from(_request: ConsensusRequest) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<RestRequest<NodeId>> for Event {
fn from(_request: RestRequest<NodeId>) -> Self {
unreachable!("no rest requests happen during initialization")
}
}
impl From<ContractRuntimeAnnouncement> for Event {
fn from(_request: ContractRuntimeAnnouncement) -> Self {
unreachable!("no block executor requests happen during initialization")
}
}
impl Display for Event {
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
Event::Chainspec(event) => write!(formatter, "chainspec: {}", event),
Event::Storage(event) => write!(formatter, "storage: {}", event),
Event::ContractRuntime(event) => write!(formatter, "contract runtime: {:?}", event),
Event::StateStoreRequest(request) => {
write!(formatter, "state store request: {}", request)
}
Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann),
}
}
}
/// Error type returned by the initializer reactor.
#[derive(Debug, Error)]
pub enum Error {
/// `Config` error.
#[error("config error: {0}")]
ConfigError(String),
/// Metrics-related error
#[error("prometheus (metrics) error: {0}")]
Metrics(#[from] prometheus::Error),
/// `ChainspecHandler` component error.
#[error("chainspec error: {0}")]
Chainspec(#[from] chainspec::Error),
/// `Storage` component error.
#[error("storage error: {0}")]
Storage(#[from] storage::Error),
/// `ContractRuntime` component error.
#[error("contract runtime config error: {0}")]
ContractRuntime(#[from] contract_runtime::ConfigError),
/// An error that occurred when creating a `SmallNetworkIdentity`.
#[error(transparent)]
SmallNetworkIdentityError(#[from] SmallNetworkIdentityError),
}
/// Initializer node reactor.
#[derive(DataSize, Debug)]
pub struct Reactor {
pub(super) config: WithDir<participating::Config>,
pub(super) chainspec_loader: ChainspecLoader,
pub(super) storage: Storage,
pub(super) contract_runtime: ContractRuntime,
pub(super) small_network_identity: SmallNetworkIdentity,
#[data_size(skip)]
pub(super) network_identity: NetworkIdentity,
}
impl Reactor {
fn new_with_chainspec_loader(
(crashed, config): <Self as reactor::Reactor>::Config,
registry: &Registry,
chainspec_loader: ChainspecLoader,
chainspec_effects: Effects<chainspec_loader::Event>,
) -> Result<(Self, Effects<Event>), Error> {
let hard_reset_to_start_of_era = chainspec_loader.hard_reset_to_start_of_era();
let storage_config = config.map_ref(|cfg| cfg.storage.clone());
let storage = Storage::new(
&storage_config,
hard_reset_to_start_of_era,
chainspec_loader.chainspec().protocol_config.version,
crashed,
)?;
let contract_runtime = ContractRuntime::new(
chainspec_loader.initial_state_root_hash(),
chainspec_loader.initial_block_header(),
chainspec_loader.chainspec().protocol_config.version,
storage_config,
&config.value().contract_runtime,
registry,
)?;
// TODO: This integrity check is misplaced, it should be part of the components
// `handle_event` function. Ideally it would be in the constructor, but since a query to
// storage needs to be made, this is not possible.
//
// Refactoring this has been postponed for now, since it is unclear whether time-consuming
// integrity checks are even a good idea, as they can block the node for one or more hours
// on restarts (online checks are an alternative).
if crashed {
info!("running trie-store integrity check, this may take a while");
if let Some(state_roots) = storage.get_state_root_hashes_for_trie_check() {
let missing_trie_keys = contract_runtime.trie_store_check(state_roots.clone());
if!missing_trie_keys.is_empty() {
panic!(
"Fatal error! Trie-Key store is not empty.\n {:?}\n \
Wipe the DB to ensure operations.\n Present state_roots: {:?}",
missing_trie_keys, state_roots
)
}
}
}
let effects = reactor::wrap_effects(Event::Chainspec, chainspec_effects);
let small_network_identity = SmallNetworkIdentity::new()?;
let network_identity = NetworkIdentity::new();
let reactor = Reactor {
config,
chainspec_loader,
storage,
contract_runtime,
small_network_identity,
network_identity,
};
Ok((reactor, effects))
}
}
#[cfg(test)]
impl Reactor {
/// Inspect storage.
pub fn storage(&self) -> &Storage {
&self.storage
}
}
impl reactor::Reactor for Reactor {
type Event = Event;
type Config = (bool, WithDir<participating::Config>);
type Error = Error;
fn new(
config: Self::Config,
registry: &Registry,
event_queue: EventQueueHandle<Self::Event>,
_rng: &mut NodeRng,
) -> Result<(Self, Effects<Self::Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
// Construct the `ChainspecLoader` first so we fail fast if the chainspec is invalid.
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new(config.1.dir(), effect_builder)?;
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
fn dispatch_event(
&mut self,
effect_builder: EffectBuilder<Self::Event>,
rng: &mut NodeRng,
event: Event,
) -> Effects<Self::Event> {
match event {
Event::Chainspec(event) => reactor::wrap_effects(
Event::Chainspec,
self.chainspec_loader
.handle_event(effect_builder, rng, event),
),
Event::Storage(event) => reactor::wrap_effects(
Event::Storage,
self.storage.handle_event(effect_builder, rng, event),
),
Event::ContractRuntime(event) => reactor::wrap_effects(
Event::ContractRuntime,
self.contract_runtime
.handle_event(effect_builder, rng, event),
),
Event::StateStoreRequest(request) => {
self.dispatch_event(effect_builder, rng, Event::Storage(request.into()))
}
Event::ControlAnnouncement(_) => unreachable!("unhandled control announcement"),
}
}
fn | (&self) -> Option<ReactorExit> {
self.chainspec_loader.reactor_exit()
}
}
#[cfg(test)]
pub mod test {
use super::*;
use crate::{
components::network::ENABLE_LIBP2P_NET_ENV_VAR, testing::network::NetworkedReactor,
types::Chainspec,
};
use std::{env, sync::Arc};
impl Reactor {
pub(crate) fn new_with_chainspec(
config: <Self as reactor::Reactor>::Config,
registry: &Registry,
event_queue: EventQueueHandle<Event>,
chainspec: Arc<Chainspec>,
) -> Result<(Self, Effects<Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new_with_chainspec(chainspec, effect_builder);
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
}
impl NetworkedReactor for Reactor {
type NodeId = NodeId;
fn node_id(&self) -> Self::NodeId {
if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() {
NodeId::from(&self.small_network_identity)
} else {
NodeId::from(&self.network_identity)
}
}
}
}
| maybe_exit | identifier_name |
initializer.rs | //! Reactor used to initialize a node.
use std::fmt::{self, Display, Formatter};
use datasize::DataSize;
use derive_more::From;
use prometheus::Registry;
use reactor::ReactorEvent;
use serde::Serialize;
use thiserror::Error;
use tracing::info;
use crate::{
components::{
chainspec_loader::{self, ChainspecLoader},
contract_runtime::{self, ContractRuntime},
gossiper,
network::NetworkIdentity,
small_network::{GossipedAddress, SmallNetworkIdentity, SmallNetworkIdentityError},
storage::{self, Storage},
Component,
},
effect::{
announcements::{
ChainspecLoaderAnnouncement, ContractRuntimeAnnouncement, ControlAnnouncement,
},
requests::{
ConsensusRequest, ContractRuntimeRequest, LinearChainRequest, NetworkRequest,
RestRequest, StateStoreRequest, StorageRequest,
},
EffectBuilder, Effects,
},
protocol::Message,
reactor::{self, participating, EventQueueHandle, ReactorExit},
types::{chainspec, NodeId},
utils::WithDir,
NodeRng,
};
/// Top-level event for the reactor.
#[derive(Debug, From, Serialize)]
#[must_use]
pub enum Event {
/// Chainspec handler event.
#[from]
Chainspec(chainspec_loader::Event),
/// Storage event.
#[from]
Storage(#[serde(skip_serializing)] storage::Event),
/// Contract runtime event.
#[from]
ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event),
/// Request for state storage.
#[from]
StateStoreRequest(StateStoreRequest),
/// Control announcement
#[from]
ControlAnnouncement(ControlAnnouncement),
}
impl ReactorEvent for Event {
fn as_control(&self) -> Option<&ControlAnnouncement> {
if let Self::ControlAnnouncement(ref ctrl_ann) = self {
Some(ctrl_ann)
} else {
None
}
}
}
impl From<StorageRequest> for Event {
fn from(request: StorageRequest) -> Self {
Event::Storage(storage::Event::StorageRequest(request))
}
}
impl From<ContractRuntimeRequest> for Event {
fn from(request: ContractRuntimeRequest) -> Self {
Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request)))
}
}
impl From<NetworkRequest<NodeId, Message>> for Event {
fn from(_request: NetworkRequest<NodeId, Message>) -> Self {
unreachable!("no network traffic happens during initialization")
}
}
impl From<ChainspecLoaderAnnouncement> for Event {
fn from(_announcement: ChainspecLoaderAnnouncement) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<LinearChainRequest<NodeId>> for Event {
fn from(_req: LinearChainRequest<NodeId>) -> Self {
unreachable!("no linear chain events happen during initialization")
}
}
impl From<NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>> for Event {
fn from(_request: NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>) -> Self {
unreachable!("no gossiper events happen during initialization")
}
}
impl From<ConsensusRequest> for Event {
fn from(_request: ConsensusRequest) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<RestRequest<NodeId>> for Event {
fn from(_request: RestRequest<NodeId>) -> Self {
unreachable!("no rest requests happen during initialization")
}
}
impl From<ContractRuntimeAnnouncement> for Event {
fn from(_request: ContractRuntimeAnnouncement) -> Self {
unreachable!("no block executor requests happen during initialization")
}
}
impl Display for Event {
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
Event::Chainspec(event) => write!(formatter, "chainspec: {}", event),
Event::Storage(event) => write!(formatter, "storage: {}", event),
Event::ContractRuntime(event) => write!(formatter, "contract runtime: {:?}", event),
Event::StateStoreRequest(request) => {
write!(formatter, "state store request: {}", request)
}
Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann),
}
}
}
/// Error type returned by the initializer reactor.
#[derive(Debug, Error)]
pub enum Error {
/// `Config` error.
#[error("config error: {0}")]
ConfigError(String),
/// Metrics-related error
#[error("prometheus (metrics) error: {0}")]
Metrics(#[from] prometheus::Error),
/// `ChainspecHandler` component error.
#[error("chainspec error: {0}")]
Chainspec(#[from] chainspec::Error),
/// `Storage` component error.
#[error("storage error: {0}")]
Storage(#[from] storage::Error),
/// `ContractRuntime` component error.
#[error("contract runtime config error: {0}")]
ContractRuntime(#[from] contract_runtime::ConfigError),
/// An error that occurred when creating a `SmallNetworkIdentity`.
#[error(transparent)]
SmallNetworkIdentityError(#[from] SmallNetworkIdentityError),
}
/// Initializer node reactor.
#[derive(DataSize, Debug)]
pub struct Reactor {
pub(super) config: WithDir<participating::Config>,
pub(super) chainspec_loader: ChainspecLoader,
pub(super) storage: Storage,
pub(super) contract_runtime: ContractRuntime,
pub(super) small_network_identity: SmallNetworkIdentity,
#[data_size(skip)]
pub(super) network_identity: NetworkIdentity,
}
impl Reactor {
fn new_with_chainspec_loader(
(crashed, config): <Self as reactor::Reactor>::Config,
registry: &Registry,
chainspec_loader: ChainspecLoader,
chainspec_effects: Effects<chainspec_loader::Event>,
) -> Result<(Self, Effects<Event>), Error> {
let hard_reset_to_start_of_era = chainspec_loader.hard_reset_to_start_of_era();
let storage_config = config.map_ref(|cfg| cfg.storage.clone());
let storage = Storage::new(
&storage_config,
hard_reset_to_start_of_era,
chainspec_loader.chainspec().protocol_config.version,
crashed,
)?;
let contract_runtime = ContractRuntime::new(
chainspec_loader.initial_state_root_hash(),
chainspec_loader.initial_block_header(),
chainspec_loader.chainspec().protocol_config.version,
storage_config,
&config.value().contract_runtime,
registry,
)?;
// TODO: This integrity check is misplaced, it should be part of the components
// `handle_event` function. Ideally it would be in the constructor, but since a query to
// storage needs to be made, this is not possible.
//
// Refactoring this has been postponed for now, since it is unclear whether time-consuming
// integrity checks are even a good idea, as they can block the node for one or more hours
// on restarts (online checks are an alternative).
if crashed {
info!("running trie-store integrity check, this may take a while");
if let Some(state_roots) = storage.get_state_root_hashes_for_trie_check() {
let missing_trie_keys = contract_runtime.trie_store_check(state_roots.clone());
if!missing_trie_keys.is_empty() {
panic!(
"Fatal error! Trie-Key store is not empty.\n {:?}\n \
Wipe the DB to ensure operations.\n Present state_roots: {:?}",
missing_trie_keys, state_roots
)
}
}
}
let effects = reactor::wrap_effects(Event::Chainspec, chainspec_effects);
let small_network_identity = SmallNetworkIdentity::new()?;
let network_identity = NetworkIdentity::new();
let reactor = Reactor {
config,
chainspec_loader,
storage,
contract_runtime,
small_network_identity,
network_identity,
};
Ok((reactor, effects))
}
}
#[cfg(test)]
impl Reactor {
/// Inspect storage.
pub fn storage(&self) -> &Storage {
&self.storage
}
}
impl reactor::Reactor for Reactor {
type Event = Event;
type Config = (bool, WithDir<participating::Config>);
type Error = Error;
fn new(
config: Self::Config,
registry: &Registry,
event_queue: EventQueueHandle<Self::Event>,
_rng: &mut NodeRng,
) -> Result<(Self, Effects<Self::Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
// Construct the `ChainspecLoader` first so we fail fast if the chainspec is invalid.
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new(config.1.dir(), effect_builder)?;
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
fn dispatch_event(
&mut self,
effect_builder: EffectBuilder<Self::Event>,
rng: &mut NodeRng,
event: Event,
) -> Effects<Self::Event> {
match event {
Event::Chainspec(event) => reactor::wrap_effects(
Event::Chainspec,
self.chainspec_loader
.handle_event(effect_builder, rng, event),
),
Event::Storage(event) => reactor::wrap_effects(
Event::Storage,
self.storage.handle_event(effect_builder, rng, event),
),
Event::ContractRuntime(event) => reactor::wrap_effects(
Event::ContractRuntime,
self.contract_runtime
.handle_event(effect_builder, rng, event),
),
Event::StateStoreRequest(request) => {
self.dispatch_event(effect_builder, rng, Event::Storage(request.into()))
}
Event::ControlAnnouncement(_) => unreachable!("unhandled control announcement"),
}
}
fn maybe_exit(&self) -> Option<ReactorExit> {
self.chainspec_loader.reactor_exit()
}
}
#[cfg(test)]
pub mod test {
use super::*;
use crate::{
components::network::ENABLE_LIBP2P_NET_ENV_VAR, testing::network::NetworkedReactor,
types::Chainspec,
};
use std::{env, sync::Arc};
impl Reactor {
pub(crate) fn new_with_chainspec(
config: <Self as reactor::Reactor>::Config,
registry: &Registry,
event_queue: EventQueueHandle<Event>,
chainspec: Arc<Chainspec>,
) -> Result<(Self, Effects<Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new_with_chainspec(chainspec, effect_builder);
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
}
impl NetworkedReactor for Reactor {
type NodeId = NodeId;
fn node_id(&self) -> Self::NodeId {
if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() {
NodeId::from(&self.small_network_identity)
} else { | NodeId::from(&self.network_identity)
}
}
}
} | random_line_split |
|
initializer.rs | //! Reactor used to initialize a node.
use std::fmt::{self, Display, Formatter};
use datasize::DataSize;
use derive_more::From;
use prometheus::Registry;
use reactor::ReactorEvent;
use serde::Serialize;
use thiserror::Error;
use tracing::info;
use crate::{
components::{
chainspec_loader::{self, ChainspecLoader},
contract_runtime::{self, ContractRuntime},
gossiper,
network::NetworkIdentity,
small_network::{GossipedAddress, SmallNetworkIdentity, SmallNetworkIdentityError},
storage::{self, Storage},
Component,
},
effect::{
announcements::{
ChainspecLoaderAnnouncement, ContractRuntimeAnnouncement, ControlAnnouncement,
},
requests::{
ConsensusRequest, ContractRuntimeRequest, LinearChainRequest, NetworkRequest,
RestRequest, StateStoreRequest, StorageRequest,
},
EffectBuilder, Effects,
},
protocol::Message,
reactor::{self, participating, EventQueueHandle, ReactorExit},
types::{chainspec, NodeId},
utils::WithDir,
NodeRng,
};
/// Top-level event for the reactor.
#[derive(Debug, From, Serialize)]
#[must_use]
pub enum Event {
/// Chainspec handler event.
#[from]
Chainspec(chainspec_loader::Event),
/// Storage event.
#[from]
Storage(#[serde(skip_serializing)] storage::Event),
/// Contract runtime event.
#[from]
ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event),
/// Request for state storage.
#[from]
StateStoreRequest(StateStoreRequest),
/// Control announcement
#[from]
ControlAnnouncement(ControlAnnouncement),
}
impl ReactorEvent for Event {
fn as_control(&self) -> Option<&ControlAnnouncement> {
if let Self::ControlAnnouncement(ref ctrl_ann) = self {
Some(ctrl_ann)
} else {
None
}
}
}
impl From<StorageRequest> for Event {
fn from(request: StorageRequest) -> Self {
Event::Storage(storage::Event::StorageRequest(request))
}
}
impl From<ContractRuntimeRequest> for Event {
fn from(request: ContractRuntimeRequest) -> Self {
Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request)))
}
}
impl From<NetworkRequest<NodeId, Message>> for Event {
fn from(_request: NetworkRequest<NodeId, Message>) -> Self {
unreachable!("no network traffic happens during initialization")
}
}
impl From<ChainspecLoaderAnnouncement> for Event {
fn from(_announcement: ChainspecLoaderAnnouncement) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<LinearChainRequest<NodeId>> for Event {
fn from(_req: LinearChainRequest<NodeId>) -> Self |
}
impl From<NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>> for Event {
fn from(_request: NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>) -> Self {
unreachable!("no gossiper events happen during initialization")
}
}
impl From<ConsensusRequest> for Event {
fn from(_request: ConsensusRequest) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<RestRequest<NodeId>> for Event {
fn from(_request: RestRequest<NodeId>) -> Self {
unreachable!("no rest requests happen during initialization")
}
}
impl From<ContractRuntimeAnnouncement> for Event {
fn from(_request: ContractRuntimeAnnouncement) -> Self {
unreachable!("no block executor requests happen during initialization")
}
}
impl Display for Event {
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
Event::Chainspec(event) => write!(formatter, "chainspec: {}", event),
Event::Storage(event) => write!(formatter, "storage: {}", event),
Event::ContractRuntime(event) => write!(formatter, "contract runtime: {:?}", event),
Event::StateStoreRequest(request) => {
write!(formatter, "state store request: {}", request)
}
Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann),
}
}
}
/// Error type returned by the initializer reactor.
#[derive(Debug, Error)]
pub enum Error {
/// `Config` error.
#[error("config error: {0}")]
ConfigError(String),
/// Metrics-related error
#[error("prometheus (metrics) error: {0}")]
Metrics(#[from] prometheus::Error),
/// `ChainspecHandler` component error.
#[error("chainspec error: {0}")]
Chainspec(#[from] chainspec::Error),
/// `Storage` component error.
#[error("storage error: {0}")]
Storage(#[from] storage::Error),
/// `ContractRuntime` component error.
#[error("contract runtime config error: {0}")]
ContractRuntime(#[from] contract_runtime::ConfigError),
/// An error that occurred when creating a `SmallNetworkIdentity`.
#[error(transparent)]
SmallNetworkIdentityError(#[from] SmallNetworkIdentityError),
}
/// Initializer node reactor.
#[derive(DataSize, Debug)]
pub struct Reactor {
pub(super) config: WithDir<participating::Config>,
pub(super) chainspec_loader: ChainspecLoader,
pub(super) storage: Storage,
pub(super) contract_runtime: ContractRuntime,
pub(super) small_network_identity: SmallNetworkIdentity,
#[data_size(skip)]
pub(super) network_identity: NetworkIdentity,
}
impl Reactor {
fn new_with_chainspec_loader(
(crashed, config): <Self as reactor::Reactor>::Config,
registry: &Registry,
chainspec_loader: ChainspecLoader,
chainspec_effects: Effects<chainspec_loader::Event>,
) -> Result<(Self, Effects<Event>), Error> {
let hard_reset_to_start_of_era = chainspec_loader.hard_reset_to_start_of_era();
let storage_config = config.map_ref(|cfg| cfg.storage.clone());
let storage = Storage::new(
&storage_config,
hard_reset_to_start_of_era,
chainspec_loader.chainspec().protocol_config.version,
crashed,
)?;
let contract_runtime = ContractRuntime::new(
chainspec_loader.initial_state_root_hash(),
chainspec_loader.initial_block_header(),
chainspec_loader.chainspec().protocol_config.version,
storage_config,
&config.value().contract_runtime,
registry,
)?;
// TODO: This integrity check is misplaced, it should be part of the components
// `handle_event` function. Ideally it would be in the constructor, but since a query to
// storage needs to be made, this is not possible.
//
// Refactoring this has been postponed for now, since it is unclear whether time-consuming
// integrity checks are even a good idea, as they can block the node for one or more hours
// on restarts (online checks are an alternative).
if crashed {
info!("running trie-store integrity check, this may take a while");
if let Some(state_roots) = storage.get_state_root_hashes_for_trie_check() {
let missing_trie_keys = contract_runtime.trie_store_check(state_roots.clone());
if!missing_trie_keys.is_empty() {
panic!(
"Fatal error! Trie-Key store is not empty.\n {:?}\n \
Wipe the DB to ensure operations.\n Present state_roots: {:?}",
missing_trie_keys, state_roots
)
}
}
}
let effects = reactor::wrap_effects(Event::Chainspec, chainspec_effects);
let small_network_identity = SmallNetworkIdentity::new()?;
let network_identity = NetworkIdentity::new();
let reactor = Reactor {
config,
chainspec_loader,
storage,
contract_runtime,
small_network_identity,
network_identity,
};
Ok((reactor, effects))
}
}
#[cfg(test)]
impl Reactor {
/// Inspect storage.
pub fn storage(&self) -> &Storage {
&self.storage
}
}
impl reactor::Reactor for Reactor {
type Event = Event;
type Config = (bool, WithDir<participating::Config>);
type Error = Error;
fn new(
config: Self::Config,
registry: &Registry,
event_queue: EventQueueHandle<Self::Event>,
_rng: &mut NodeRng,
) -> Result<(Self, Effects<Self::Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
// Construct the `ChainspecLoader` first so we fail fast if the chainspec is invalid.
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new(config.1.dir(), effect_builder)?;
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
fn dispatch_event(
&mut self,
effect_builder: EffectBuilder<Self::Event>,
rng: &mut NodeRng,
event: Event,
) -> Effects<Self::Event> {
match event {
Event::Chainspec(event) => reactor::wrap_effects(
Event::Chainspec,
self.chainspec_loader
.handle_event(effect_builder, rng, event),
),
Event::Storage(event) => reactor::wrap_effects(
Event::Storage,
self.storage.handle_event(effect_builder, rng, event),
),
Event::ContractRuntime(event) => reactor::wrap_effects(
Event::ContractRuntime,
self.contract_runtime
.handle_event(effect_builder, rng, event),
),
Event::StateStoreRequest(request) => {
self.dispatch_event(effect_builder, rng, Event::Storage(request.into()))
}
Event::ControlAnnouncement(_) => unreachable!("unhandled control announcement"),
}
}
fn maybe_exit(&self) -> Option<ReactorExit> {
self.chainspec_loader.reactor_exit()
}
}
#[cfg(test)]
pub mod test {
use super::*;
use crate::{
components::network::ENABLE_LIBP2P_NET_ENV_VAR, testing::network::NetworkedReactor,
types::Chainspec,
};
use std::{env, sync::Arc};
impl Reactor {
pub(crate) fn new_with_chainspec(
config: <Self as reactor::Reactor>::Config,
registry: &Registry,
event_queue: EventQueueHandle<Event>,
chainspec: Arc<Chainspec>,
) -> Result<(Self, Effects<Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new_with_chainspec(chainspec, effect_builder);
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
}
impl NetworkedReactor for Reactor {
type NodeId = NodeId;
fn node_id(&self) -> Self::NodeId {
if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() {
NodeId::from(&self.small_network_identity)
} else {
NodeId::from(&self.network_identity)
}
}
}
}
| {
unreachable!("no linear chain events happen during initialization")
} | identifier_body |
main.rs | use anyhow::{anyhow, Result};
use colored::*;
use dunce::canonicalize;
use git2::{Commit, ObjectType, Oid, Repository, Tree};
use regex::Regex;
use std::{
collections::{HashMap, HashSet},
convert::{TryFrom, TryInto},
env,
ffi::OsString,
path::{Path, PathBuf},
};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct | {
#[structopt(
help = "The pattern to search for. Shall be a regular expression passed to regex crate."
)]
pattern: String,
#[structopt(help = "Root repo to grep")]
repo: Option<PathBuf>,
#[structopt(short, long, help = "Branch name")]
branch: Option<String>,
#[structopt(
short,
long,
help = "Search from all branches. Ignores -b option if given"
)]
all: bool,
#[structopt(short, long, help = "Depth to search into git commit history")]
depth: Option<usize>,
#[structopt(
short = "o",
long,
help = "Turn off showing matches to a file only once; the default behavior is that if the same file with the same name has different versions that matches, they will not be printed."
)]
no_once_file: bool,
#[structopt(
short = "c",
long,
help = "Disable color coding for the output, default is to use colors in terminal"
)]
no_color_code: bool,
#[structopt(
short = "g",
long,
help = "Disable output grouping. Better for machine inputs"
)]
no_output_grouping: bool,
#[structopt(short, long, help = "Verbose flag")]
verbose: bool,
#[structopt(short, long, help = "Add an entry to list of extensions to search")]
extensions: Vec<String>,
#[structopt(
short,
long,
help = "Add an entry to list of directory names to ignore"
)]
ignore_dirs: Vec<String>,
}
fn main() -> Result<()> {
let settings: Settings = Opt::from_args().try_into()?;
eprintln!(
"Searching path: {:?} extensions: {:?} ignore_dirs: {:?}",
settings.repo, settings.extensions, settings.ignore_dirs
);
let _file_list = process_files_git(&settings.repo, &settings)?;
Ok(())
}
#[allow(dead_code)]
struct MatchEntry {
commit: Oid,
path: PathBuf,
start: usize,
end: usize,
}
#[derive(Debug)]
struct Settings {
pattern: Regex,
repo: PathBuf,
branch: Option<String>,
all: bool,
depth: Option<usize>,
once_file: bool,
color_code: bool,
output_grouping: bool,
verbose: bool,
extensions: HashSet<OsString>,
ignore_dirs: HashSet<OsString>,
}
// It's a bit awkward to convert from Opt to Settings, but some settings are hard to write
// conversion code inside structopt annotations.
impl TryFrom<Opt> for Settings {
type Error = anyhow::Error;
fn try_from(src: Opt) -> std::result::Result<Self, Self::Error> {
let default_exts = [
".sh", ".js", ".tcl", ".pl", ".py", ".rb", ".c", ".cpp", ".h", ".rc", ".rci", ".dlg",
".pas", ".dpr", ".cs", ".rs",
];
let default_ignore_dirs = [".hg", ".svn", ".git", ".bzr", "node_modules", "target"]; // Probably we could ignore all directories beginning with a dot.
Ok(Self {
pattern: Regex::new(&src.pattern)
.map_err(|e| anyhow!("Error in regex compilation: {:?}", e))?,
repo: canonicalize(
src.repo.unwrap_or_else(|| {
PathBuf::from(env::current_dir().unwrap().to_str().unwrap())
}),
)
.expect("Canonicalized path"),
branch: src.branch,
all: src.all,
depth: src.depth,
once_file:!src.no_once_file,
color_code:!src.no_color_code,
output_grouping:!src.no_output_grouping,
verbose: src.verbose,
extensions: if src.extensions.is_empty() {
default_exts.iter().map(|ext| ext[1..].into()).collect()
} else {
default_exts
.iter()
.map(|ext| ext[1..].into())
.chain(src.extensions.iter().map(|ext| ext[1..].into()))
.collect()
},
ignore_dirs: if src.ignore_dirs.is_empty() {
default_ignore_dirs.iter().map(|ext| ext.into()).collect()
} else {
default_ignore_dirs
.iter()
.map(|ext| ext.into())
.chain(src.ignore_dirs.iter().map(|ext| ext.into()))
.collect()
},
})
}
}
struct ProcessTree<'a> {
settings: &'a Settings,
repo: &'a Repository,
checked_paths: HashSet<PathBuf>,
checked_blobs: HashSet<Oid>,
checked_trees: HashSet<Oid>,
walked: usize,
skipped_blobs: usize,
all_matches: Vec<MatchEntry>,
}
impl<'a> ProcessTree<'a> {
fn process(&mut self, tree: &Tree, commit: &Commit, path: &Path, visited: &mut bool) {
if self.checked_trees.contains(&tree.id()) {
return;
}
self.checked_trees.insert(tree.id());
self.walked += 1;
for entry in tree {
match (|| {
let name = entry.name()?;
let entry_path = path.join(name);
// We want to match with absolute path from root, but it seems impossible with `tree.walk`.
if self.settings.once_file && self.checked_paths.contains(&entry_path) {
return None;
}
self.checked_paths.insert(entry_path.clone());
let obj = match entry.to_object(&self.repo) {
Ok(obj) => obj,
Err(e) => {
eprintln!("couldn't get_object: {:?}", e);
return None;
}
};
if obj.kind() == Some(ObjectType::Tree) {
self.process(obj.as_tree()?, commit, &entry_path, visited);
return None;
}
if entry.kind()!= Some(ObjectType::Blob)
|| self.settings.ignore_dirs.contains(&OsString::from(name))
{
return None;
}
let blob = obj.peel_to_blob().ok()?;
if blob.is_binary() {
return None;
}
let ext = PathBuf::from(name).extension()?.to_owned();
if!self.settings.extensions.contains(&ext.to_ascii_lowercase()) {
return None;
}
if self.checked_blobs.contains(&blob.id()) {
self.skipped_blobs += 1;
return None;
}
self.checked_blobs.insert(blob.id());
let ret = process_file(self.settings, commit, blob.content(), &entry_path, visited);
Some(ret)
})() {
Some(matches) => {
self.all_matches.extend(matches);
}
_ => (),
}
}
}
}
fn process_files_git(_root: &Path, settings: &Settings) -> Result<Vec<MatchEntry>> {
let repo = Repository::open(&settings.repo)?;
let reference = if let Some(ref branch) = settings.branch {
repo.resolve_reference_from_short_name(&branch)?
} else {
repo.head()?
};
let mut process_tree = ProcessTree {
settings,
repo: &repo,
checked_paths: HashSet::new(),
checked_blobs: HashSet::new(),
checked_trees: HashSet::new(),
walked: 0,
skipped_blobs: 0,
all_matches: vec![],
};
let mut checked_commits = HashMap::new();
let mut iter = 0;
let mut next_refs = if settings.all {
repo.references()?
.map(|refs| refs.and_then(|refb| refb.peel_to_commit()))
.collect::<std::result::Result<Vec<_>, _>>()?
} else {
vec![reference.peel_to_commit()?]
};
loop {
for commit in &next_refs {
if checked_commits.contains_key(&commit.id()) {
continue;
}
let entry = checked_commits.entry(commit.id()).or_insert(false);
let tree = if let Ok(tree) = commit.tree() {
tree
} else {
continue;
};
process_tree.process(&tree, commit, &PathBuf::from(""), entry);
}
next_refs = next_refs
.iter()
.map(|reference| reference.parent_ids())
.flatten()
.filter(|reference|!checked_commits.contains_key(reference))
.map(|id| repo.find_commit(id))
.collect::<std::result::Result<Vec<_>, git2::Error>>()?;
if settings.verbose {
eprintln!(
"[{}] {} Matches in {} files {} skipped blobs... Next round has {} refs...",
iter,
process_tree.all_matches.len(),
process_tree.walked,
process_tree.skipped_blobs,
next_refs.len()
);
}
iter += 1;
if next_refs.is_empty() || settings.depth.map(|depth| depth <= iter).unwrap_or(false) {
break;
}
}
Ok(process_tree.all_matches)
}
fn process_file(
settings: &Settings,
commit: &Commit,
input: &[u8],
filepath: &Path,
visited: &mut bool,
) -> Vec<MatchEntry> {
let mut ret = vec![];
// Non-utf8 files are not supported.
let input_str = if let Ok(utf8) = std::str::from_utf8(&input) {
utf8
} else {
return vec![];
};
for found in settings.pattern.find_iter(&input_str) {
ret.push(MatchEntry {
commit: commit.id(),
path: filepath.to_path_buf(),
start: found.start(),
end: found.end(),
});
// Very naive way to count line numbers. Assumes newlines would not be part of multibyte
// character, which is true for utf8 that is the only supported encoding in Rust anyway.
let mut line_number = 1;
let mut line_start = 0;
let mut line_end = 0;
for (i, c) in input.iter().enumerate() {
if *c == b'\n' {
line_number += 1;
if i < found.start() {
line_start = (i + 1).min(input.len());
}
if found.end() <= i {
line_end = (i as usize).max(line_start);
break;
}
}
}
if settings.color_code {
if settings.output_grouping &&!*visited {
println!("\ncommit {}:", commit.id().to_string().bright_blue());
*visited = true;
}
let mut content = if line_start < found.start() {
input_str[line_start..found.start()].to_owned()
} else {
"".to_owned()
};
if found.start() < found.end() {
content += &input_str[found.start()..found.end()]
.red()
.bold()
.to_string();
}
if found.end() < line_end {
content += &input_str[found.end()..line_end];
}
let line = format!(
"{} {} {}",
filepath.to_string_lossy().green(),
&format!("({}):", line_number).bright_yellow(),
&content
);
if!settings.output_grouping {
println!("{} {}", commit.id().to_string().bright_blue(), line);
} else {
println!(" {}", line);
}
} else {
if settings.output_grouping &&!*visited {
println!("\ncommit {}:", commit.id());
*visited = true;
}
let line = format!(
"{}({}): {}",
filepath.to_string_lossy(),
line_number,
&input_str[line_start..line_end]
);
if!settings.output_grouping {
println!("{} {}", commit.id(), line);
} else {
println!(" {}", line);
}
}
}
ret
}
| Opt | identifier_name |
main.rs | use anyhow::{anyhow, Result};
use colored::*;
use dunce::canonicalize;
use git2::{Commit, ObjectType, Oid, Repository, Tree};
use regex::Regex;
use std::{
collections::{HashMap, HashSet},
convert::{TryFrom, TryInto},
env,
ffi::OsString,
path::{Path, PathBuf},
};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
#[structopt(
help = "The pattern to search for. Shall be a regular expression passed to regex crate."
)]
pattern: String,
#[structopt(help = "Root repo to grep")]
repo: Option<PathBuf>,
#[structopt(short, long, help = "Branch name")]
branch: Option<String>,
#[structopt(
short,
long,
help = "Search from all branches. Ignores -b option if given"
)]
all: bool,
#[structopt(short, long, help = "Depth to search into git commit history")]
depth: Option<usize>,
#[structopt(
short = "o",
long,
help = "Turn off showing matches to a file only once; the default behavior is that if the same file with the same name has different versions that matches, they will not be printed."
)]
no_once_file: bool,
#[structopt(
short = "c",
long,
help = "Disable color coding for the output, default is to use colors in terminal"
)]
no_color_code: bool,
#[structopt(
short = "g",
long,
help = "Disable output grouping. Better for machine inputs"
)]
no_output_grouping: bool,
#[structopt(short, long, help = "Verbose flag")]
verbose: bool,
#[structopt(short, long, help = "Add an entry to list of extensions to search")]
extensions: Vec<String>,
#[structopt(
short,
long,
help = "Add an entry to list of directory names to ignore"
)]
ignore_dirs: Vec<String>,
}
fn main() -> Result<()> {
let settings: Settings = Opt::from_args().try_into()?;
eprintln!(
"Searching path: {:?} extensions: {:?} ignore_dirs: {:?}",
settings.repo, settings.extensions, settings.ignore_dirs
);
let _file_list = process_files_git(&settings.repo, &settings)?;
Ok(())
}
#[allow(dead_code)]
struct MatchEntry {
commit: Oid,
path: PathBuf,
start: usize,
end: usize,
}
#[derive(Debug)]
struct Settings {
pattern: Regex,
repo: PathBuf,
branch: Option<String>,
all: bool,
depth: Option<usize>,
once_file: bool,
color_code: bool,
output_grouping: bool,
verbose: bool,
extensions: HashSet<OsString>,
ignore_dirs: HashSet<OsString>,
}
// It's a bit awkward to convert from Opt to Settings, but some settings are hard to write
// conversion code inside structopt annotations.
impl TryFrom<Opt> for Settings {
type Error = anyhow::Error;
fn try_from(src: Opt) -> std::result::Result<Self, Self::Error> {
let default_exts = [
".sh", ".js", ".tcl", ".pl", ".py", ".rb", ".c", ".cpp", ".h", ".rc", ".rci", ".dlg",
".pas", ".dpr", ".cs", ".rs",
];
let default_ignore_dirs = [".hg", ".svn", ".git", ".bzr", "node_modules", "target"]; // Probably we could ignore all directories beginning with a dot.
Ok(Self {
pattern: Regex::new(&src.pattern)
.map_err(|e| anyhow!("Error in regex compilation: {:?}", e))?,
repo: canonicalize(
src.repo.unwrap_or_else(|| {
PathBuf::from(env::current_dir().unwrap().to_str().unwrap())
}),
)
.expect("Canonicalized path"),
branch: src.branch,
all: src.all,
depth: src.depth,
once_file:!src.no_once_file,
color_code:!src.no_color_code,
output_grouping:!src.no_output_grouping,
verbose: src.verbose,
extensions: if src.extensions.is_empty() {
default_exts.iter().map(|ext| ext[1..].into()).collect()
} else {
default_exts
.iter()
.map(|ext| ext[1..].into())
.chain(src.extensions.iter().map(|ext| ext[1..].into()))
.collect()
},
ignore_dirs: if src.ignore_dirs.is_empty() {
default_ignore_dirs.iter().map(|ext| ext.into()).collect()
} else {
default_ignore_dirs
.iter()
.map(|ext| ext.into())
.chain(src.ignore_dirs.iter().map(|ext| ext.into()))
.collect()
},
})
}
}
struct ProcessTree<'a> {
settings: &'a Settings,
repo: &'a Repository,
checked_paths: HashSet<PathBuf>,
checked_blobs: HashSet<Oid>,
checked_trees: HashSet<Oid>,
walked: usize,
skipped_blobs: usize,
all_matches: Vec<MatchEntry>,
}
impl<'a> ProcessTree<'a> {
fn process(&mut self, tree: &Tree, commit: &Commit, path: &Path, visited: &mut bool) {
if self.checked_trees.contains(&tree.id()) {
return;
}
self.checked_trees.insert(tree.id());
self.walked += 1;
for entry in tree {
match (|| {
let name = entry.name()?;
let entry_path = path.join(name);
// We want to match with absolute path from root, but it seems impossible with `tree.walk`.
if self.settings.once_file && self.checked_paths.contains(&entry_path) {
return None;
}
self.checked_paths.insert(entry_path.clone());
let obj = match entry.to_object(&self.repo) {
Ok(obj) => obj,
Err(e) => {
eprintln!("couldn't get_object: {:?}", e);
return None;
}
};
if obj.kind() == Some(ObjectType::Tree) {
self.process(obj.as_tree()?, commit, &entry_path, visited);
return None;
}
if entry.kind()!= Some(ObjectType::Blob)
|| self.settings.ignore_dirs.contains(&OsString::from(name))
{
return None;
} | let blob = obj.peel_to_blob().ok()?;
if blob.is_binary() {
return None;
}
let ext = PathBuf::from(name).extension()?.to_owned();
if!self.settings.extensions.contains(&ext.to_ascii_lowercase()) {
return None;
}
if self.checked_blobs.contains(&blob.id()) {
self.skipped_blobs += 1;
return None;
}
self.checked_blobs.insert(blob.id());
let ret = process_file(self.settings, commit, blob.content(), &entry_path, visited);
Some(ret)
})() {
Some(matches) => {
self.all_matches.extend(matches);
}
_ => (),
}
}
}
}
fn process_files_git(_root: &Path, settings: &Settings) -> Result<Vec<MatchEntry>> {
let repo = Repository::open(&settings.repo)?;
let reference = if let Some(ref branch) = settings.branch {
repo.resolve_reference_from_short_name(&branch)?
} else {
repo.head()?
};
let mut process_tree = ProcessTree {
settings,
repo: &repo,
checked_paths: HashSet::new(),
checked_blobs: HashSet::new(),
checked_trees: HashSet::new(),
walked: 0,
skipped_blobs: 0,
all_matches: vec![],
};
let mut checked_commits = HashMap::new();
let mut iter = 0;
let mut next_refs = if settings.all {
repo.references()?
.map(|refs| refs.and_then(|refb| refb.peel_to_commit()))
.collect::<std::result::Result<Vec<_>, _>>()?
} else {
vec![reference.peel_to_commit()?]
};
loop {
for commit in &next_refs {
if checked_commits.contains_key(&commit.id()) {
continue;
}
let entry = checked_commits.entry(commit.id()).or_insert(false);
let tree = if let Ok(tree) = commit.tree() {
tree
} else {
continue;
};
process_tree.process(&tree, commit, &PathBuf::from(""), entry);
}
next_refs = next_refs
.iter()
.map(|reference| reference.parent_ids())
.flatten()
.filter(|reference|!checked_commits.contains_key(reference))
.map(|id| repo.find_commit(id))
.collect::<std::result::Result<Vec<_>, git2::Error>>()?;
if settings.verbose {
eprintln!(
"[{}] {} Matches in {} files {} skipped blobs... Next round has {} refs...",
iter,
process_tree.all_matches.len(),
process_tree.walked,
process_tree.skipped_blobs,
next_refs.len()
);
}
iter += 1;
if next_refs.is_empty() || settings.depth.map(|depth| depth <= iter).unwrap_or(false) {
break;
}
}
Ok(process_tree.all_matches)
}
fn process_file(
settings: &Settings,
commit: &Commit,
input: &[u8],
filepath: &Path,
visited: &mut bool,
) -> Vec<MatchEntry> {
let mut ret = vec![];
// Non-utf8 files are not supported.
let input_str = if let Ok(utf8) = std::str::from_utf8(&input) {
utf8
} else {
return vec![];
};
for found in settings.pattern.find_iter(&input_str) {
ret.push(MatchEntry {
commit: commit.id(),
path: filepath.to_path_buf(),
start: found.start(),
end: found.end(),
});
// Very naive way to count line numbers. Assumes newlines would not be part of multibyte
// character, which is true for utf8 that is the only supported encoding in Rust anyway.
let mut line_number = 1;
let mut line_start = 0;
let mut line_end = 0;
for (i, c) in input.iter().enumerate() {
if *c == b'\n' {
line_number += 1;
if i < found.start() {
line_start = (i + 1).min(input.len());
}
if found.end() <= i {
line_end = (i as usize).max(line_start);
break;
}
}
}
if settings.color_code {
if settings.output_grouping &&!*visited {
println!("\ncommit {}:", commit.id().to_string().bright_blue());
*visited = true;
}
let mut content = if line_start < found.start() {
input_str[line_start..found.start()].to_owned()
} else {
"".to_owned()
};
if found.start() < found.end() {
content += &input_str[found.start()..found.end()]
.red()
.bold()
.to_string();
}
if found.end() < line_end {
content += &input_str[found.end()..line_end];
}
let line = format!(
"{} {} {}",
filepath.to_string_lossy().green(),
&format!("({}):", line_number).bright_yellow(),
&content
);
if!settings.output_grouping {
println!("{} {}", commit.id().to_string().bright_blue(), line);
} else {
println!(" {}", line);
}
} else {
if settings.output_grouping &&!*visited {
println!("\ncommit {}:", commit.id());
*visited = true;
}
let line = format!(
"{}({}): {}",
filepath.to_string_lossy(),
line_number,
&input_str[line_start..line_end]
);
if!settings.output_grouping {
println!("{} {}", commit.id(), line);
} else {
println!(" {}", line);
}
}
}
ret
} | random_line_split |
|
main.rs | use anyhow::{anyhow, Result};
use colored::*;
use dunce::canonicalize;
use git2::{Commit, ObjectType, Oid, Repository, Tree};
use regex::Regex;
use std::{
collections::{HashMap, HashSet},
convert::{TryFrom, TryInto},
env,
ffi::OsString,
path::{Path, PathBuf},
};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
#[structopt(
help = "The pattern to search for. Shall be a regular expression passed to regex crate."
)]
pattern: String,
#[structopt(help = "Root repo to grep")]
repo: Option<PathBuf>,
#[structopt(short, long, help = "Branch name")]
branch: Option<String>,
#[structopt(
short,
long,
help = "Search from all branches. Ignores -b option if given"
)]
all: bool,
#[structopt(short, long, help = "Depth to search into git commit history")]
depth: Option<usize>,
#[structopt(
short = "o",
long,
help = "Turn off showing matches to a file only once; the default behavior is that if the same file with the same name has different versions that matches, they will not be printed."
)]
no_once_file: bool,
#[structopt(
short = "c",
long,
help = "Disable color coding for the output, default is to use colors in terminal"
)]
no_color_code: bool,
#[structopt(
short = "g",
long,
help = "Disable output grouping. Better for machine inputs"
)]
no_output_grouping: bool,
#[structopt(short, long, help = "Verbose flag")]
verbose: bool,
#[structopt(short, long, help = "Add an entry to list of extensions to search")]
extensions: Vec<String>,
#[structopt(
short,
long,
help = "Add an entry to list of directory names to ignore"
)]
ignore_dirs: Vec<String>,
}
fn main() -> Result<()> {
let settings: Settings = Opt::from_args().try_into()?;
eprintln!(
"Searching path: {:?} extensions: {:?} ignore_dirs: {:?}",
settings.repo, settings.extensions, settings.ignore_dirs
);
let _file_list = process_files_git(&settings.repo, &settings)?;
Ok(())
}
#[allow(dead_code)]
struct MatchEntry {
commit: Oid,
path: PathBuf,
start: usize,
end: usize,
}
#[derive(Debug)]
struct Settings {
pattern: Regex,
repo: PathBuf,
branch: Option<String>,
all: bool,
depth: Option<usize>,
once_file: bool,
color_code: bool,
output_grouping: bool,
verbose: bool,
extensions: HashSet<OsString>,
ignore_dirs: HashSet<OsString>,
}
// It's a bit awkward to convert from Opt to Settings, but some settings are hard to write
// conversion code inside structopt annotations.
impl TryFrom<Opt> for Settings {
type Error = anyhow::Error;
fn try_from(src: Opt) -> std::result::Result<Self, Self::Error> {
let default_exts = [
".sh", ".js", ".tcl", ".pl", ".py", ".rb", ".c", ".cpp", ".h", ".rc", ".rci", ".dlg",
".pas", ".dpr", ".cs", ".rs",
];
let default_ignore_dirs = [".hg", ".svn", ".git", ".bzr", "node_modules", "target"]; // Probably we could ignore all directories beginning with a dot.
Ok(Self {
pattern: Regex::new(&src.pattern)
.map_err(|e| anyhow!("Error in regex compilation: {:?}", e))?,
repo: canonicalize(
src.repo.unwrap_or_else(|| {
PathBuf::from(env::current_dir().unwrap().to_str().unwrap())
}),
)
.expect("Canonicalized path"),
branch: src.branch,
all: src.all,
depth: src.depth,
once_file:!src.no_once_file,
color_code:!src.no_color_code,
output_grouping:!src.no_output_grouping,
verbose: src.verbose,
extensions: if src.extensions.is_empty() {
default_exts.iter().map(|ext| ext[1..].into()).collect()
} else {
default_exts
.iter()
.map(|ext| ext[1..].into())
.chain(src.extensions.iter().map(|ext| ext[1..].into()))
.collect()
},
ignore_dirs: if src.ignore_dirs.is_empty() {
default_ignore_dirs.iter().map(|ext| ext.into()).collect()
} else {
default_ignore_dirs
.iter()
.map(|ext| ext.into())
.chain(src.ignore_dirs.iter().map(|ext| ext.into()))
.collect()
},
})
}
}
struct ProcessTree<'a> {
settings: &'a Settings,
repo: &'a Repository,
checked_paths: HashSet<PathBuf>,
checked_blobs: HashSet<Oid>,
checked_trees: HashSet<Oid>,
walked: usize,
skipped_blobs: usize,
all_matches: Vec<MatchEntry>,
}
impl<'a> ProcessTree<'a> {
fn process(&mut self, tree: &Tree, commit: &Commit, path: &Path, visited: &mut bool) {
if self.checked_trees.contains(&tree.id()) {
return;
}
self.checked_trees.insert(tree.id());
self.walked += 1;
for entry in tree {
match (|| {
let name = entry.name()?;
let entry_path = path.join(name);
// We want to match with absolute path from root, but it seems impossible with `tree.walk`.
if self.settings.once_file && self.checked_paths.contains(&entry_path) {
return None;
}
self.checked_paths.insert(entry_path.clone());
let obj = match entry.to_object(&self.repo) {
Ok(obj) => obj,
Err(e) => {
eprintln!("couldn't get_object: {:?}", e);
return None;
}
};
if obj.kind() == Some(ObjectType::Tree) {
self.process(obj.as_tree()?, commit, &entry_path, visited);
return None;
}
if entry.kind()!= Some(ObjectType::Blob)
|| self.settings.ignore_dirs.contains(&OsString::from(name))
{
return None;
}
let blob = obj.peel_to_blob().ok()?;
if blob.is_binary() {
return None;
}
let ext = PathBuf::from(name).extension()?.to_owned();
if!self.settings.extensions.contains(&ext.to_ascii_lowercase()) {
return None;
}
if self.checked_blobs.contains(&blob.id()) {
self.skipped_blobs += 1;
return None;
}
self.checked_blobs.insert(blob.id());
let ret = process_file(self.settings, commit, blob.content(), &entry_path, visited);
Some(ret)
})() {
Some(matches) => {
self.all_matches.extend(matches);
}
_ => (),
}
}
}
}
fn process_files_git(_root: &Path, settings: &Settings) -> Result<Vec<MatchEntry>> {
let repo = Repository::open(&settings.repo)?;
let reference = if let Some(ref branch) = settings.branch {
repo.resolve_reference_from_short_name(&branch)?
} else {
repo.head()?
};
let mut process_tree = ProcessTree {
settings,
repo: &repo,
checked_paths: HashSet::new(),
checked_blobs: HashSet::new(),
checked_trees: HashSet::new(),
walked: 0,
skipped_blobs: 0,
all_matches: vec![],
};
let mut checked_commits = HashMap::new();
let mut iter = 0;
let mut next_refs = if settings.all {
repo.references()?
.map(|refs| refs.and_then(|refb| refb.peel_to_commit()))
.collect::<std::result::Result<Vec<_>, _>>()?
} else {
vec![reference.peel_to_commit()?]
};
loop {
for commit in &next_refs {
if checked_commits.contains_key(&commit.id()) {
continue;
}
let entry = checked_commits.entry(commit.id()).or_insert(false);
let tree = if let Ok(tree) = commit.tree() {
tree
} else {
continue;
};
process_tree.process(&tree, commit, &PathBuf::from(""), entry);
}
next_refs = next_refs
.iter()
.map(|reference| reference.parent_ids())
.flatten()
.filter(|reference|!checked_commits.contains_key(reference))
.map(|id| repo.find_commit(id))
.collect::<std::result::Result<Vec<_>, git2::Error>>()?;
if settings.verbose {
eprintln!(
"[{}] {} Matches in {} files {} skipped blobs... Next round has {} refs...",
iter,
process_tree.all_matches.len(),
process_tree.walked,
process_tree.skipped_blobs,
next_refs.len()
);
}
iter += 1;
if next_refs.is_empty() || settings.depth.map(|depth| depth <= iter).unwrap_or(false) {
break;
}
}
Ok(process_tree.all_matches)
}
fn process_file(
settings: &Settings,
commit: &Commit,
input: &[u8],
filepath: &Path,
visited: &mut bool,
) -> Vec<MatchEntry> {
let mut ret = vec![];
// Non-utf8 files are not supported.
let input_str = if let Ok(utf8) = std::str::from_utf8(&input) {
utf8
} else {
return vec![];
};
for found in settings.pattern.find_iter(&input_str) {
ret.push(MatchEntry {
commit: commit.id(),
path: filepath.to_path_buf(),
start: found.start(),
end: found.end(),
});
// Very naive way to count line numbers. Assumes newlines would not be part of multibyte
// character, which is true for utf8 that is the only supported encoding in Rust anyway.
let mut line_number = 1;
let mut line_start = 0;
let mut line_end = 0;
for (i, c) in input.iter().enumerate() {
if *c == b'\n' {
line_number += 1;
if i < found.start() {
line_start = (i + 1).min(input.len());
}
if found.end() <= i |
}
}
if settings.color_code {
if settings.output_grouping &&!*visited {
println!("\ncommit {}:", commit.id().to_string().bright_blue());
*visited = true;
}
let mut content = if line_start < found.start() {
input_str[line_start..found.start()].to_owned()
} else {
"".to_owned()
};
if found.start() < found.end() {
content += &input_str[found.start()..found.end()]
.red()
.bold()
.to_string();
}
if found.end() < line_end {
content += &input_str[found.end()..line_end];
}
let line = format!(
"{} {} {}",
filepath.to_string_lossy().green(),
&format!("({}):", line_number).bright_yellow(),
&content
);
if!settings.output_grouping {
println!("{} {}", commit.id().to_string().bright_blue(), line);
} else {
println!(" {}", line);
}
} else {
if settings.output_grouping &&!*visited {
println!("\ncommit {}:", commit.id());
*visited = true;
}
let line = format!(
"{}({}): {}",
filepath.to_string_lossy(),
line_number,
&input_str[line_start..line_end]
);
if!settings.output_grouping {
println!("{} {}", commit.id(), line);
} else {
println!(" {}", line);
}
}
}
ret
}
| {
line_end = (i as usize).max(line_start);
break;
} | conditional_block |
lib.rs | //! [![github]](https://github.com/dtolnay/trybuild) [![crates-io]](https://crates.io/crates/trybuild) [![docs-rs]](https://docs.rs/trybuild)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
//!
//! <br>
//!
//! ####  A compiler diagnostics testing library in just 3 functions.
//!
//! Trybuild is a test harness for invoking rustc on a set of test cases and
//! asserting that any resulting error messages are the ones intended.
//!
//! Such tests are commonly useful for testing error reporting involving
//! procedural macros. We would write test cases triggering either errors
//! detected by the macro or errors detected by the Rust compiler in the
//! resulting expanded code, and compare against the expected errors to ensure
//! that they remain user-friendly.
//!
//! This style of testing is sometimes called *ui tests* because they test
//! aspects of the user's interaction with a library outside of what would be
//! covered by ordinary API tests.
//!
//! Nothing here is specific to macros; trybuild would work equally well for
//! testing misuse of non-macro APIs.
//!
//! <br>
//!
//! # Compile-fail tests
//!
//! A minimal trybuild setup looks like this:
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.compile_fail("tests/ui/*.rs");
//! }
//! ```
//!
//! The test can be run with `cargo test`. It will individually compile each of
//! the source files matching the glob pattern, expect them to fail to compile,
//! and assert that the compiler's error message matches an adjacently named
//! _*.stderr_ file containing the expected output (same file name as the test
//! except with a different extension). If it matches, the test case is
//! considered to succeed.
//!
//! Dependencies listed under `[dev-dependencies]` in the project's Cargo.toml
//! are accessible from within the test cases.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186574-76469e00-6e96-11e9-8cb5-b63b657170c9.png" width="700">
//! </p>
//!
//! Failing tests display the expected vs actual compiler output inline.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186575-79418e80-6e96-11e9-9478-c9b3dc10327f.png" width="700">
//! </p>
//!
//! A compile_fail test that fails to fail to compile is also a failure.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186576-7b0b5200-6e96-11e9-8bfd-2de705125108.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Pass tests
//!
//! The same test harness is able to run tests that are expected to pass, too.
//! Ordinarily you would just have Cargo run such tests directly, but being able
//! to combine modes like this could be useful for workshops in which
//! participants work through test cases enabling one at a time. Trybuild was
//! originally developed for my [procedural macros workshop at Rust
//! Latam][workshop].
//!
//! [workshop]: https://github.com/dtolnay/proc-macro-workshop
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.pass("tests/01-parse-header.rs");
//! t.pass("tests/02-parse-body.rs");
//! t.compile_fail("tests/03-expand-four-errors.rs");
//! t.pass("tests/04-paste-ident.rs");
//! t.pass("tests/05-repeat-section.rs");
//! //t.pass("tests/06-make-work-in-function.rs");
//! //t.pass("tests/07-init-array.rs");
//! //t.compile_fail("tests/08-ident-span.rs");
//! }
//! ```
//!
//! Pass tests are considered to succeed if they compile successfully and have a
//! `main` function that does not panic when the compiled binary is executed.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186580-7f376f80-6e96-11e9-9cae-8257609269ef.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Details
//!
//! That's the entire API.
//!
//! <br>
//!
//! # Workflow
//!
//! There are two ways to update the _*.stderr_ files as you iterate on your
//! test cases or your library; handwriting them is not recommended.
//!
//! First, if a test case is being run as compile_fail but a corresponding
//! _*.stderr_ file does not exist, the test runner will save the actual
//! compiler output with the right filename into a directory called *wip* within
//! the directory containing Cargo.toml. So you can update these files by
//! deleting them, running `cargo test`, and moving all the files from *wip*
//! into your testcase directory.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186579-7cd51580-6e96-11e9-9f19-54dcecc9fbba.png" width="700">
//! </p>
//!
//! Alternatively, run `cargo test` with the environment variable
//! `TRYBUILD=overwrite` to skip the *wip* directory and write all compiler
//! output directly in place. You'll want to check `git diff` afterward to be
//! sure the compiler's output is what you had in mind.
//!
//! <br>
//!
//! # What to test
//!
//! When it comes to compile-fail tests, write tests for anything for which you
//! care to find out when there are changes in the user-facing compiler output.
//! As a negative example, please don't write compile-fail tests simply calling
//! all of your public APIs with arguments of the wrong type; there would be no
//! benefit.
//!
//! A common use would be for testing specific targeted error messages emitted
//! by a procedural macro. For example the derive macro from the [`ref-cast`]
//! crate is required to be placed on a type that has either `#[repr(C)]` or
//! `#[repr(transparent)]` in order for the expansion to be free of undefined
//! behavior, which it enforces at compile time:
//!
//! [`ref-cast`]: https://github.com/dtolnay/ref-cast
//!
//! ```console
//! error: RefCast trait requires #[repr(C)] or #[repr(transparent)]
//! --> $DIR/missing-repr.rs:3:10
//! |
//! 3 | #[derive(RefCast)]
//! | ^^^^^^^
//! ```
//!
//! Macros that consume helper attributes will want to check that unrecognized
//! content within those attributes is properly indicated to the caller. Is the
//! error message correctly placed under the erroneous tokens, not on a useless
//! call\_site span?
//!
//! ```console
//! error: unknown serde field attribute `qqq`
//! --> $DIR/unknown-attribute.rs:5:13
//! |
//! 5 | #[serde(qqq = "...")]
//! | ^^^
//! ```
//!
//! Declarative macros can benefit from compile-fail tests too. The [`json!`]
//! macro from serde\_json is just a great big macro\_rules macro but makes an
//! effort to have error messages from broken JSON in the input always appear on
//! the most appropriate token:
//!
//! [`json!`]: https://docs.rs/serde_json/1.0/serde_json/macro.json.html
//!
//! ```console
//! error: no rules expected the token `,`
//! --> $DIR/double-comma.rs:4:38
//! |
//! 4 | println!("{}", json!({ "k": null,, }));
//! | ^ no rules expected this token in macro call
//! ```
//!
//! Sometimes we may have a macro that expands successfully but we count on it
//! to trigger particular compiler errors at some point beyond macro expansion.
//! For example the [`readonly`] crate introduces struct fields that are public
//! but readable only, even if the caller has a &mut reference to the
//! surrounding struct. If someone writes to a readonly field, we need to be
//! sure that it wouldn't compile:
//!
//! [`readonly`]: https://github.com/dtolnay/readonly
//!
//! ```console
//! error[E0594]: cannot assign to data in a `&` reference
//! --> $DIR/write-a-readonly.rs:17:26
//! |
//! 17 | println!("{}", s.n); s.n += 1;
//! | ^^^^^^^^ cannot assign
//! ```
//!
//! In all of these cases, the compiler's output can change because our crate or
//! one of our dependencies broke something, or as a consequence of changes in
//! the Rust compiler. Both are good reasons to have well conceived compile-fail
//! tests. If we refactor and mistakenly cause an error that used to be correct
//! to now no longer be emitted or be emitted in the wrong place, that is
//! important for a test suite to catch. If the compiler changes something that
//! makes error messages that we care about substantially worse, it is also
//! important to catch and report as a compiler issue.
#![doc(html_root_url = "https://docs.rs/trybuild/1.0.83")]
#![allow(
clippy::collapsible_if,
clippy::default_trait_access,
clippy::derive_partial_eq_without_eq,
clippy::doc_markdown,
clippy::enum_glob_use,
clippy::iter_not_returning_iterator, // https://github.com/rust-lang/rust-clippy/issues/8285
clippy::let_underscore_untyped, // https://github.com/rust-lang/rust-clippy/issues/10410
clippy::manual_assert,
clippy::manual_range_contains,
clippy::module_inception,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::range_plus_one,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::trivially_copy_pass_by_ref,
clippy::unused_self,
clippy::while_let_on_iterator,
)]
#![deny(clippy::clone_on_ref_ptr)]
#[macro_use]
mod term;
#[macro_use]
mod path;
mod cargo;
mod dependencies;
mod diff;
mod directory;
mod env;
mod error;
mod expand;
mod features;
mod flock;
mod inherit;
mod manifest;
mod message;
mod normalize;
mod run;
mod rustflags;
use std::cell::RefCell;
use std::panic::RefUnwindSafe;
use std::path::{Path, PathBuf};
use std::thread;
#[derive(Debug)]
pub struct TestCases {
runner: RefCell<Runner>,
}
#[derive(Debug)]
struct Runner {
tests: Vec<Test>,
}
#[derive(Clone, Debug)]
struct Test {
path: PathBuf,
expected: Expected,
}
#[derive(Copy, Clone, Debug)]
enum Expected {
Pass,
CompileFail,
}
impl TestCases {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
TestCases {
runner: RefCell::new(Runner { tests: Vec::new() }),
}
}
pub fn pass<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::Pass,
});
}
pub fn compile_fail<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::CompileFail,
});
}
}
impl RefUnwindSafe for TestCases {}
#[doc(hidden)]
impl Drop for TestCases {
fn | (&mut self) {
if!thread::panicking() {
self.runner.borrow_mut().run();
}
}
}
| drop | identifier_name |
lib.rs | //! [![github]](https://github.com/dtolnay/trybuild) [![crates-io]](https://crates.io/crates/trybuild) [![docs-rs]](https://docs.rs/trybuild)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
//!
//! <br>
//!
//! ####  A compiler diagnostics testing library in just 3 functions.
//!
//! Trybuild is a test harness for invoking rustc on a set of test cases and
//! asserting that any resulting error messages are the ones intended.
//!
//! Such tests are commonly useful for testing error reporting involving
//! procedural macros. We would write test cases triggering either errors
//! detected by the macro or errors detected by the Rust compiler in the
//! resulting expanded code, and compare against the expected errors to ensure
//! that they remain user-friendly.
//!
//! This style of testing is sometimes called *ui tests* because they test
//! aspects of the user's interaction with a library outside of what would be
//! covered by ordinary API tests.
//!
//! Nothing here is specific to macros; trybuild would work equally well for
//! testing misuse of non-macro APIs.
//!
//! <br>
//!
//! # Compile-fail tests
//!
//! A minimal trybuild setup looks like this:
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.compile_fail("tests/ui/*.rs");
//! }
//! ```
//!
//! The test can be run with `cargo test`. It will individually compile each of
//! the source files matching the glob pattern, expect them to fail to compile,
//! and assert that the compiler's error message matches an adjacently named
//! _*.stderr_ file containing the expected output (same file name as the test
//! except with a different extension). If it matches, the test case is
//! considered to succeed.
//!
//! Dependencies listed under `[dev-dependencies]` in the project's Cargo.toml
//! are accessible from within the test cases.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186574-76469e00-6e96-11e9-8cb5-b63b657170c9.png" width="700">
//! </p>
//!
//! Failing tests display the expected vs actual compiler output inline.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186575-79418e80-6e96-11e9-9478-c9b3dc10327f.png" width="700">
//! </p>
//!
//! A compile_fail test that fails to fail to compile is also a failure.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186576-7b0b5200-6e96-11e9-8bfd-2de705125108.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Pass tests
//!
//! The same test harness is able to run tests that are expected to pass, too.
//! Ordinarily you would just have Cargo run such tests directly, but being able
//! to combine modes like this could be useful for workshops in which
//! participants work through test cases enabling one at a time. Trybuild was
//! originally developed for my [procedural macros workshop at Rust
//! Latam][workshop].
//!
//! [workshop]: https://github.com/dtolnay/proc-macro-workshop
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.pass("tests/01-parse-header.rs");
//! t.pass("tests/02-parse-body.rs");
//! t.compile_fail("tests/03-expand-four-errors.rs");
//! t.pass("tests/04-paste-ident.rs");
//! t.pass("tests/05-repeat-section.rs");
//! //t.pass("tests/06-make-work-in-function.rs");
//! //t.pass("tests/07-init-array.rs");
//! //t.compile_fail("tests/08-ident-span.rs");
//! }
//! ```
//!
//! Pass tests are considered to succeed if they compile successfully and have a
//! `main` function that does not panic when the compiled binary is executed.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186580-7f376f80-6e96-11e9-9cae-8257609269ef.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Details
//!
//! That's the entire API.
//!
//! <br>
//!
//! # Workflow
//!
//! There are two ways to update the _*.stderr_ files as you iterate on your
//! test cases or your library; handwriting them is not recommended.
//!
//! First, if a test case is being run as compile_fail but a corresponding
//! _*.stderr_ file does not exist, the test runner will save the actual
//! compiler output with the right filename into a directory called *wip* within
//! the directory containing Cargo.toml. So you can update these files by
//! deleting them, running `cargo test`, and moving all the files from *wip*
//! into your testcase directory.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186579-7cd51580-6e96-11e9-9f19-54dcecc9fbba.png" width="700">
//! </p>
//!
//! Alternatively, run `cargo test` with the environment variable
//! `TRYBUILD=overwrite` to skip the *wip* directory and write all compiler
//! output directly in place. You'll want to check `git diff` afterward to be
//! sure the compiler's output is what you had in mind.
//!
//! <br>
//!
//! # What to test
//!
//! When it comes to compile-fail tests, write tests for anything for which you
//! care to find out when there are changes in the user-facing compiler output.
//! As a negative example, please don't write compile-fail tests simply calling
//! all of your public APIs with arguments of the wrong type; there would be no
//! benefit.
//!
//! A common use would be for testing specific targeted error messages emitted
//! by a procedural macro. For example the derive macro from the [`ref-cast`]
//! crate is required to be placed on a type that has either `#[repr(C)]` or
//! `#[repr(transparent)]` in order for the expansion to be free of undefined
//! behavior, which it enforces at compile time:
//!
//! [`ref-cast`]: https://github.com/dtolnay/ref-cast
//!
//! ```console
//! error: RefCast trait requires #[repr(C)] or #[repr(transparent)]
//! --> $DIR/missing-repr.rs:3:10
//! |
//! 3 | #[derive(RefCast)]
//! | ^^^^^^^
//! ```
//!
//! Macros that consume helper attributes will want to check that unrecognized
//! content within those attributes is properly indicated to the caller. Is the
//! error message correctly placed under the erroneous tokens, not on a useless
//! call\_site span?
//!
//! ```console
//! error: unknown serde field attribute `qqq`
//! --> $DIR/unknown-attribute.rs:5:13
//! |
//! 5 | #[serde(qqq = "...")]
//! | ^^^
//! ```
//!
//! Declarative macros can benefit from compile-fail tests too. The [`json!`]
//! macro from serde\_json is just a great big macro\_rules macro but makes an
//! effort to have error messages from broken JSON in the input always appear on
//! the most appropriate token:
//!
//! [`json!`]: https://docs.rs/serde_json/1.0/serde_json/macro.json.html
//!
//! ```console
//! error: no rules expected the token `,`
//! --> $DIR/double-comma.rs:4:38
//! |
//! 4 | println!("{}", json!({ "k": null,, }));
//! | ^ no rules expected this token in macro call
//! ```
//!
//! Sometimes we may have a macro that expands successfully but we count on it
//! to trigger particular compiler errors at some point beyond macro expansion.
//! For example the [`readonly`] crate introduces struct fields that are public
//! but readable only, even if the caller has a &mut reference to the
//! surrounding struct. If someone writes to a readonly field, we need to be
//! sure that it wouldn't compile:
//!
//! [`readonly`]: https://github.com/dtolnay/readonly
//!
//! ```console
//! error[E0594]: cannot assign to data in a `&` reference
//! --> $DIR/write-a-readonly.rs:17:26
//! |
//! 17 | println!("{}", s.n); s.n += 1;
//! | ^^^^^^^^ cannot assign
//! ```
//!
//! In all of these cases, the compiler's output can change because our crate or
//! one of our dependencies broke something, or as a consequence of changes in
//! the Rust compiler. Both are good reasons to have well conceived compile-fail
//! tests. If we refactor and mistakenly cause an error that used to be correct
//! to now no longer be emitted or be emitted in the wrong place, that is
//! important for a test suite to catch. If the compiler changes something that
//! makes error messages that we care about substantially worse, it is also
//! important to catch and report as a compiler issue.
#![doc(html_root_url = "https://docs.rs/trybuild/1.0.83")]
#![allow(
clippy::collapsible_if,
clippy::default_trait_access,
clippy::derive_partial_eq_without_eq,
clippy::doc_markdown,
clippy::enum_glob_use,
clippy::iter_not_returning_iterator, // https://github.com/rust-lang/rust-clippy/issues/8285
clippy::let_underscore_untyped, // https://github.com/rust-lang/rust-clippy/issues/10410
clippy::manual_assert,
clippy::manual_range_contains,
clippy::module_inception,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::range_plus_one,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::trivially_copy_pass_by_ref,
clippy::unused_self,
clippy::while_let_on_iterator,
)]
#![deny(clippy::clone_on_ref_ptr)]
#[macro_use]
mod term;
#[macro_use]
mod path;
mod cargo;
mod dependencies;
mod diff;
mod directory;
mod env;
mod error;
mod expand;
mod features;
mod flock;
mod inherit;
mod manifest;
mod message;
mod normalize;
mod run;
mod rustflags;
use std::cell::RefCell;
use std::panic::RefUnwindSafe;
use std::path::{Path, PathBuf};
use std::thread;
#[derive(Debug)]
pub struct TestCases {
runner: RefCell<Runner>,
}
#[derive(Debug)]
struct Runner {
tests: Vec<Test>,
}
#[derive(Clone, Debug)]
struct Test {
path: PathBuf,
expected: Expected,
}
#[derive(Copy, Clone, Debug)]
enum Expected {
Pass,
CompileFail,
}
impl TestCases {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
TestCases {
runner: RefCell::new(Runner { tests: Vec::new() }),
}
}
pub fn pass<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::Pass,
});
}
pub fn compile_fail<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::CompileFail,
});
}
}
impl RefUnwindSafe for TestCases {}
#[doc(hidden)]
impl Drop for TestCases {
fn drop(&mut self) {
if!thread::panicking() |
}
}
| {
self.runner.borrow_mut().run();
} | conditional_block |
lib.rs | //! [![github]](https://github.com/dtolnay/trybuild) [![crates-io]](https://crates.io/crates/trybuild) [![docs-rs]](https://docs.rs/trybuild)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs
//!
//! <br>
//!
//! ####  A compiler diagnostics testing library in just 3 functions.
//!
//! Trybuild is a test harness for invoking rustc on a set of test cases and
//! asserting that any resulting error messages are the ones intended.
//!
//! Such tests are commonly useful for testing error reporting involving
//! procedural macros. We would write test cases triggering either errors
//! detected by the macro or errors detected by the Rust compiler in the
//! resulting expanded code, and compare against the expected errors to ensure
//! that they remain user-friendly.
//!
//! This style of testing is sometimes called *ui tests* because they test
//! aspects of the user's interaction with a library outside of what would be
//! covered by ordinary API tests.
//!
//! Nothing here is specific to macros; trybuild would work equally well for
//! testing misuse of non-macro APIs.
//!
//! <br>
//!
//! # Compile-fail tests
//!
//! A minimal trybuild setup looks like this:
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.compile_fail("tests/ui/*.rs");
//! }
//! ```
//!
//! The test can be run with `cargo test`. It will individually compile each of
//! the source files matching the glob pattern, expect them to fail to compile,
//! and assert that the compiler's error message matches an adjacently named
//! _*.stderr_ file containing the expected output (same file name as the test
//! except with a different extension). If it matches, the test case is
//! considered to succeed.
//!
//! Dependencies listed under `[dev-dependencies]` in the project's Cargo.toml
//! are accessible from within the test cases.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186574-76469e00-6e96-11e9-8cb5-b63b657170c9.png" width="700">
//! </p>
//!
//! Failing tests display the expected vs actual compiler output inline.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186575-79418e80-6e96-11e9-9478-c9b3dc10327f.png" width="700">
//! </p>
//!
//! A compile_fail test that fails to fail to compile is also a failure.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186576-7b0b5200-6e96-11e9-8bfd-2de705125108.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Pass tests
//!
//! The same test harness is able to run tests that are expected to pass, too.
//! Ordinarily you would just have Cargo run such tests directly, but being able
//! to combine modes like this could be useful for workshops in which
//! participants work through test cases enabling one at a time. Trybuild was
//! originally developed for my [procedural macros workshop at Rust
//! Latam][workshop].
//!
//! [workshop]: https://github.com/dtolnay/proc-macro-workshop
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.pass("tests/01-parse-header.rs");
//! t.pass("tests/02-parse-body.rs");
//! t.compile_fail("tests/03-expand-four-errors.rs");
//! t.pass("tests/04-paste-ident.rs");
//! t.pass("tests/05-repeat-section.rs");
//! //t.pass("tests/06-make-work-in-function.rs");
//! //t.pass("tests/07-init-array.rs");
//! //t.compile_fail("tests/08-ident-span.rs");
//! }
//! ```
//!
//! Pass tests are considered to succeed if they compile successfully and have a
//! `main` function that does not panic when the compiled binary is executed.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186580-7f376f80-6e96-11e9-9cae-8257609269ef.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Details
//!
//! That's the entire API.
//!
//! <br>
//!
//! # Workflow
//!
//! There are two ways to update the _*.stderr_ files as you iterate on your
//! test cases or your library; handwriting them is not recommended.
//!
//! First, if a test case is being run as compile_fail but a corresponding
//! _*.stderr_ file does not exist, the test runner will save the actual
//! compiler output with the right filename into a directory called *wip* within
//! the directory containing Cargo.toml. So you can update these files by
//! deleting them, running `cargo test`, and moving all the files from *wip*
//! into your testcase directory.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186579-7cd51580-6e96-11e9-9f19-54dcecc9fbba.png" width="700">
//! </p>
//!
//! Alternatively, run `cargo test` with the environment variable
//! `TRYBUILD=overwrite` to skip the *wip* directory and write all compiler
//! output directly in place. You'll want to check `git diff` afterward to be
//! sure the compiler's output is what you had in mind.
//!
//! <br>
//!
//! # What to test
//!
//! When it comes to compile-fail tests, write tests for anything for which you
//! care to find out when there are changes in the user-facing compiler output.
//! As a negative example, please don't write compile-fail tests simply calling
//! all of your public APIs with arguments of the wrong type; there would be no
//! benefit.
//!
//! A common use would be for testing specific targeted error messages emitted | //! [`ref-cast`]: https://github.com/dtolnay/ref-cast
//!
//! ```console
//! error: RefCast trait requires #[repr(C)] or #[repr(transparent)]
//! --> $DIR/missing-repr.rs:3:10
//! |
//! 3 | #[derive(RefCast)]
//! | ^^^^^^^
//! ```
//!
//! Macros that consume helper attributes will want to check that unrecognized
//! content within those attributes is properly indicated to the caller. Is the
//! error message correctly placed under the erroneous tokens, not on a useless
//! call\_site span?
//!
//! ```console
//! error: unknown serde field attribute `qqq`
//! --> $DIR/unknown-attribute.rs:5:13
//! |
//! 5 | #[serde(qqq = "...")]
//! | ^^^
//! ```
//!
//! Declarative macros can benefit from compile-fail tests too. The [`json!`]
//! macro from serde\_json is just a great big macro\_rules macro but makes an
//! effort to have error messages from broken JSON in the input always appear on
//! the most appropriate token:
//!
//! [`json!`]: https://docs.rs/serde_json/1.0/serde_json/macro.json.html
//!
//! ```console
//! error: no rules expected the token `,`
//! --> $DIR/double-comma.rs:4:38
//! |
//! 4 | println!("{}", json!({ "k": null,, }));
//! | ^ no rules expected this token in macro call
//! ```
//!
//! Sometimes we may have a macro that expands successfully but we count on it
//! to trigger particular compiler errors at some point beyond macro expansion.
//! For example the [`readonly`] crate introduces struct fields that are public
//! but readable only, even if the caller has a &mut reference to the
//! surrounding struct. If someone writes to a readonly field, we need to be
//! sure that it wouldn't compile:
//!
//! [`readonly`]: https://github.com/dtolnay/readonly
//!
//! ```console
//! error[E0594]: cannot assign to data in a `&` reference
//! --> $DIR/write-a-readonly.rs:17:26
//! |
//! 17 | println!("{}", s.n); s.n += 1;
//! | ^^^^^^^^ cannot assign
//! ```
//!
//! In all of these cases, the compiler's output can change because our crate or
//! one of our dependencies broke something, or as a consequence of changes in
//! the Rust compiler. Both are good reasons to have well conceived compile-fail
//! tests. If we refactor and mistakenly cause an error that used to be correct
//! to now no longer be emitted or be emitted in the wrong place, that is
//! important for a test suite to catch. If the compiler changes something that
//! makes error messages that we care about substantially worse, it is also
//! important to catch and report as a compiler issue.
#![doc(html_root_url = "https://docs.rs/trybuild/1.0.83")]
#![allow(
clippy::collapsible_if,
clippy::default_trait_access,
clippy::derive_partial_eq_without_eq,
clippy::doc_markdown,
clippy::enum_glob_use,
clippy::iter_not_returning_iterator, // https://github.com/rust-lang/rust-clippy/issues/8285
clippy::let_underscore_untyped, // https://github.com/rust-lang/rust-clippy/issues/10410
clippy::manual_assert,
clippy::manual_range_contains,
clippy::module_inception,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::range_plus_one,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::trivially_copy_pass_by_ref,
clippy::unused_self,
clippy::while_let_on_iterator,
)]
#![deny(clippy::clone_on_ref_ptr)]
#[macro_use]
mod term;
#[macro_use]
mod path;
mod cargo;
mod dependencies;
mod diff;
mod directory;
mod env;
mod error;
mod expand;
mod features;
mod flock;
mod inherit;
mod manifest;
mod message;
mod normalize;
mod run;
mod rustflags;
use std::cell::RefCell;
use std::panic::RefUnwindSafe;
use std::path::{Path, PathBuf};
use std::thread;
#[derive(Debug)]
pub struct TestCases {
runner: RefCell<Runner>,
}
#[derive(Debug)]
struct Runner {
tests: Vec<Test>,
}
#[derive(Clone, Debug)]
struct Test {
path: PathBuf,
expected: Expected,
}
#[derive(Copy, Clone, Debug)]
enum Expected {
Pass,
CompileFail,
}
impl TestCases {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
TestCases {
runner: RefCell::new(Runner { tests: Vec::new() }),
}
}
pub fn pass<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::Pass,
});
}
pub fn compile_fail<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::CompileFail,
});
}
}
impl RefUnwindSafe for TestCases {}
#[doc(hidden)]
impl Drop for TestCases {
fn drop(&mut self) {
if!thread::panicking() {
self.runner.borrow_mut().run();
}
}
} | //! by a procedural macro. For example the derive macro from the [`ref-cast`]
//! crate is required to be placed on a type that has either `#[repr(C)]` or
//! `#[repr(transparent)]` in order for the expansion to be free of undefined
//! behavior, which it enforces at compile time:
//! | random_line_split |
20.rs | use std::io::{self, BufRead};
use std::collections::HashMap;
const IMAGEDIM: usize = 8;
type Image = [u16; IMAGEDIM];
// the bool encodes flip
#[derive(Debug, Copy, Clone)]
enum Orientation {
Up(bool),
Left(bool),
Down(bool),
Right(bool),
}
fn rot_orientation(ori: Orientation) -> Orientation {
use Orientation::*;
match ori {
Up(f) => Left(f),
Left(f) => Down(f),
Down(f) => Right(f),
Right(f) => Up(f),
}
}
// flip along x axis: upside down
fn flip_orientation(ori: Orientation) -> Orientation {
use Orientation::*;
match ori {
Up(f) => Down(!f),
Left(f) => Left(!f),
Down(f) => Up(!f),
Right(f) => Right(!f),
}
}
// - top bottom left right, not sure why I didn't make this a struct
// - bits run MSB left to LSB right, MSB top to LSB bottom
// - could also store these in one big u64 for more fun rotations but that's too clever
type Borders = (u16, u16, u16, u16);
#[derive(Debug, Clone, Copy)]
struct Tile {
name: u16,
borders: Borders,
orientation: Orientation,
}
// dim doesn't change but it's handy to keep here
#[derive(Debug, Clone)]
struct State {
map: Vec<Option<Tile>>,
dim: usize,
}
impl State {
fn new(dim: usize) -> State {
State {
map: vec![None; dim * dim],
dim,
}
}
fn coord(&self, x: usize, y: usize) -> usize {
y * self.dim + x
}
fn at(&self, x: usize, y: usize) -> &Option<Tile> {
&self.map[self.coord(x, y)]
}
fn top_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.0)
}
fn bottom_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.1)
}
fn left_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.2)
}
fn right_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.3)
}
fn accepts(&self, pos: usize, tile: &Tile) -> bool {
assert!(self.map[pos].is_none());
let x = pos % self.dim;
let y = pos / self.dim;
if y > 0 && self.bottom_border(self.coord(x, y - 1)).map(|border| border!=
tile.borders.0).unwrap_or(false) {
return false;
}
if y < self.dim - 1 && self.top_border(self.coord(x, y + 1)).map(|border| border!=
tile.borders.1).unwrap_or(false) {
return false;
}
if x > 0 && self.right_border(self.coord(x - 1, y)).map(|border| border!=
tile.borders.2).unwrap_or(false) {
return false;
}
if x < self.dim - 1 && self.left_border(self.coord(x + 1, y)).map(|border| border!=
tile.borders.3).unwrap_or(false) {
return false;
}
true
}
}
fn flipbits(mut bits: u16) -> u16 {
// careful, just the lowest 10 bits, not 16
// 0123456789
// 9876543210
let mut out = 0;
for _ in 0..(IMAGEDIM + 2) {
out <<= 1;
out |= bits & 1;
bits >>= 1;
}
out
}
// counting from the right, MSB is top
fn img_column(image: &Image, col: usize) -> u16 {
image.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[y] = img_column(&image, y);
}
out
}
fn flip_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[IMAGEDIM - 1 - y] = image[y];
}
out
}
fn orient_image(original: Image, ori: Orientation) -> Image {
use Orientation::*;
match ori {
Up(false) => original,
Left(false) => rotate_img(original),
Down(false) => rotate_img(rotate_img(original)),
Right(false) => rotate_img(rotate_img(rotate_img(original))),
Up(true) => rotate_img(rotate_img(flip_img(original))),
Left(true) => rotate_img(rotate_img(rotate_img(flip_img(original)))),
Down(true) => flip_img(original),
Right(true) => rotate_img(flip_img(original)),
}
}
// rotate 90 degrees ccw, keep the bit order. could also store all ccw and do flips in comparisons
fn rotate(tile: Tile) -> Tile {
Tile {
name: tile.name,
// top, bottom, left, right; bits left to right, top to bottom
borders: (tile.borders.3, tile.borders.2, flipbits(tile.borders.0), flipbits(tile.borders.1)),
orientation: rot_orientation(tile.orientation),
}
}
// along x axis: top and bottom swap, left and right are mirrored
fn flipx(tile: Tile) -> Tile {
Tile {
name: tile.name,
borders: (tile.borders.1, tile.borders.0, flipbits(tile.borders.2), flipbits(tile.borders.3)),
orientation: flip_orientation(tile.orientation),
}
}
fn search(current_state: State, remaining_tiles: Vec<Tile>) -> Option<State> {
if false {
println!("---");
for y in 0..current_state.dim {
for x in 0..current_state.dim {
if let Some(tile) = current_state.at(x, y) {
print!("{} ", tile.name);
} else {
print!(".... ");
}
}
println!();
}
}
if remaining_tiles.is_empty() {
// all consumed, this is a valid solution
return Some(current_state);
}
// if remaining tiles, the map also has equivalent number of remaining open slots
let nextpos = current_state.map.iter().position(|x| x.is_none()).unwrap();
let run_search = |tile_ix: usize, tile: Tile| {
if current_state.accepts(nextpos, &tile) {
let mut next_state = current_state.clone();
let mut next_tiles = remaining_tiles.clone();
next_state.map[nextpos] = Some(tile);
next_tiles.remove(tile_ix);
search(next_state, next_tiles)
} else {
None
}
};
for (tile_ix, &tile) in remaining_tiles.iter().enumerate() {
for &t1 in &[tile, flipx(tile)] {
for &t2 in &[t1, rotate(t1), rotate(rotate(t1)), rotate(rotate(rotate(t1)))] {
let s = run_search(tile_ix, t2);
if s.is_some() {
// many solutions could exist due to symmetry, but any of them is acceptable
// because they're equivalent so pick the first when one is found
return s;
}
}
}
}
None
}
type Sea = Vec<u128>;
/* epic sea monster
* 98765432109876543210
* #
* # ## ## ###
* # # # # # #
*/
const MONS0: u128 = 0b00000000000000000010;
const MONS1: u128 = 0b10000110000110000111;
const MONS2: u128 = 0b01001001001001001000;
const MONS_LEN: usize = 20; // bits
fn monster_x_position(a: u128, b: u128, c: u128, x: usize) -> Option<usize> {
for shift in x..=(128 - MONS_LEN) {
let abits = (a >> shift) & MONS0;
let bbits = (b >> shift) & MONS1;
let cbits = (c >> shift) & MONS2;
if abits == MONS0 && bbits == MONS1 && cbits == MONS2 {
return Some(shift);
}
}
None
}
fn sea_monsters(sea: &Sea) -> Vec<(usize, usize)> {
// can the monsters overlap? Not specified, hopefully it doesn't matter
let mut mons = Vec::new();
for (y, rows) in sea.windows(3).enumerate() {
let mut x0 = 0;
while let Some(shift) = monster_x_position(rows[0], rows[1], rows[2], x0) {
mons.push((shift, y));
x0 = shift + 1;
}
}
mons
}
fn flip_sea(sea: &Sea) -> Sea {
sea.iter().rev().copied().collect()
}
fn sea_column(sea: &Sea, col: usize) -> u128 {
sea.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_sea(sea: &Sea) -> Sea {
let mut out = Vec::new();
for y in 0..128 {
out.push(sea_column(sea, y));
}
out
}
fn dump_sea(sea: &Sea) {
for row in sea.iter() {
for c in (0..128).rev() {
print!("{}", if (row & (1 << c))!= 0 { '#' } else { '.' });
}
println!();
}
}
fn water_roughness(sea: &Sea) -> usize {
let mut seas = [
sea.clone(),
rotate_sea(sea),
rotate_sea(&rotate_sea(sea)),
rotate_sea(&rotate_sea(&rotate_sea(sea))),
flip_sea(sea),
rotate_sea(&flip_sea(sea)),
rotate_sea(&rotate_sea(&flip_sea(sea))),
rotate_sea(&rotate_sea(&rotate_sea(&flip_sea(sea)))),
];
let monster_locations: Vec<Vec<_>> = seas.iter().map(sea_monsters).collect();
assert!(monster_locations.iter().filter(|x|!x.is_empty()).count() == 1);
let (sea, monsters): (&mut Sea, &Vec<_>) = seas.iter_mut().zip(monster_locations.iter())
.find(|(_s, m)|!m.is_empty()).unwrap();
let initial_roughness: usize = sea.iter().map(|waves| waves.count_ones() as usize).sum(); |
if false {
dump_sea(sea);
println!();
}
let monster_weight = (MONS0.count_ones() + MONS1.count_ones() + MONS2.count_ones()) as usize;
println!("quick check: {}", initial_roughness - monsters.len() * monster_weight);
for (y, row) in sea.iter().enumerate() {
for c in (0..128).rev() {
let m = monsters.iter().any(|&(ms, my)| {
let m0 = y == my && ((MONS0 << ms) & (1 << c))!= 0;
let m1 = y == my + 1 && ((MONS1 << ms) & (1 << c))!= 0;
let m2 = y == my + 2 && ((MONS2 << ms) & (1 << c))!= 0;
m0 || m1 || m2
});
if m {
print!("O");
} else {
print!("{}", if (row & (1 << c))!= 0 { '#' } else { '.' });
}
}
println!();
}
println!();
// if any monsters overlap, this could be more reliable than the quick estimate
for &(ms, my) in monsters.iter() {
sea[my ] &=!(MONS0 << ms);
sea[my + 1] &=!(MONS1 << ms);
sea[my + 2] &=!(MONS2 << ms);
}
sea.iter().map(|waves| waves.count_ones() as usize).sum()
}
fn form_actual_image(tilemap: &HashMap<u16, &(Tile, Image)>, state: &State) -> Sea {
let mut sea: Sea = vec![0; state.dim * IMAGEDIM];
for y in 0..state.dim {
for x in 0..state.dim {
let tile = state.at(x, y).unwrap();
let img = orient_image(tilemap[&tile.name].1, tile.orientation);
for (rowi, &rowbits) in img.iter().enumerate() {
sea[y * IMAGEDIM + rowi] |= (rowbits as u128) << ((state.dim - 1 - x) * IMAGEDIM);
}
}
}
sea
}
// MSB is left or top
fn parse_tile(input: &[String]) -> (Tile, Image) {
let name = input[0].strip_prefix("Tile ").unwrap().strip_suffix(":").unwrap().parse().unwrap();
let top = input[1].as_bytes().iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
let bottom = input.last().unwrap().as_bytes().iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
let left = input[1..].iter().fold(0, |bits, row| {
(bits << 1) | ((*row.as_bytes().first().unwrap() == b'#') as u16)
});
let right = input[1..].iter().fold(0, |bits, row| {
(bits << 1) | ((*row.as_bytes().last().unwrap() == b'#') as u16)
});
let borders = (top, bottom, left, right);
let mut image = [0; IMAGEDIM];
for (srcstr, dstbits) in input[2..].iter().zip(image.iter_mut()) {
*dstbits = srcstr.as_bytes()[1..(1+IMAGEDIM)].iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
}
(Tile { name, borders, orientation: Orientation::Up(false) }, image.clone())
}
// note: my input has 144 tiles - that would be 12*12, or 9*16, or 8*18, or 6*24, etc. but the
// specs hint that the final arrangement will be square
fn main() {
assert!(flipbits(1) == (1 << 9));
assert!(flipbits(1 << 9) == (1));
assert!(flipbits(0x1e) == (0x0f << 5));
assert!(flipbits(0x0f << 5) == (0x1e));
let lines: Vec<_> = io::stdin().lock().lines()
.map(|line| line.unwrap())
.collect();
let tiles: Vec<(Tile, Image)> = lines.split(|line| line == "").map(parse_tile).collect();
// assume the image is a square
let dim = (tiles.len() as f64).sqrt() as usize;
let puzzle_tiles: Vec<Tile> = tiles.iter().map(|(t, _i)| *t).collect();
let state = search(State::new(dim), puzzle_tiles).unwrap();
for y in 0..dim {
for x in 0..dim {
print!("{} ", state.at(x, y).unwrap().name);
}
println!();
}
let corners = [
state.at(0, 0).unwrap().name as u64,
state.at(dim - 1, 0).unwrap().name as u64,
state.at(0, dim - 1).unwrap().name as u64,
state.at(dim - 1, dim - 1).unwrap().name as u64,
];
println!("{}", corners[0] * corners[1] * corners[2] * corners[3]);
// indexed by name for easier lookup
let tilemap: HashMap<u16, &(Tile, Image)> = tiles.iter().map(|ti| {
(ti.0.name, ti)
}).collect();
let sea = form_actual_image(&tilemap, &state);
println!("{}", water_roughness(&sea));
} | println!("rouff with monsters {}, {} total", initial_roughness, monsters.len()); | random_line_split |
20.rs | use std::io::{self, BufRead};
use std::collections::HashMap;
const IMAGEDIM: usize = 8;
type Image = [u16; IMAGEDIM];
// the bool encodes flip
#[derive(Debug, Copy, Clone)]
enum Orientation {
Up(bool),
Left(bool),
Down(bool),
Right(bool),
}
fn rot_orientation(ori: Orientation) -> Orientation {
use Orientation::*;
match ori {
Up(f) => Left(f),
Left(f) => Down(f),
Down(f) => Right(f),
Right(f) => Up(f),
}
}
// flip along x axis: upside down
fn flip_orientation(ori: Orientation) -> Orientation {
use Orientation::*;
match ori {
Up(f) => Down(!f),
Left(f) => Left(!f),
Down(f) => Up(!f),
Right(f) => Right(!f),
}
}
// - top bottom left right, not sure why I didn't make this a struct
// - bits run MSB left to LSB right, MSB top to LSB bottom
// - could also store these in one big u64 for more fun rotations but that's too clever
type Borders = (u16, u16, u16, u16);
#[derive(Debug, Clone, Copy)]
struct Tile {
name: u16,
borders: Borders,
orientation: Orientation,
}
// dim doesn't change but it's handy to keep here
#[derive(Debug, Clone)]
struct State {
map: Vec<Option<Tile>>,
dim: usize,
}
impl State {
fn new(dim: usize) -> State {
State {
map: vec![None; dim * dim],
dim,
}
}
fn coord(&self, x: usize, y: usize) -> usize {
y * self.dim + x
}
fn at(&self, x: usize, y: usize) -> &Option<Tile> {
&self.map[self.coord(x, y)]
}
fn top_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.0)
}
fn bottom_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.1)
}
fn left_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.2)
}
fn right_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.3)
}
fn accepts(&self, pos: usize, tile: &Tile) -> bool | true
}
}
fn flipbits(mut bits: u16) -> u16 {
// careful, just the lowest 10 bits, not 16
// 0123456789
// 9876543210
let mut out = 0;
for _ in 0..(IMAGEDIM + 2) {
out <<= 1;
out |= bits & 1;
bits >>= 1;
}
out
}
// counting from the right, MSB is top
fn img_column(image: &Image, col: usize) -> u16 {
image.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[y] = img_column(&image, y);
}
out
}
fn flip_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[IMAGEDIM - 1 - y] = image[y];
}
out
}
fn orient_image(original: Image, ori: Orientation) -> Image {
use Orientation::*;
match ori {
Up(false) => original,
Left(false) => rotate_img(original),
Down(false) => rotate_img(rotate_img(original)),
Right(false) => rotate_img(rotate_img(rotate_img(original))),
Up(true) => rotate_img(rotate_img(flip_img(original))),
Left(true) => rotate_img(rotate_img(rotate_img(flip_img(original)))),
Down(true) => flip_img(original),
Right(true) => rotate_img(flip_img(original)),
}
}
// rotate 90 degrees ccw, keep the bit order. could also store all ccw and do flips in comparisons
fn rotate(tile: Tile) -> Tile {
Tile {
name: tile.name,
// top, bottom, left, right; bits left to right, top to bottom
borders: (tile.borders.3, tile.borders.2, flipbits(tile.borders.0), flipbits(tile.borders.1)),
orientation: rot_orientation(tile.orientation),
}
}
// along x axis: top and bottom swap, left and right are mirrored
fn flipx(tile: Tile) -> Tile {
Tile {
name: tile.name,
borders: (tile.borders.1, tile.borders.0, flipbits(tile.borders.2), flipbits(tile.borders.3)),
orientation: flip_orientation(tile.orientation),
}
}
fn search(current_state: State, remaining_tiles: Vec<Tile>) -> Option<State> {
if false {
println!("---");
for y in 0..current_state.dim {
for x in 0..current_state.dim {
if let Some(tile) = current_state.at(x, y) {
print!("{} ", tile.name);
} else {
print!(".... ");
}
}
println!();
}
}
if remaining_tiles.is_empty() {
// all consumed, this is a valid solution
return Some(current_state);
}
// if remaining tiles, the map also has equivalent number of remaining open slots
let nextpos = current_state.map.iter().position(|x| x.is_none()).unwrap();
let run_search = |tile_ix: usize, tile: Tile| {
if current_state.accepts(nextpos, &tile) {
let mut next_state = current_state.clone();
let mut next_tiles = remaining_tiles.clone();
next_state.map[nextpos] = Some(tile);
next_tiles.remove(tile_ix);
search(next_state, next_tiles)
} else {
None
}
};
for (tile_ix, &tile) in remaining_tiles.iter().enumerate() {
for &t1 in &[tile, flipx(tile)] {
for &t2 in &[t1, rotate(t1), rotate(rotate(t1)), rotate(rotate(rotate(t1)))] {
let s = run_search(tile_ix, t2);
if s.is_some() {
// many solutions could exist due to symmetry, but any of them is acceptable
// because they're equivalent so pick the first when one is found
return s;
}
}
}
}
None
}
type Sea = Vec<u128>;
/* epic sea monster
* 98765432109876543210
* #
* # ## ## ###
* # # # # # #
*/
const MONS0: u128 = 0b00000000000000000010;
const MONS1: u128 = 0b10000110000110000111;
const MONS2: u128 = 0b01001001001001001000;
const MONS_LEN: usize = 20; // bits
fn monster_x_position(a: u128, b: u128, c: u128, x: usize) -> Option<usize> {
for shift in x..=(128 - MONS_LEN) {
let abits = (a >> shift) & MONS0;
let bbits = (b >> shift) & MONS1;
let cbits = (c >> shift) & MONS2;
if abits == MONS0 && bbits == MONS1 && cbits == MONS2 {
return Some(shift);
}
}
None
}
fn sea_monsters(sea: &Sea) -> Vec<(usize, usize)> {
// can the monsters overlap? Not specified, hopefully it doesn't matter
let mut mons = Vec::new();
for (y, rows) in sea.windows(3).enumerate() {
let mut x0 = 0;
while let Some(shift) = monster_x_position(rows[0], rows[1], rows[2], x0) {
mons.push((shift, y));
x0 = shift + 1;
}
}
mons
}
fn flip_sea(sea: &Sea) -> Sea {
sea.iter().rev().copied().collect()
}
fn sea_column(sea: &Sea, col: usize) -> u128 {
sea.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_sea(sea: &Sea) -> Sea {
let mut out = Vec::new();
for y in 0..128 {
out.push(sea_column(sea, y));
}
out
}
fn dump_sea(sea: &Sea) {
for row in sea.iter() {
for c in (0..128).rev() {
print!("{}", if (row & (1 << c))!= 0 { '#' } else { '.' });
}
println!();
}
}
fn water_roughness(sea: &Sea) -> usize {
let mut seas = [
sea.clone(),
rotate_sea(sea),
rotate_sea(&rotate_sea(sea)),
rotate_sea(&rotate_sea(&rotate_sea(sea))),
flip_sea(sea),
rotate_sea(&flip_sea(sea)),
rotate_sea(&rotate_sea(&flip_sea(sea))),
rotate_sea(&rotate_sea(&rotate_sea(&flip_sea(sea)))),
];
let monster_locations: Vec<Vec<_>> = seas.iter().map(sea_monsters).collect();
assert!(monster_locations.iter().filter(|x|!x.is_empty()).count() == 1);
let (sea, monsters): (&mut Sea, &Vec<_>) = seas.iter_mut().zip(monster_locations.iter())
.find(|(_s, m)|!m.is_empty()).unwrap();
let initial_roughness: usize = sea.iter().map(|waves| waves.count_ones() as usize).sum();
println!("rouff with monsters {}, {} total", initial_roughness, monsters.len());
if false {
dump_sea(sea);
println!();
}
let monster_weight = (MONS0.count_ones() + MONS1.count_ones() + MONS2.count_ones()) as usize;
println!("quick check: {}", initial_roughness - monsters.len() * monster_weight);
for (y, row) in sea.iter().enumerate() {
for c in (0..128).rev() {
let m = monsters.iter().any(|&(ms, my)| {
let m0 = y == my && ((MONS0 << ms) & (1 << c))!= 0;
let m1 = y == my + 1 && ((MONS1 << ms) & (1 << c))!= 0;
let m2 = y == my + 2 && ((MONS2 << ms) & (1 << c))!= 0;
m0 || m1 || m2
});
if m {
print!("O");
} else {
print!("{}", if (row & (1 << c))!= 0 { '#' } else { '.' });
}
}
println!();
}
println!();
// if any monsters overlap, this could be more reliable than the quick estimate
for &(ms, my) in monsters.iter() {
sea[my ] &=!(MONS0 << ms);
sea[my + 1] &=!(MONS1 << ms);
sea[my + 2] &=!(MONS2 << ms);
}
sea.iter().map(|waves| waves.count_ones() as usize).sum()
}
fn form_actual_image(tilemap: &HashMap<u16, &(Tile, Image)>, state: &State) -> Sea {
let mut sea: Sea = vec![0; state.dim * IMAGEDIM];
for y in 0..state.dim {
for x in 0..state.dim {
let tile = state.at(x, y).unwrap();
let img = orient_image(tilemap[&tile.name].1, tile.orientation);
for (rowi, &rowbits) in img.iter().enumerate() {
sea[y * IMAGEDIM + rowi] |= (rowbits as u128) << ((state.dim - 1 - x) * IMAGEDIM);
}
}
}
sea
}
// MSB is left or top
fn parse_tile(input: &[String]) -> (Tile, Image) {
let name = input[0].strip_prefix("Tile ").unwrap().strip_suffix(":").unwrap().parse().unwrap();
let top = input[1].as_bytes().iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
let bottom = input.last().unwrap().as_bytes().iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
let left = input[1..].iter().fold(0, |bits, row| {
(bits << 1) | ((*row.as_bytes().first().unwrap() == b'#') as u16)
});
let right = input[1..].iter().fold(0, |bits, row| {
(bits << 1) | ((*row.as_bytes().last().unwrap() == b'#') as u16)
});
let borders = (top, bottom, left, right);
let mut image = [0; IMAGEDIM];
for (srcstr, dstbits) in input[2..].iter().zip(image.iter_mut()) {
*dstbits = srcstr.as_bytes()[1..(1+IMAGEDIM)].iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
}
(Tile { name, borders, orientation: Orientation::Up(false) }, image.clone())
}
// note: my input has 144 tiles - that would be 12*12, or 9*16, or 8*18, or 6*24, etc. but the
// specs hint that the final arrangement will be square
fn main() {
assert!(flipbits(1) == (1 << 9));
assert!(flipbits(1 << 9) == (1));
assert!(flipbits(0x1e) == (0x0f << 5));
assert!(flipbits(0x0f << 5) == (0x1e));
let lines: Vec<_> = io::stdin().lock().lines()
.map(|line| line.unwrap())
.collect();
let tiles: Vec<(Tile, Image)> = lines.split(|line| line == "").map(parse_tile).collect();
// assume the image is a square
let dim = (tiles.len() as f64).sqrt() as usize;
let puzzle_tiles: Vec<Tile> = tiles.iter().map(|(t, _i)| *t).collect();
let state = search(State::new(dim), puzzle_tiles).unwrap();
for y in 0..dim {
for x in 0..dim {
print!("{} ", state.at(x, y).unwrap().name);
}
println!();
}
let corners = [
state.at(0, 0).unwrap().name as u64,
state.at(dim - 1, 0).unwrap().name as u64,
state.at(0, dim - 1).unwrap().name as u64,
state.at(dim - 1, dim - 1).unwrap().name as u64,
];
println!("{}", corners[0] * corners[1] * corners[2] * corners[3]);
// indexed by name for easier lookup
let tilemap: HashMap<u16, &(Tile, Image)> = tiles.iter().map(|ti| {
(ti.0.name, ti)
}).collect();
let sea = form_actual_image(&tilemap, &state);
println!("{}", water_roughness(&sea));
}
| {
assert!(self.map[pos].is_none());
let x = pos % self.dim;
let y = pos / self.dim;
if y > 0 && self.bottom_border(self.coord(x, y - 1)).map(|border| border !=
tile.borders.0).unwrap_or(false) {
return false;
}
if y < self.dim - 1 && self.top_border(self.coord(x, y + 1)).map(|border| border !=
tile.borders.1).unwrap_or(false) {
return false;
}
if x > 0 && self.right_border(self.coord(x - 1, y)).map(|border| border !=
tile.borders.2).unwrap_or(false) {
return false;
}
if x < self.dim - 1 && self.left_border(self.coord(x + 1, y)).map(|border| border !=
tile.borders.3).unwrap_or(false) {
return false;
} | identifier_body |
20.rs | use std::io::{self, BufRead};
use std::collections::HashMap;
const IMAGEDIM: usize = 8;
type Image = [u16; IMAGEDIM];
// the bool encodes flip
#[derive(Debug, Copy, Clone)]
enum Orientation {
Up(bool),
Left(bool),
Down(bool),
Right(bool),
}
fn rot_orientation(ori: Orientation) -> Orientation {
use Orientation::*;
match ori {
Up(f) => Left(f),
Left(f) => Down(f),
Down(f) => Right(f),
Right(f) => Up(f),
}
}
// flip along x axis: upside down
fn flip_orientation(ori: Orientation) -> Orientation {
use Orientation::*;
match ori {
Up(f) => Down(!f),
Left(f) => Left(!f),
Down(f) => Up(!f),
Right(f) => Right(!f),
}
}
// - top bottom left right, not sure why I didn't make this a struct
// - bits run MSB left to LSB right, MSB top to LSB bottom
// - could also store these in one big u64 for more fun rotations but that's too clever
type Borders = (u16, u16, u16, u16);
#[derive(Debug, Clone, Copy)]
struct Tile {
name: u16,
borders: Borders,
orientation: Orientation,
}
// dim doesn't change but it's handy to keep here
#[derive(Debug, Clone)]
struct State {
map: Vec<Option<Tile>>,
dim: usize,
}
impl State {
fn new(dim: usize) -> State {
State {
map: vec![None; dim * dim],
dim,
}
}
fn coord(&self, x: usize, y: usize) -> usize {
y * self.dim + x
}
fn at(&self, x: usize, y: usize) -> &Option<Tile> {
&self.map[self.coord(x, y)]
}
fn top_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.0)
}
fn bottom_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.1)
}
fn left_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.2)
}
fn right_border(&self, coord: usize) -> Option<u16> {
self.map[coord].map(|tile| tile.borders.3)
}
fn | (&self, pos: usize, tile: &Tile) -> bool {
assert!(self.map[pos].is_none());
let x = pos % self.dim;
let y = pos / self.dim;
if y > 0 && self.bottom_border(self.coord(x, y - 1)).map(|border| border!=
tile.borders.0).unwrap_or(false) {
return false;
}
if y < self.dim - 1 && self.top_border(self.coord(x, y + 1)).map(|border| border!=
tile.borders.1).unwrap_or(false) {
return false;
}
if x > 0 && self.right_border(self.coord(x - 1, y)).map(|border| border!=
tile.borders.2).unwrap_or(false) {
return false;
}
if x < self.dim - 1 && self.left_border(self.coord(x + 1, y)).map(|border| border!=
tile.borders.3).unwrap_or(false) {
return false;
}
true
}
}
fn flipbits(mut bits: u16) -> u16 {
// careful, just the lowest 10 bits, not 16
// 0123456789
// 9876543210
let mut out = 0;
for _ in 0..(IMAGEDIM + 2) {
out <<= 1;
out |= bits & 1;
bits >>= 1;
}
out
}
// counting from the right, MSB is top
fn img_column(image: &Image, col: usize) -> u16 {
image.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[y] = img_column(&image, y);
}
out
}
fn flip_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[IMAGEDIM - 1 - y] = image[y];
}
out
}
fn orient_image(original: Image, ori: Orientation) -> Image {
use Orientation::*;
match ori {
Up(false) => original,
Left(false) => rotate_img(original),
Down(false) => rotate_img(rotate_img(original)),
Right(false) => rotate_img(rotate_img(rotate_img(original))),
Up(true) => rotate_img(rotate_img(flip_img(original))),
Left(true) => rotate_img(rotate_img(rotate_img(flip_img(original)))),
Down(true) => flip_img(original),
Right(true) => rotate_img(flip_img(original)),
}
}
// rotate 90 degrees ccw, keep the bit order. could also store all ccw and do flips in comparisons
fn rotate(tile: Tile) -> Tile {
Tile {
name: tile.name,
// top, bottom, left, right; bits left to right, top to bottom
borders: (tile.borders.3, tile.borders.2, flipbits(tile.borders.0), flipbits(tile.borders.1)),
orientation: rot_orientation(tile.orientation),
}
}
// along x axis: top and bottom swap, left and right are mirrored
fn flipx(tile: Tile) -> Tile {
Tile {
name: tile.name,
borders: (tile.borders.1, tile.borders.0, flipbits(tile.borders.2), flipbits(tile.borders.3)),
orientation: flip_orientation(tile.orientation),
}
}
fn search(current_state: State, remaining_tiles: Vec<Tile>) -> Option<State> {
if false {
println!("---");
for y in 0..current_state.dim {
for x in 0..current_state.dim {
if let Some(tile) = current_state.at(x, y) {
print!("{} ", tile.name);
} else {
print!(".... ");
}
}
println!();
}
}
if remaining_tiles.is_empty() {
// all consumed, this is a valid solution
return Some(current_state);
}
// if remaining tiles, the map also has equivalent number of remaining open slots
let nextpos = current_state.map.iter().position(|x| x.is_none()).unwrap();
let run_search = |tile_ix: usize, tile: Tile| {
if current_state.accepts(nextpos, &tile) {
let mut next_state = current_state.clone();
let mut next_tiles = remaining_tiles.clone();
next_state.map[nextpos] = Some(tile);
next_tiles.remove(tile_ix);
search(next_state, next_tiles)
} else {
None
}
};
for (tile_ix, &tile) in remaining_tiles.iter().enumerate() {
for &t1 in &[tile, flipx(tile)] {
for &t2 in &[t1, rotate(t1), rotate(rotate(t1)), rotate(rotate(rotate(t1)))] {
let s = run_search(tile_ix, t2);
if s.is_some() {
// many solutions could exist due to symmetry, but any of them is acceptable
// because they're equivalent so pick the first when one is found
return s;
}
}
}
}
None
}
type Sea = Vec<u128>;
/* epic sea monster
* 98765432109876543210
* #
* # ## ## ###
* # # # # # #
*/
const MONS0: u128 = 0b00000000000000000010;
const MONS1: u128 = 0b10000110000110000111;
const MONS2: u128 = 0b01001001001001001000;
const MONS_LEN: usize = 20; // bits
fn monster_x_position(a: u128, b: u128, c: u128, x: usize) -> Option<usize> {
for shift in x..=(128 - MONS_LEN) {
let abits = (a >> shift) & MONS0;
let bbits = (b >> shift) & MONS1;
let cbits = (c >> shift) & MONS2;
if abits == MONS0 && bbits == MONS1 && cbits == MONS2 {
return Some(shift);
}
}
None
}
fn sea_monsters(sea: &Sea) -> Vec<(usize, usize)> {
// can the monsters overlap? Not specified, hopefully it doesn't matter
let mut mons = Vec::new();
for (y, rows) in sea.windows(3).enumerate() {
let mut x0 = 0;
while let Some(shift) = monster_x_position(rows[0], rows[1], rows[2], x0) {
mons.push((shift, y));
x0 = shift + 1;
}
}
mons
}
fn flip_sea(sea: &Sea) -> Sea {
sea.iter().rev().copied().collect()
}
fn sea_column(sea: &Sea, col: usize) -> u128 {
sea.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_sea(sea: &Sea) -> Sea {
let mut out = Vec::new();
for y in 0..128 {
out.push(sea_column(sea, y));
}
out
}
fn dump_sea(sea: &Sea) {
for row in sea.iter() {
for c in (0..128).rev() {
print!("{}", if (row & (1 << c))!= 0 { '#' } else { '.' });
}
println!();
}
}
fn water_roughness(sea: &Sea) -> usize {
let mut seas = [
sea.clone(),
rotate_sea(sea),
rotate_sea(&rotate_sea(sea)),
rotate_sea(&rotate_sea(&rotate_sea(sea))),
flip_sea(sea),
rotate_sea(&flip_sea(sea)),
rotate_sea(&rotate_sea(&flip_sea(sea))),
rotate_sea(&rotate_sea(&rotate_sea(&flip_sea(sea)))),
];
let monster_locations: Vec<Vec<_>> = seas.iter().map(sea_monsters).collect();
assert!(monster_locations.iter().filter(|x|!x.is_empty()).count() == 1);
let (sea, monsters): (&mut Sea, &Vec<_>) = seas.iter_mut().zip(monster_locations.iter())
.find(|(_s, m)|!m.is_empty()).unwrap();
let initial_roughness: usize = sea.iter().map(|waves| waves.count_ones() as usize).sum();
println!("rouff with monsters {}, {} total", initial_roughness, monsters.len());
if false {
dump_sea(sea);
println!();
}
let monster_weight = (MONS0.count_ones() + MONS1.count_ones() + MONS2.count_ones()) as usize;
println!("quick check: {}", initial_roughness - monsters.len() * monster_weight);
for (y, row) in sea.iter().enumerate() {
for c in (0..128).rev() {
let m = monsters.iter().any(|&(ms, my)| {
let m0 = y == my && ((MONS0 << ms) & (1 << c))!= 0;
let m1 = y == my + 1 && ((MONS1 << ms) & (1 << c))!= 0;
let m2 = y == my + 2 && ((MONS2 << ms) & (1 << c))!= 0;
m0 || m1 || m2
});
if m {
print!("O");
} else {
print!("{}", if (row & (1 << c))!= 0 { '#' } else { '.' });
}
}
println!();
}
println!();
// if any monsters overlap, this could be more reliable than the quick estimate
for &(ms, my) in monsters.iter() {
sea[my ] &=!(MONS0 << ms);
sea[my + 1] &=!(MONS1 << ms);
sea[my + 2] &=!(MONS2 << ms);
}
sea.iter().map(|waves| waves.count_ones() as usize).sum()
}
fn form_actual_image(tilemap: &HashMap<u16, &(Tile, Image)>, state: &State) -> Sea {
let mut sea: Sea = vec![0; state.dim * IMAGEDIM];
for y in 0..state.dim {
for x in 0..state.dim {
let tile = state.at(x, y).unwrap();
let img = orient_image(tilemap[&tile.name].1, tile.orientation);
for (rowi, &rowbits) in img.iter().enumerate() {
sea[y * IMAGEDIM + rowi] |= (rowbits as u128) << ((state.dim - 1 - x) * IMAGEDIM);
}
}
}
sea
}
// MSB is left or top
fn parse_tile(input: &[String]) -> (Tile, Image) {
let name = input[0].strip_prefix("Tile ").unwrap().strip_suffix(":").unwrap().parse().unwrap();
let top = input[1].as_bytes().iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
let bottom = input.last().unwrap().as_bytes().iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
let left = input[1..].iter().fold(0, |bits, row| {
(bits << 1) | ((*row.as_bytes().first().unwrap() == b'#') as u16)
});
let right = input[1..].iter().fold(0, |bits, row| {
(bits << 1) | ((*row.as_bytes().last().unwrap() == b'#') as u16)
});
let borders = (top, bottom, left, right);
let mut image = [0; IMAGEDIM];
for (srcstr, dstbits) in input[2..].iter().zip(image.iter_mut()) {
*dstbits = srcstr.as_bytes()[1..(1+IMAGEDIM)].iter().fold(0, |bits, &ch| {
(bits << 1) | ((ch == b'#') as u16)
});
}
(Tile { name, borders, orientation: Orientation::Up(false) }, image.clone())
}
// note: my input has 144 tiles - that would be 12*12, or 9*16, or 8*18, or 6*24, etc. but the
// specs hint that the final arrangement will be square
fn main() {
assert!(flipbits(1) == (1 << 9));
assert!(flipbits(1 << 9) == (1));
assert!(flipbits(0x1e) == (0x0f << 5));
assert!(flipbits(0x0f << 5) == (0x1e));
let lines: Vec<_> = io::stdin().lock().lines()
.map(|line| line.unwrap())
.collect();
let tiles: Vec<(Tile, Image)> = lines.split(|line| line == "").map(parse_tile).collect();
// assume the image is a square
let dim = (tiles.len() as f64).sqrt() as usize;
let puzzle_tiles: Vec<Tile> = tiles.iter().map(|(t, _i)| *t).collect();
let state = search(State::new(dim), puzzle_tiles).unwrap();
for y in 0..dim {
for x in 0..dim {
print!("{} ", state.at(x, y).unwrap().name);
}
println!();
}
let corners = [
state.at(0, 0).unwrap().name as u64,
state.at(dim - 1, 0).unwrap().name as u64,
state.at(0, dim - 1).unwrap().name as u64,
state.at(dim - 1, dim - 1).unwrap().name as u64,
];
println!("{}", corners[0] * corners[1] * corners[2] * corners[3]);
// indexed by name for easier lookup
let tilemap: HashMap<u16, &(Tile, Image)> = tiles.iter().map(|ti| {
(ti.0.name, ti)
}).collect();
let sea = form_actual_image(&tilemap, &state);
println!("{}", water_roughness(&sea));
}
| accepts | identifier_name |
snapshot_cmd.rs | // Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.
// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot and restoration commands.
//use std::time::Duration;
//use std::path::{Path, PathBuf};
//use std::sync::Arc;
//use client_traits::SnapshotClient;
//use hash::keccak;
//use snapshot::{SnapshotConfiguration, SnapshotService as SS};
//use snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
//use snapshot::service::Service as SnapshotService;
//use ethcore::client::{Client, DatabaseCompactionProfile, VMType};
//use ethcore::miner::Miner;
//use ethcore_service::ClientService;
use types::{
ids::BlockId,
// snapshot::Progress,
// client_types::Mode,
// snapshot::RestorationStatus,
};
use crate::cache::CacheConfig;
use crate::params::{SpecType, Pruning, Switch/*, tracing_switch_to_bool, fatdb_switch_to_bool*/};
//use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
//use user_defaults::UserDefaults;
//use ethcore_private_tx;
//use db;
/// Kinds of snapshot commands.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Kind {
// Take a snapshot.
// Take,
// Restore a snapshot.
// Restore
}
/// Command for snapshot creation or restoration.
#[derive(Debug, PartialEq)]
pub struct SnapshotCommand {
pub cache_config: CacheConfig,
pub dirs: Directories,
pub spec: SpecType,
pub pruning: Pruning,
pub pruning_history: u64,
pub pruning_memory: usize,
pub tracing: Switch,
pub fat_db: Switch,
// pub compaction: DatabaseCompactionProfile,
pub file_path: Option<String>,
pub kind: Kind,
pub block_at: BlockId,
pub max_round_blocks_to_import: usize,
// pub snapshot_conf: SnapshotConfiguration,
}
// helper for reading chunks from arbitrary reader and feeding them into the
// service.
//fn restore_using<R: SnapshotReader>(snapshot: Arc<SnapshotService<Client>>, reader: &R, recover: bool) -> Result<(), String> {
// let manifest = reader.manifest();
//
// info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash);
//
// snapshot.init_restore(manifest.clone(), recover).map_err(|e| {
// format!("Failed to begin restoration: {}", e)
// })?;
//
// let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());
//
// let informant_handle = snapshot.clone();
// ::std::thread::spawn(move || {
// while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done,.. } = informant_handle.status() {
// info!("Processed {}/{} state chunks and {}/{} block chunks.",
// state_chunks_done, num_state, block_chunks_done, num_blocks);
// ::std::thread::sleep(Duration::from_secs(5));
// }
// });
//
// info!("Restoring state");
// for &state_hash in &manifest.state_hashes {
// if snapshot.status() == RestorationStatus::Failed {
// return Err("Restoration failed".into());
// }
//
// let chunk = reader.chunk(state_hash)
// .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))?;
//
// let hash = keccak(&chunk);
// if hash!= state_hash {
// return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", state_hash, hash));
// }
//
// snapshot.feed_state_chunk(state_hash, &chunk);
// }
//
// info!("Restoring blocks");
// for &block_hash in &manifest.block_hashes {
// if snapshot.status() == RestorationStatus::Failed {
// return Err("Restoration failed".into());
// }
//
// let chunk = reader.chunk(block_hash)
// .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))?;
//
// let hash = keccak(&chunk);
// if hash!= block_hash {
// return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", block_hash, hash));
// }
// snapshot.feed_block_chunk(block_hash, &chunk);
// }
//
// match snapshot.status() {
// RestorationStatus::Ongoing {.. } => Err("Snapshot file is incomplete and missing chunks.".into()),
// RestorationStatus::Initializing {.. } => Err("Snapshot restoration is still initializing.".into()),
// RestorationStatus::Finalizing => Err("Snapshot restoration is still finalizing.".into()),
// RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
// RestorationStatus::Inactive => {
// info!("Restoration complete.");
// Ok(())
// }
// }
//}
impl SnapshotCommand {
// shared portion of snapshot commands: start the client service
// fn start_service(self) -> Result<ClientService, String> {
// // load spec file
// let spec = self.spec.spec(&self.dirs.cache)?;
//
// // load genesis hash
// let genesis_hash = spec.genesis_header().hash();
//
// // database paths
// let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone());
//
// // user defaults path
// let user_defaults_path = db_dirs.user_defaults_path();
//
// // load user defaults
// let user_defaults = UserDefaults::load(&user_defaults_path)?;
//
// // select pruning algorithm
// let algorithm = self.pruning.to_algorithm(&user_defaults);
//
// // check if tracing is on
// let tracing = tracing_switch_to_bool(self.tracing, &user_defaults)?;
//
// // check if fatdb is on
// let fat_db = fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)?;
//
// // prepare client and snapshot paths.
// let client_path = db_dirs.client_path(algorithm);
// let snapshot_path = db_dirs.snapshot_path();
//
// // execute upgrades
// execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?;
//
// // prepare client config
// let mut client_config = to_client_config(
// &self.cache_config,
// spec.name.to_lowercase(),
// Mode::Active,
// tracing,
// fat_db,
// self.compaction,
// VMType::default(),
// "".into(),
// algorithm,
// self.pruning_history,
// self.pruning_memory, | // true,
// self.max_round_blocks_to_import,
// );
//
// client_config.snapshot = self.snapshot_conf;
//
// let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
// let client_db = restoration_db_handler.open(&client_path)
// .map_err(|e| format!("Failed to open database {:?}", e))?;
//
// let service = ClientService::start(
// client_config,
// &spec,
// client_db,
// &snapshot_path,
// restoration_db_handler,
// &self.dirs.ipc_path(),
// // TODO [ToDr] don't use test miner here
// // (actually don't require miner at all)
// Arc::new(Miner::new_for_tests(&spec, None)),
// Arc::new(ethcore_private_tx::DummySigner),
// Box::new(ethcore_private_tx::NoopEncryptor),
// Default::default(),
// Default::default(),
// ).map_err(|e| format!("Client service error: {:?}", e))?;
//
// Ok(service)
// }
// restore from a snapshot
// pub fn restore(self) -> Result<(), String> {
// let file = self.file_path.clone();
// let service = self.start_service()?;
//
// warn!("Snapshot restoration is experimental and the format may be subject to change.");
// warn!("On encountering an unexpected error, please ensure that you have a recent snapshot.");
//
// let snapshot = service.snapshot_service();
//
// if let Some(file) = file {
// info!("Attempting to restore from snapshot at '{}'", file);
//
// let reader = PackedReader::new(Path::new(&file))
// .map_err(|e| format!("Couldn't open snapshot file: {}", e))
// .and_then(|x| x.ok_or("Snapshot file has invalid format.".into()));
//
// let reader = reader?;
// restore_using(snapshot, &reader, true)?;
// } else {
// info!("Attempting to restore from local snapshot.");
//
// // attempting restoration with recovery will lead to deadlock
// // as we currently hold a read lock on the service's reader.
// match *snapshot.reader() {
// Some(ref reader) => restore_using(snapshot.clone(), reader, false)?,
// None => return Err("No local snapshot found.".into()),
// }
// }
//
// Ok(())
// }
// Take a snapshot from the head of the chain.
// pub fn take_snapshot(self) -> Result<(), String> {
// let file_path = self.file_path.clone().ok_or("No file path provided.".to_owned())?;
// let file_path: PathBuf = file_path.into();
// let block_at = self.block_at;
// let service = self.start_service()?;
//
// warn!("Snapshots are currently experimental. File formats may be subject to change.");
//
// let writer = PackedWriter::new(&file_path)
// .map_err(|e| format!("Failed to open snapshot writer: {}", e))?;
//
// let progress = Arc::new(Progress::default());
// let p = progress.clone();
// let informant_handle = ::std::thread::spawn(move || {
// ::std::thread::sleep(Duration::from_secs(5));
//
// let mut last_size = 0;
// while!p.done() {
// let cur_size = p.size();
// if cur_size!= last_size {
// last_size = cur_size;
// let bytes = ::informant::format_bytes(cur_size as usize);
// info!("Snapshot: {} accounts {} blocks {}", p.accounts(), p.blocks(), bytes);
// }
//
// ::std::thread::sleep(Duration::from_secs(5));
// }
// });
//
// if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) {
// let _ = ::std::fs::remove_file(&file_path);
// return Err(format!("Encountered fatal error while creating snapshot: {}", e));
// }
//
// info!("snapshot creation complete");
//
// assert!(progress.done());
// informant_handle.join().map_err(|_| "failed to join logger thread")?;
//
// Ok(())
// }
}
// Execute this snapshot command.
//pub fn execute(cmd: SnapshotCommand) -> Result<String, String> {
// match cmd.kind {
// Kind::Take => cmd.take_snapshot()?,
// Kind::Restore => cmd.restore()?,
// }
//
// Ok(String::new())
//} | random_line_split |
|
snapshot_cmd.rs | // Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.
// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot and restoration commands.
//use std::time::Duration;
//use std::path::{Path, PathBuf};
//use std::sync::Arc;
//use client_traits::SnapshotClient;
//use hash::keccak;
//use snapshot::{SnapshotConfiguration, SnapshotService as SS};
//use snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
//use snapshot::service::Service as SnapshotService;
//use ethcore::client::{Client, DatabaseCompactionProfile, VMType};
//use ethcore::miner::Miner;
//use ethcore_service::ClientService;
use types::{
ids::BlockId,
// snapshot::Progress,
// client_types::Mode,
// snapshot::RestorationStatus,
};
use crate::cache::CacheConfig;
use crate::params::{SpecType, Pruning, Switch/*, tracing_switch_to_bool, fatdb_switch_to_bool*/};
//use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
//use user_defaults::UserDefaults;
//use ethcore_private_tx;
//use db;
/// Kinds of snapshot commands.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Kind {
// Take a snapshot.
// Take,
// Restore a snapshot.
// Restore
}
/// Command for snapshot creation or restoration.
#[derive(Debug, PartialEq)]
pub struct | {
pub cache_config: CacheConfig,
pub dirs: Directories,
pub spec: SpecType,
pub pruning: Pruning,
pub pruning_history: u64,
pub pruning_memory: usize,
pub tracing: Switch,
pub fat_db: Switch,
// pub compaction: DatabaseCompactionProfile,
pub file_path: Option<String>,
pub kind: Kind,
pub block_at: BlockId,
pub max_round_blocks_to_import: usize,
// pub snapshot_conf: SnapshotConfiguration,
}
// helper for reading chunks from arbitrary reader and feeding them into the
// service.
//fn restore_using<R: SnapshotReader>(snapshot: Arc<SnapshotService<Client>>, reader: &R, recover: bool) -> Result<(), String> {
// let manifest = reader.manifest();
//
// info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash);
//
// snapshot.init_restore(manifest.clone(), recover).map_err(|e| {
// format!("Failed to begin restoration: {}", e)
// })?;
//
// let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());
//
// let informant_handle = snapshot.clone();
// ::std::thread::spawn(move || {
// while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done,.. } = informant_handle.status() {
// info!("Processed {}/{} state chunks and {}/{} block chunks.",
// state_chunks_done, num_state, block_chunks_done, num_blocks);
// ::std::thread::sleep(Duration::from_secs(5));
// }
// });
//
// info!("Restoring state");
// for &state_hash in &manifest.state_hashes {
// if snapshot.status() == RestorationStatus::Failed {
// return Err("Restoration failed".into());
// }
//
// let chunk = reader.chunk(state_hash)
// .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))?;
//
// let hash = keccak(&chunk);
// if hash!= state_hash {
// return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", state_hash, hash));
// }
//
// snapshot.feed_state_chunk(state_hash, &chunk);
// }
//
// info!("Restoring blocks");
// for &block_hash in &manifest.block_hashes {
// if snapshot.status() == RestorationStatus::Failed {
// return Err("Restoration failed".into());
// }
//
// let chunk = reader.chunk(block_hash)
// .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))?;
//
// let hash = keccak(&chunk);
// if hash!= block_hash {
// return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", block_hash, hash));
// }
// snapshot.feed_block_chunk(block_hash, &chunk);
// }
//
// match snapshot.status() {
// RestorationStatus::Ongoing {.. } => Err("Snapshot file is incomplete and missing chunks.".into()),
// RestorationStatus::Initializing {.. } => Err("Snapshot restoration is still initializing.".into()),
// RestorationStatus::Finalizing => Err("Snapshot restoration is still finalizing.".into()),
// RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
// RestorationStatus::Inactive => {
// info!("Restoration complete.");
// Ok(())
// }
// }
//}
impl SnapshotCommand {
// shared portion of snapshot commands: start the client service
// fn start_service(self) -> Result<ClientService, String> {
// // load spec file
// let spec = self.spec.spec(&self.dirs.cache)?;
//
// // load genesis hash
// let genesis_hash = spec.genesis_header().hash();
//
// // database paths
// let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone());
//
// // user defaults path
// let user_defaults_path = db_dirs.user_defaults_path();
//
// // load user defaults
// let user_defaults = UserDefaults::load(&user_defaults_path)?;
//
// // select pruning algorithm
// let algorithm = self.pruning.to_algorithm(&user_defaults);
//
// // check if tracing is on
// let tracing = tracing_switch_to_bool(self.tracing, &user_defaults)?;
//
// // check if fatdb is on
// let fat_db = fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)?;
//
// // prepare client and snapshot paths.
// let client_path = db_dirs.client_path(algorithm);
// let snapshot_path = db_dirs.snapshot_path();
//
// // execute upgrades
// execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?;
//
// // prepare client config
// let mut client_config = to_client_config(
// &self.cache_config,
// spec.name.to_lowercase(),
// Mode::Active,
// tracing,
// fat_db,
// self.compaction,
// VMType::default(),
// "".into(),
// algorithm,
// self.pruning_history,
// self.pruning_memory,
// true,
// self.max_round_blocks_to_import,
// );
//
// client_config.snapshot = self.snapshot_conf;
//
// let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
// let client_db = restoration_db_handler.open(&client_path)
// .map_err(|e| format!("Failed to open database {:?}", e))?;
//
// let service = ClientService::start(
// client_config,
// &spec,
// client_db,
// &snapshot_path,
// restoration_db_handler,
// &self.dirs.ipc_path(),
// // TODO [ToDr] don't use test miner here
// // (actually don't require miner at all)
// Arc::new(Miner::new_for_tests(&spec, None)),
// Arc::new(ethcore_private_tx::DummySigner),
// Box::new(ethcore_private_tx::NoopEncryptor),
// Default::default(),
// Default::default(),
// ).map_err(|e| format!("Client service error: {:?}", e))?;
//
// Ok(service)
// }
// restore from a snapshot
// pub fn restore(self) -> Result<(), String> {
// let file = self.file_path.clone();
// let service = self.start_service()?;
//
// warn!("Snapshot restoration is experimental and the format may be subject to change.");
// warn!("On encountering an unexpected error, please ensure that you have a recent snapshot.");
//
// let snapshot = service.snapshot_service();
//
// if let Some(file) = file {
// info!("Attempting to restore from snapshot at '{}'", file);
//
// let reader = PackedReader::new(Path::new(&file))
// .map_err(|e| format!("Couldn't open snapshot file: {}", e))
// .and_then(|x| x.ok_or("Snapshot file has invalid format.".into()));
//
// let reader = reader?;
// restore_using(snapshot, &reader, true)?;
// } else {
// info!("Attempting to restore from local snapshot.");
//
// // attempting restoration with recovery will lead to deadlock
// // as we currently hold a read lock on the service's reader.
// match *snapshot.reader() {
// Some(ref reader) => restore_using(snapshot.clone(), reader, false)?,
// None => return Err("No local snapshot found.".into()),
// }
// }
//
// Ok(())
// }
// Take a snapshot from the head of the chain.
// pub fn take_snapshot(self) -> Result<(), String> {
// let file_path = self.file_path.clone().ok_or("No file path provided.".to_owned())?;
// let file_path: PathBuf = file_path.into();
// let block_at = self.block_at;
// let service = self.start_service()?;
//
// warn!("Snapshots are currently experimental. File formats may be subject to change.");
//
// let writer = PackedWriter::new(&file_path)
// .map_err(|e| format!("Failed to open snapshot writer: {}", e))?;
//
// let progress = Arc::new(Progress::default());
// let p = progress.clone();
// let informant_handle = ::std::thread::spawn(move || {
// ::std::thread::sleep(Duration::from_secs(5));
//
// let mut last_size = 0;
// while!p.done() {
// let cur_size = p.size();
// if cur_size!= last_size {
// last_size = cur_size;
// let bytes = ::informant::format_bytes(cur_size as usize);
// info!("Snapshot: {} accounts {} blocks {}", p.accounts(), p.blocks(), bytes);
// }
//
// ::std::thread::sleep(Duration::from_secs(5));
// }
// });
//
// if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) {
// let _ = ::std::fs::remove_file(&file_path);
// return Err(format!("Encountered fatal error while creating snapshot: {}", e));
// }
//
// info!("snapshot creation complete");
//
// assert!(progress.done());
// informant_handle.join().map_err(|_| "failed to join logger thread")?;
//
// Ok(())
// }
}
// Execute this snapshot command.
//pub fn execute(cmd: SnapshotCommand) -> Result<String, String> {
// match cmd.kind {
// Kind::Take => cmd.take_snapshot()?,
// Kind::Restore => cmd.restore()?,
// }
//
// Ok(String::new())
//}
| SnapshotCommand | identifier_name |
config.rs | use anyhow::{Context, Error};
use clipboard::{ClipboardContext, ClipboardProvider};
use image::Rgba;
use silicon::directories::PROJECT_DIRS;
use silicon::formatter::{ImageFormatter, ImageFormatterBuilder};
use silicon::utils::{Background, ShadowAdder, ToRgba};
use std::ffi::OsString;
use std::fs::File;
use std::io::{stdin, Read};
use std::num::ParseIntError;
use std::path::PathBuf;
use structopt::clap::AppSettings::ColoredHelp;
use structopt::StructOpt;
use syntect::highlighting::{Theme, ThemeSet};
use syntect::parsing::{SyntaxReference, SyntaxSet};
pub fn config_file() -> PathBuf {
std::env::var("SILICON_CONFIG_PATH")
.ok()
.map(PathBuf::from)
.filter(|config_path| config_path.is_file())
.unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config"))
}
pub fn get_args_from_config_file() -> Vec<OsString> |
fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> {
s.to_rgba()
.map_err(|_| format_err!("Invalid color: `{}`", s))
}
fn parse_font_str(s: &str) -> Vec<(String, f32)> {
let mut result = vec![];
for font in s.split(';') {
let tmp = font.split('=').collect::<Vec<_>>();
let font_name = tmp[0].to_owned();
let font_size = tmp
.get(1)
.map(|s| s.parse::<f32>().unwrap())
.unwrap_or(26.0);
result.push((font_name, font_size));
}
result
}
fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> {
let mut result = vec![];
for range in s.split(';') {
let range: Vec<u32> = range
.split('-')
.map(|s| s.parse::<u32>())
.collect::<Result<Vec<_>, _>>()?;
if range.len() == 1 {
result.push(range[0])
} else {
for i in range[0]..=range[1] {
result.push(i);
}
}
}
Ok(result)
}
// https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180
type FontList = Vec<(String, f32)>;
type Lines = Vec<u32>;
#[derive(StructOpt, Debug)]
#[structopt(name = "silicon")]
#[structopt(global_setting(ColoredHelp))]
pub struct Config {
/// Background image
#[structopt(long, value_name = "IMAGE", conflicts_with = "background")]
pub background_image: Option<PathBuf>,
/// Background color of the image
#[structopt(
long,
short,
value_name = "COLOR",
default_value = "#aaaaff",
parse(try_from_str = parse_str_color)
)]
pub background: Rgba<u8>,
/// Show the path of silicon config file
#[structopt(long)]
pub config_file: bool,
/// Read input from clipboard.
#[structopt(long)]
pub from_clipboard: bool,
/// File to read. If not set, stdin will be use.
#[structopt(value_name = "FILE", parse(from_os_str))]
pub file: Option<PathBuf>,
/// The fallback font list. eg. 'Hack; SimSun=31'
#[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))]
pub font: Option<FontList>,
/// Lines to high light. rg. '1-3; 4'
#[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))]
pub highlight_lines: Option<Lines>,
/// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs").
#[structopt(short, value_name = "LANG", long)]
pub language: Option<String>,
/// Pad between lines
#[structopt(long, value_name = "PAD", default_value = "2")]
pub line_pad: u32,
/// Line number offset
#[structopt(long, value_name = "OFFSET", default_value = "1")]
pub line_offset: u32,
/// List all themes.
#[structopt(long)]
pub list_themes: bool,
/// List all available fonts in your system
#[structopt(long)]
pub list_fonts: bool,
/// Write output image to specific location instead of cwd.
#[structopt(
short,
long,
value_name = "PATH",
required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"]
)]
pub output: Option<PathBuf>,
/// Hide the window controls.
#[structopt(long)]
pub no_window_controls: bool,
/// Show window title
#[structopt(long, value_name = "WINDOW_TITLE")]
pub window_title: Option<String>,
/// Hide the line number.
#[structopt(long)]
pub no_line_number: bool,
/// Don't round the corner
#[structopt(long)]
pub no_round_corner: bool,
/// Pad horiz
#[structopt(long, value_name = "PAD", default_value = "80")]
pub pad_horiz: u32,
/// Pad vert
#[structopt(long, value_name = "PAD", default_value = "100")]
pub pad_vert: u32,
/// Color of shadow
#[structopt(
long,
value_name = "COLOR",
default_value = "#555555",
parse(try_from_str = parse_str_color)
)]
pub shadow_color: Rgba<u8>,
/// Blur radius of the shadow. (set it to 0 to hide shadow)
#[structopt(long, value_name = "R", default_value = "0")]
pub shadow_blur_radius: f32,
/// Shadow's offset in Y axis
#[structopt(long, value_name = "Y", default_value = "0")]
pub shadow_offset_y: i32,
/// Shadow's offset in X axis
#[structopt(long, value_name = "X", default_value = "0")]
pub shadow_offset_x: i32,
/// Tab width
#[structopt(long, value_name = "WIDTH", default_value = "4")]
pub tab_width: u8,
/// The syntax highlight theme. It can be a theme name or path to a.tmTheme file.
#[structopt(long, value_name = "THEME", default_value = "Dracula")]
pub theme: String,
/// Copy the output image to clipboard.
#[structopt(short = "c", long)]
pub to_clipboard: bool,
// Draw a custom text on the bottom right corner
// #[structopt(long)]
// watermark: Option<String>,
/// build syntax definition and theme cache
#[structopt(long, value_name = "OUTPUT_DIR")]
pub build_cache: Option<Option<PathBuf>>,
}
impl Config {
pub fn get_source_code<'a>(
&self,
ps: &'a SyntaxSet,
) -> Result<(&'a SyntaxReference, String), Error> {
let possible_language = self.language.as_ref().map(|language| {
ps.find_syntax_by_token(language)
.ok_or_else(|| format_err!("Unsupported language: {}", language))
});
if self.from_clipboard {
let mut ctx = ClipboardContext::new()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let code = ctx
.get_contents()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&code)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, code));
}
if let Some(path) = &self.file {
let mut s = String::new();
let mut file = File::open(path)?;
file.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_for_file(path)?
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, s));
}
let mut stdin = stdin();
let mut s = String::new();
stdin.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&s)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
Ok((language, s))
}
pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> {
if let Some(theme) = ts.themes.get(&self.theme) {
Ok(theme.clone())
} else {
ThemeSet::get_theme(&self.theme)
.context(format!("Canot load the theme: {}", self.theme))
}
}
pub fn get_formatter(&self) -> Result<ImageFormatter, Error> {
let formatter = ImageFormatterBuilder::new()
.line_pad(self.line_pad)
.window_controls(!self.no_window_controls)
.window_title(self.window_title.clone())
.line_number(!self.no_line_number)
.font(self.font.clone().unwrap_or_default())
.round_corner(!self.no_round_corner)
.shadow_adder(self.get_shadow_adder()?)
.tab_width(self.tab_width)
.highlight_lines(self.highlight_lines.clone().unwrap_or_default())
.line_offset(self.line_offset);
Ok(formatter.build()?)
}
pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> {
Ok(ShadowAdder::new()
.background(match &self.background_image {
Some(path) => Background::Image(image::open(path)?.to_rgba8()),
None => Background::Solid(self.background),
})
.shadow_color(self.shadow_color)
.blur_radius(self.shadow_blur_radius)
.pad_horiz(self.pad_horiz)
.pad_vert(self.pad_vert)
.offset_x(self.shadow_offset_x)
.offset_y(self.shadow_offset_y))
}
pub fn get_expanded_output(&self) -> Option<PathBuf> {
let need_expand = self.output.as_ref().map(|p| p.starts_with("~")) == Some(true);
if let (Ok(home_dir), true) = (std::env::var("HOME"), need_expand) {
self.output
.as_ref()
.map(|p| p.to_string_lossy().replacen('~', &home_dir, 1).into())
} else {
self.output.clone()
}
}
}
| {
let args = std::fs::read_to_string(config_file())
.ok()
.and_then(|content| {
content
.split('\n')
.map(|line| line.trim())
.filter(|line| !line.starts_with('#') && !line.is_empty())
.map(shell_words::split)
.collect::<Result<Vec<_>, _>>()
.ok()
})
.unwrap_or_default();
args.iter().flatten().map(OsString::from).collect()
} | identifier_body |
config.rs | use anyhow::{Context, Error};
use clipboard::{ClipboardContext, ClipboardProvider};
use image::Rgba;
use silicon::directories::PROJECT_DIRS;
use silicon::formatter::{ImageFormatter, ImageFormatterBuilder};
use silicon::utils::{Background, ShadowAdder, ToRgba};
use std::ffi::OsString;
use std::fs::File;
use std::io::{stdin, Read};
use std::num::ParseIntError;
use std::path::PathBuf;
use structopt::clap::AppSettings::ColoredHelp;
use structopt::StructOpt;
use syntect::highlighting::{Theme, ThemeSet};
use syntect::parsing::{SyntaxReference, SyntaxSet};
pub fn config_file() -> PathBuf {
std::env::var("SILICON_CONFIG_PATH")
.ok()
.map(PathBuf::from)
.filter(|config_path| config_path.is_file())
.unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config"))
}
pub fn get_args_from_config_file() -> Vec<OsString> {
let args = std::fs::read_to_string(config_file())
.ok()
.and_then(|content| {
content
.split('\n')
.map(|line| line.trim())
.filter(|line|!line.starts_with('#') &&!line.is_empty())
.map(shell_words::split)
.collect::<Result<Vec<_>, _>>()
.ok()
})
.unwrap_or_default();
args.iter().flatten().map(OsString::from).collect()
}
fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> {
s.to_rgba()
.map_err(|_| format_err!("Invalid color: `{}`", s))
}
fn parse_font_str(s: &str) -> Vec<(String, f32)> {
let mut result = vec![];
for font in s.split(';') {
let tmp = font.split('=').collect::<Vec<_>>();
let font_name = tmp[0].to_owned();
let font_size = tmp
.get(1)
.map(|s| s.parse::<f32>().unwrap())
.unwrap_or(26.0);
result.push((font_name, font_size));
}
result
}
fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> {
let mut result = vec![];
for range in s.split(';') {
let range: Vec<u32> = range
.split('-')
.map(|s| s.parse::<u32>())
.collect::<Result<Vec<_>, _>>()?;
if range.len() == 1 {
result.push(range[0])
} else {
for i in range[0]..=range[1] {
result.push(i);
}
}
}
Ok(result)
}
// https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180
type FontList = Vec<(String, f32)>;
type Lines = Vec<u32>;
#[derive(StructOpt, Debug)]
#[structopt(name = "silicon")]
#[structopt(global_setting(ColoredHelp))]
pub struct Config {
/// Background image
#[structopt(long, value_name = "IMAGE", conflicts_with = "background")]
pub background_image: Option<PathBuf>,
/// Background color of the image
#[structopt(
long,
short,
value_name = "COLOR",
default_value = "#aaaaff",
parse(try_from_str = parse_str_color)
)]
pub background: Rgba<u8>,
/// Show the path of silicon config file
#[structopt(long)]
pub config_file: bool,
/// Read input from clipboard.
#[structopt(long)]
pub from_clipboard: bool,
/// File to read. If not set, stdin will be use.
#[structopt(value_name = "FILE", parse(from_os_str))]
pub file: Option<PathBuf>,
/// The fallback font list. eg. 'Hack; SimSun=31'
#[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))]
pub font: Option<FontList>,
/// Lines to high light. rg. '1-3; 4'
#[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))]
pub highlight_lines: Option<Lines>,
/// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs").
#[structopt(short, value_name = "LANG", long)]
pub language: Option<String>,
/// Pad between lines
#[structopt(long, value_name = "PAD", default_value = "2")]
pub line_pad: u32,
/// Line number offset
#[structopt(long, value_name = "OFFSET", default_value = "1")]
pub line_offset: u32,
/// List all themes.
#[structopt(long)]
pub list_themes: bool,
/// List all available fonts in your system
#[structopt(long)]
pub list_fonts: bool,
/// Write output image to specific location instead of cwd.
#[structopt(
short,
long,
value_name = "PATH",
required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"]
)]
pub output: Option<PathBuf>,
/// Hide the window controls.
#[structopt(long)]
pub no_window_controls: bool,
/// Show window title
#[structopt(long, value_name = "WINDOW_TITLE")]
pub window_title: Option<String>,
/// Hide the line number.
#[structopt(long)]
pub no_line_number: bool,
/// Don't round the corner
#[structopt(long)]
pub no_round_corner: bool,
/// Pad horiz
#[structopt(long, value_name = "PAD", default_value = "80")]
pub pad_horiz: u32,
/// Pad vert
#[structopt(long, value_name = "PAD", default_value = "100")]
pub pad_vert: u32,
/// Color of shadow
#[structopt(
long,
value_name = "COLOR",
default_value = "#555555",
parse(try_from_str = parse_str_color)
)]
pub shadow_color: Rgba<u8>,
/// Blur radius of the shadow. (set it to 0 to hide shadow)
#[structopt(long, value_name = "R", default_value = "0")]
pub shadow_blur_radius: f32,
/// Shadow's offset in Y axis
#[structopt(long, value_name = "Y", default_value = "0")]
pub shadow_offset_y: i32,
/// Shadow's offset in X axis
#[structopt(long, value_name = "X", default_value = "0")]
pub shadow_offset_x: i32,
/// Tab width
#[structopt(long, value_name = "WIDTH", default_value = "4")]
pub tab_width: u8,
/// The syntax highlight theme. It can be a theme name or path to a.tmTheme file.
#[structopt(long, value_name = "THEME", default_value = "Dracula")]
pub theme: String,
/// Copy the output image to clipboard.
#[structopt(short = "c", long)]
pub to_clipboard: bool,
// Draw a custom text on the bottom right corner
// #[structopt(long)]
// watermark: Option<String>,
/// build syntax definition and theme cache
#[structopt(long, value_name = "OUTPUT_DIR")]
pub build_cache: Option<Option<PathBuf>>,
}
impl Config {
pub fn get_source_code<'a>(
&self,
ps: &'a SyntaxSet,
) -> Result<(&'a SyntaxReference, String), Error> {
let possible_language = self.language.as_ref().map(|language| {
ps.find_syntax_by_token(language)
.ok_or_else(|| format_err!("Unsupported language: {}", language))
});
if self.from_clipboard {
let mut ctx = ClipboardContext::new()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let code = ctx
.get_contents()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&code)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, code));
}
if let Some(path) = &self.file {
let mut s = String::new();
let mut file = File::open(path)?;
file.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_for_file(path)?
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, s));
}
let mut stdin = stdin();
let mut s = String::new();
stdin.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&s)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
Ok((language, s))
}
pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> {
if let Some(theme) = ts.themes.get(&self.theme) {
Ok(theme.clone())
} else {
ThemeSet::get_theme(&self.theme)
.context(format!("Canot load the theme: {}", self.theme))
}
}
pub fn get_formatter(&self) -> Result<ImageFormatter, Error> {
let formatter = ImageFormatterBuilder::new()
.line_pad(self.line_pad)
.window_controls(!self.no_window_controls)
.window_title(self.window_title.clone())
.line_number(!self.no_line_number)
.font(self.font.clone().unwrap_or_default())
.round_corner(!self.no_round_corner)
.shadow_adder(self.get_shadow_adder()?)
.tab_width(self.tab_width)
.highlight_lines(self.highlight_lines.clone().unwrap_or_default())
.line_offset(self.line_offset);
Ok(formatter.build()?)
}
pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> {
Ok(ShadowAdder::new()
.background(match &self.background_image {
Some(path) => Background::Image(image::open(path)?.to_rgba8()),
None => Background::Solid(self.background),
})
.shadow_color(self.shadow_color)
.blur_radius(self.shadow_blur_radius)
.pad_horiz(self.pad_horiz)
.pad_vert(self.pad_vert)
.offset_x(self.shadow_offset_x)
.offset_y(self.shadow_offset_y))
}
pub fn | (&self) -> Option<PathBuf> {
let need_expand = self.output.as_ref().map(|p| p.starts_with("~")) == Some(true);
if let (Ok(home_dir), true) = (std::env::var("HOME"), need_expand) {
self.output
.as_ref()
.map(|p| p.to_string_lossy().replacen('~', &home_dir, 1).into())
} else {
self.output.clone()
}
}
}
| get_expanded_output | identifier_name |
config.rs | use anyhow::{Context, Error};
use clipboard::{ClipboardContext, ClipboardProvider};
use image::Rgba;
use silicon::directories::PROJECT_DIRS;
use silicon::formatter::{ImageFormatter, ImageFormatterBuilder};
use silicon::utils::{Background, ShadowAdder, ToRgba};
use std::ffi::OsString;
use std::fs::File;
use std::io::{stdin, Read};
use std::num::ParseIntError;
use std::path::PathBuf;
use structopt::clap::AppSettings::ColoredHelp;
use structopt::StructOpt;
use syntect::highlighting::{Theme, ThemeSet};
use syntect::parsing::{SyntaxReference, SyntaxSet};
pub fn config_file() -> PathBuf {
std::env::var("SILICON_CONFIG_PATH")
.ok()
.map(PathBuf::from)
.filter(|config_path| config_path.is_file())
.unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config"))
}
pub fn get_args_from_config_file() -> Vec<OsString> {
let args = std::fs::read_to_string(config_file())
.ok()
.and_then(|content| {
content
.split('\n')
.map(|line| line.trim())
.filter(|line|!line.starts_with('#') &&!line.is_empty())
.map(shell_words::split)
.collect::<Result<Vec<_>, _>>()
.ok()
})
.unwrap_or_default();
args.iter().flatten().map(OsString::from).collect()
}
fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> {
s.to_rgba()
.map_err(|_| format_err!("Invalid color: `{}`", s))
}
fn parse_font_str(s: &str) -> Vec<(String, f32)> {
let mut result = vec![];
for font in s.split(';') {
let tmp = font.split('=').collect::<Vec<_>>();
let font_name = tmp[0].to_owned();
let font_size = tmp
.get(1)
.map(|s| s.parse::<f32>().unwrap())
.unwrap_or(26.0);
result.push((font_name, font_size));
}
result
}
fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> {
let mut result = vec![];
for range in s.split(';') {
let range: Vec<u32> = range
.split('-')
.map(|s| s.parse::<u32>())
.collect::<Result<Vec<_>, _>>()?;
if range.len() == 1 {
result.push(range[0])
} else {
for i in range[0]..=range[1] {
result.push(i);
}
}
}
Ok(result)
}
// https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180
type FontList = Vec<(String, f32)>; | pub struct Config {
/// Background image
#[structopt(long, value_name = "IMAGE", conflicts_with = "background")]
pub background_image: Option<PathBuf>,
/// Background color of the image
#[structopt(
long,
short,
value_name = "COLOR",
default_value = "#aaaaff",
parse(try_from_str = parse_str_color)
)]
pub background: Rgba<u8>,
/// Show the path of silicon config file
#[structopt(long)]
pub config_file: bool,
/// Read input from clipboard.
#[structopt(long)]
pub from_clipboard: bool,
/// File to read. If not set, stdin will be use.
#[structopt(value_name = "FILE", parse(from_os_str))]
pub file: Option<PathBuf>,
/// The fallback font list. eg. 'Hack; SimSun=31'
#[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))]
pub font: Option<FontList>,
/// Lines to high light. rg. '1-3; 4'
#[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))]
pub highlight_lines: Option<Lines>,
/// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs").
#[structopt(short, value_name = "LANG", long)]
pub language: Option<String>,
/// Pad between lines
#[structopt(long, value_name = "PAD", default_value = "2")]
pub line_pad: u32,
/// Line number offset
#[structopt(long, value_name = "OFFSET", default_value = "1")]
pub line_offset: u32,
/// List all themes.
#[structopt(long)]
pub list_themes: bool,
/// List all available fonts in your system
#[structopt(long)]
pub list_fonts: bool,
/// Write output image to specific location instead of cwd.
#[structopt(
short,
long,
value_name = "PATH",
required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"]
)]
pub output: Option<PathBuf>,
/// Hide the window controls.
#[structopt(long)]
pub no_window_controls: bool,
/// Show window title
#[structopt(long, value_name = "WINDOW_TITLE")]
pub window_title: Option<String>,
/// Hide the line number.
#[structopt(long)]
pub no_line_number: bool,
/// Don't round the corner
#[structopt(long)]
pub no_round_corner: bool,
/// Pad horiz
#[structopt(long, value_name = "PAD", default_value = "80")]
pub pad_horiz: u32,
/// Pad vert
#[structopt(long, value_name = "PAD", default_value = "100")]
pub pad_vert: u32,
/// Color of shadow
#[structopt(
long,
value_name = "COLOR",
default_value = "#555555",
parse(try_from_str = parse_str_color)
)]
pub shadow_color: Rgba<u8>,
/// Blur radius of the shadow. (set it to 0 to hide shadow)
#[structopt(long, value_name = "R", default_value = "0")]
pub shadow_blur_radius: f32,
/// Shadow's offset in Y axis
#[structopt(long, value_name = "Y", default_value = "0")]
pub shadow_offset_y: i32,
/// Shadow's offset in X axis
#[structopt(long, value_name = "X", default_value = "0")]
pub shadow_offset_x: i32,
/// Tab width
#[structopt(long, value_name = "WIDTH", default_value = "4")]
pub tab_width: u8,
/// The syntax highlight theme. It can be a theme name or path to a.tmTheme file.
#[structopt(long, value_name = "THEME", default_value = "Dracula")]
pub theme: String,
/// Copy the output image to clipboard.
#[structopt(short = "c", long)]
pub to_clipboard: bool,
// Draw a custom text on the bottom right corner
// #[structopt(long)]
// watermark: Option<String>,
/// build syntax definition and theme cache
#[structopt(long, value_name = "OUTPUT_DIR")]
pub build_cache: Option<Option<PathBuf>>,
}
impl Config {
pub fn get_source_code<'a>(
&self,
ps: &'a SyntaxSet,
) -> Result<(&'a SyntaxReference, String), Error> {
let possible_language = self.language.as_ref().map(|language| {
ps.find_syntax_by_token(language)
.ok_or_else(|| format_err!("Unsupported language: {}", language))
});
if self.from_clipboard {
let mut ctx = ClipboardContext::new()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let code = ctx
.get_contents()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&code)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, code));
}
if let Some(path) = &self.file {
let mut s = String::new();
let mut file = File::open(path)?;
file.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_for_file(path)?
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, s));
}
let mut stdin = stdin();
let mut s = String::new();
stdin.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&s)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
Ok((language, s))
}
pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> {
if let Some(theme) = ts.themes.get(&self.theme) {
Ok(theme.clone())
} else {
ThemeSet::get_theme(&self.theme)
.context(format!("Canot load the theme: {}", self.theme))
}
}
pub fn get_formatter(&self) -> Result<ImageFormatter, Error> {
let formatter = ImageFormatterBuilder::new()
.line_pad(self.line_pad)
.window_controls(!self.no_window_controls)
.window_title(self.window_title.clone())
.line_number(!self.no_line_number)
.font(self.font.clone().unwrap_or_default())
.round_corner(!self.no_round_corner)
.shadow_adder(self.get_shadow_adder()?)
.tab_width(self.tab_width)
.highlight_lines(self.highlight_lines.clone().unwrap_or_default())
.line_offset(self.line_offset);
Ok(formatter.build()?)
}
pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> {
Ok(ShadowAdder::new()
.background(match &self.background_image {
Some(path) => Background::Image(image::open(path)?.to_rgba8()),
None => Background::Solid(self.background),
})
.shadow_color(self.shadow_color)
.blur_radius(self.shadow_blur_radius)
.pad_horiz(self.pad_horiz)
.pad_vert(self.pad_vert)
.offset_x(self.shadow_offset_x)
.offset_y(self.shadow_offset_y))
}
pub fn get_expanded_output(&self) -> Option<PathBuf> {
let need_expand = self.output.as_ref().map(|p| p.starts_with("~")) == Some(true);
if let (Ok(home_dir), true) = (std::env::var("HOME"), need_expand) {
self.output
.as_ref()
.map(|p| p.to_string_lossy().replacen('~', &home_dir, 1).into())
} else {
self.output.clone()
}
}
} | type Lines = Vec<u32>;
#[derive(StructOpt, Debug)]
#[structopt(name = "silicon")]
#[structopt(global_setting(ColoredHelp))] | random_line_split |
config.rs | use anyhow::{Context, Error};
use clipboard::{ClipboardContext, ClipboardProvider};
use image::Rgba;
use silicon::directories::PROJECT_DIRS;
use silicon::formatter::{ImageFormatter, ImageFormatterBuilder};
use silicon::utils::{Background, ShadowAdder, ToRgba};
use std::ffi::OsString;
use std::fs::File;
use std::io::{stdin, Read};
use std::num::ParseIntError;
use std::path::PathBuf;
use structopt::clap::AppSettings::ColoredHelp;
use structopt::StructOpt;
use syntect::highlighting::{Theme, ThemeSet};
use syntect::parsing::{SyntaxReference, SyntaxSet};
pub fn config_file() -> PathBuf {
std::env::var("SILICON_CONFIG_PATH")
.ok()
.map(PathBuf::from)
.filter(|config_path| config_path.is_file())
.unwrap_or_else(|| PROJECT_DIRS.config_dir().join("config"))
}
pub fn get_args_from_config_file() -> Vec<OsString> {
let args = std::fs::read_to_string(config_file())
.ok()
.and_then(|content| {
content
.split('\n')
.map(|line| line.trim())
.filter(|line|!line.starts_with('#') &&!line.is_empty())
.map(shell_words::split)
.collect::<Result<Vec<_>, _>>()
.ok()
})
.unwrap_or_default();
args.iter().flatten().map(OsString::from).collect()
}
fn parse_str_color(s: &str) -> Result<Rgba<u8>, Error> {
s.to_rgba()
.map_err(|_| format_err!("Invalid color: `{}`", s))
}
fn parse_font_str(s: &str) -> Vec<(String, f32)> {
let mut result = vec![];
for font in s.split(';') {
let tmp = font.split('=').collect::<Vec<_>>();
let font_name = tmp[0].to_owned();
let font_size = tmp
.get(1)
.map(|s| s.parse::<f32>().unwrap())
.unwrap_or(26.0);
result.push((font_name, font_size));
}
result
}
fn parse_line_range(s: &str) -> Result<Vec<u32>, ParseIntError> {
let mut result = vec![];
for range in s.split(';') {
let range: Vec<u32> = range
.split('-')
.map(|s| s.parse::<u32>())
.collect::<Result<Vec<_>, _>>()?;
if range.len() == 1 {
result.push(range[0])
} else {
for i in range[0]..=range[1] {
result.push(i);
}
}
}
Ok(result)
}
// https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md#support-optional-vectors-of-arguments-for-distinguishing-between--o-1-2--o-and-no-option-provided-at-all-by-sphynx-180
type FontList = Vec<(String, f32)>;
type Lines = Vec<u32>;
#[derive(StructOpt, Debug)]
#[structopt(name = "silicon")]
#[structopt(global_setting(ColoredHelp))]
pub struct Config {
/// Background image
#[structopt(long, value_name = "IMAGE", conflicts_with = "background")]
pub background_image: Option<PathBuf>,
/// Background color of the image
#[structopt(
long,
short,
value_name = "COLOR",
default_value = "#aaaaff",
parse(try_from_str = parse_str_color)
)]
pub background: Rgba<u8>,
/// Show the path of silicon config file
#[structopt(long)]
pub config_file: bool,
/// Read input from clipboard.
#[structopt(long)]
pub from_clipboard: bool,
/// File to read. If not set, stdin will be use.
#[structopt(value_name = "FILE", parse(from_os_str))]
pub file: Option<PathBuf>,
/// The fallback font list. eg. 'Hack; SimSun=31'
#[structopt(long, short, value_name = "FONT", parse(from_str = parse_font_str))]
pub font: Option<FontList>,
/// Lines to high light. rg. '1-3; 4'
#[structopt(long, value_name = "LINES", parse(try_from_str = parse_line_range))]
pub highlight_lines: Option<Lines>,
/// The language for syntax highlighting. You can use full name ("Rust") or file extension ("rs").
#[structopt(short, value_name = "LANG", long)]
pub language: Option<String>,
/// Pad between lines
#[structopt(long, value_name = "PAD", default_value = "2")]
pub line_pad: u32,
/// Line number offset
#[structopt(long, value_name = "OFFSET", default_value = "1")]
pub line_offset: u32,
/// List all themes.
#[structopt(long)]
pub list_themes: bool,
/// List all available fonts in your system
#[structopt(long)]
pub list_fonts: bool,
/// Write output image to specific location instead of cwd.
#[structopt(
short,
long,
value_name = "PATH",
required_unless_one = &["config-file", "list-fonts", "list-themes", "to-clipboard", "build-cache"]
)]
pub output: Option<PathBuf>,
/// Hide the window controls.
#[structopt(long)]
pub no_window_controls: bool,
/// Show window title
#[structopt(long, value_name = "WINDOW_TITLE")]
pub window_title: Option<String>,
/// Hide the line number.
#[structopt(long)]
pub no_line_number: bool,
/// Don't round the corner
#[structopt(long)]
pub no_round_corner: bool,
/// Pad horiz
#[structopt(long, value_name = "PAD", default_value = "80")]
pub pad_horiz: u32,
/// Pad vert
#[structopt(long, value_name = "PAD", default_value = "100")]
pub pad_vert: u32,
/// Color of shadow
#[structopt(
long,
value_name = "COLOR",
default_value = "#555555",
parse(try_from_str = parse_str_color)
)]
pub shadow_color: Rgba<u8>,
/// Blur radius of the shadow. (set it to 0 to hide shadow)
#[structopt(long, value_name = "R", default_value = "0")]
pub shadow_blur_radius: f32,
/// Shadow's offset in Y axis
#[structopt(long, value_name = "Y", default_value = "0")]
pub shadow_offset_y: i32,
/// Shadow's offset in X axis
#[structopt(long, value_name = "X", default_value = "0")]
pub shadow_offset_x: i32,
/// Tab width
#[structopt(long, value_name = "WIDTH", default_value = "4")]
pub tab_width: u8,
/// The syntax highlight theme. It can be a theme name or path to a.tmTheme file.
#[structopt(long, value_name = "THEME", default_value = "Dracula")]
pub theme: String,
/// Copy the output image to clipboard.
#[structopt(short = "c", long)]
pub to_clipboard: bool,
// Draw a custom text on the bottom right corner
// #[structopt(long)]
// watermark: Option<String>,
/// build syntax definition and theme cache
#[structopt(long, value_name = "OUTPUT_DIR")]
pub build_cache: Option<Option<PathBuf>>,
}
impl Config {
pub fn get_source_code<'a>(
&self,
ps: &'a SyntaxSet,
) -> Result<(&'a SyntaxReference, String), Error> {
let possible_language = self.language.as_ref().map(|language| {
ps.find_syntax_by_token(language)
.ok_or_else(|| format_err!("Unsupported language: {}", language))
});
if self.from_clipboard {
let mut ctx = ClipboardContext::new()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let code = ctx
.get_contents()
.map_err(|e| format_err!("failed to access clipboard: {}", e))?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&code)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, code));
}
if let Some(path) = &self.file |
let mut stdin = stdin();
let mut s = String::new();
stdin.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_by_first_line(&s)
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
Ok((language, s))
}
pub fn theme(&self, ts: &ThemeSet) -> Result<Theme, Error> {
if let Some(theme) = ts.themes.get(&self.theme) {
Ok(theme.clone())
} else {
ThemeSet::get_theme(&self.theme)
.context(format!("Canot load the theme: {}", self.theme))
}
}
pub fn get_formatter(&self) -> Result<ImageFormatter, Error> {
let formatter = ImageFormatterBuilder::new()
.line_pad(self.line_pad)
.window_controls(!self.no_window_controls)
.window_title(self.window_title.clone())
.line_number(!self.no_line_number)
.font(self.font.clone().unwrap_or_default())
.round_corner(!self.no_round_corner)
.shadow_adder(self.get_shadow_adder()?)
.tab_width(self.tab_width)
.highlight_lines(self.highlight_lines.clone().unwrap_or_default())
.line_offset(self.line_offset);
Ok(formatter.build()?)
}
pub fn get_shadow_adder(&self) -> Result<ShadowAdder, Error> {
Ok(ShadowAdder::new()
.background(match &self.background_image {
Some(path) => Background::Image(image::open(path)?.to_rgba8()),
None => Background::Solid(self.background),
})
.shadow_color(self.shadow_color)
.blur_radius(self.shadow_blur_radius)
.pad_horiz(self.pad_horiz)
.pad_vert(self.pad_vert)
.offset_x(self.shadow_offset_x)
.offset_y(self.shadow_offset_y))
}
pub fn get_expanded_output(&self) -> Option<PathBuf> {
let need_expand = self.output.as_ref().map(|p| p.starts_with("~")) == Some(true);
if let (Ok(home_dir), true) = (std::env::var("HOME"), need_expand) {
self.output
.as_ref()
.map(|p| p.to_string_lossy().replacen('~', &home_dir, 1).into())
} else {
self.output.clone()
}
}
}
| {
let mut s = String::new();
let mut file = File::open(path)?;
file.read_to_string(&mut s)?;
let language = possible_language.unwrap_or_else(|| {
ps.find_syntax_for_file(path)?
.ok_or_else(|| format_err!("Failed to detect the language"))
})?;
return Ok((language, s));
} | conditional_block |
import.rs | use std::{io, path::PathBuf};
use std::collections::HashMap;
use crate::mm0::{SortId, TermId, ThmId};
#[cfg(debug_assertions)] use mm0b_parser::BasicMmbFile as MmbFile;
#[cfg(not(debug_assertions))] use mm0b_parser::BareMmbFile as MmbFile;
use super::Mm0Writer;
macro_rules! const_assert { ($cond:expr) => { let _ = [(); 0 - (!($cond) as usize)]; } }
macro_rules! build_consts {
(@CONST sort $name:ident $e:expr) => { pub const $name: SortId = SortId($e); };
(@CONST term $name:ident $e:expr) => { pub const $name: TermId = TermId($e); };
(@CONST thm $name:ident $e:expr) => { pub const $name: ThmId = ThmId($e); };
(@CONST tydef $name:ident $e:expr) => { pub const $name: TydefId = TydefId($e); };
(@INDEX $mmb:expr, sort $name:ident) => { $mmb.sort_index(mm0_const::$name) };
(@INDEX $mmb:expr, term $name:ident) => { $mmb.term_index(mm0_const::$name) };
(@INDEX $mmb:expr, thm $name:ident) => { $mmb.thm_index(mm0_const::$name) };
(@CONJ $mmb:expr, tydef $name:ident: $s:tt) => {};
(@CONJ $mmb:expr, $ty:ident $name:ident: $s:tt) => {
if build_consts!(@INDEX $mmb, $ty $name)?.value()?!= $s { return None }
};
(@GET $index:expr, sort $s:tt) => { $index.sorts.get($s).map(|n| n.0 as u32) };
(@GET $index:expr, term $s:tt) => { $index.terms.get($s).map(|n| n.0) };
(@GET $index:expr, thm $s:tt) => { $index.thms.get($s).map(|n| n.0) };
(@PRINT $print1:ident, $out:expr, $index:expr, $name:expr, tydef $s:tt = $e:expr) => {
writeln!($out, " tydef {}: {} = {};", $name, stringify!($s), $e).unwrap()
};
(@PRINT $print1:ident, $out:expr, $index:expr, $name:expr, $ty:ident $s:tt = $e:expr) => {
$print1(&mut $out, stringify!($ty), $name, $s, build_consts!(@GET $index, $ty $s), $e)
};
(@TYDEFS $(($nil:tt ($name:ident $s:expr, $e:expr)))*) => {
pub const TYDEFS: [
(ThmId, TermId, [(TermId, ThmId); 2], [ThmId; 2]);
[$($nil),*].len()
] = {
const_assert!({
let mut n = 0;
($($e == (n, n += 1).0 &&)* true, n).0
});
use mm0_const::*;
[$($s),*]
};
};
(@FILTER_TYDEFS $(($(tydef $($tydef:literal)?)? $(sort)? $(term)? $(thm)?: $t:tt))*) => {
build_consts! { @TYDEFS $($($($tydef)? (() $t))?)* }
};
($($ty:ident $name:ident: $s:tt = $e:expr;)*) => {
pub mod mm0_const {
use crate::mm0::{SortId, TermId, ThmId, TydefId};
$(build_consts!(@CONST $ty $name $e);)*
}
build_consts! { @FILTER_TYDEFS $(($ty: ($name $s, $e)))* }
#[cfg(debug_assertions)]
fn check_consts(mmb: &MmbFile<'_>) {
#[derive(Default)]
struct Index<'a> {
sorts: HashMap<&'a str, SortId>,
terms: HashMap<&'a str, TermId>,
thms: HashMap<&'a str, ThmId>,
}
#[cold] fn rebuild_consts(mmb: &MmbFile<'_>) {
use std::fmt::Write;
let mut index = Index::default();
for n in (0..mmb.header.num_sorts).map(SortId) {
if let Some(s) = mmb.sort_index(n).and_then(|ix| ix.value()) {
index.sorts.insert(s, n);
}
}
for n in (0..mmb.header.num_terms.get()).map(TermId) {
if let Some(s) = mmb.term_index(n).and_then(|ix| ix.value()) {
index.terms.insert(s, n);
}
}
for n in (0..mmb.header.num_thms.get()).map(ThmId) {
if let Some(s) = mmb.thm_index(n).and_then(|ix| ix.value()) {
index.thms.insert(s, n);
}
}
let mut out = String::new();
fn print1(out: &mut String, ty: &str, name: &str, s: &str, o: Option<u32>, e: u32) {
if let Some(n) = o {
write!(out, " {} {}: {:?} = {};", ty, name, s, n).unwrap();
if n == e { *out += "\n" } else { write!(out, " // not {}\n", e).unwrap() }
} else {
eprintln!("{} {:?} not found", ty, s);
write!(out, " {} {}: {:?} =???;\n", ty, name, s).unwrap();
}
}
$(build_consts!(@PRINT print1, out, index, stringify!($name), $ty $s = $e);)*
eprintln!("build_consts! {{\n{}}}", out);
panic!("Rebuild needed. Put this in'mm0/import.rs'");
}
#[inline]
fn check_consts(mmb: &MmbFile<'_>) -> Option<()> {
$(build_consts!(@CONJ mmb, $ty $name: $s);)*
Some(())
}
if check_consts(mmb).is_none() { rebuild_consts(mmb) }
}
}
}
// This is a list of indexes into the hol.mmb file; it is checked during startup,
// and if any of the indexes are wrong then it will print the appropriate replacement.
// (TODO: use code generation)
build_consts! {
sort WFF: "wff" = 0;
sort TYPE: "type" = 1;
sort TERM: "term" = 2;
term MM0_IM: "im" = 0;
term MM0_AN: "an" = 1;
term TY: "ty" = 4;
term THM: "thm" = 9;
term APP: "app" = 5;
term LAM: "lam" = 6;
term BOOL: "bool" = 2;
term FUN: "fun" = 3;
term EQ: "eq" = 7;
thm EQ_T: "eqT" = 27;
term TRUE: "T" = 11;
thm TRUE_T: "TT" = 51;
thm TRUE_DEF: "T_DEF" = 52;
term AND: "and" = 16;
thm AND_T: "andT" = 68;
thm AND_DEF: "AND_DEF" = 69;
term IMP: "imp" = 18;
thm IMP_T: "impT" = 76;
thm IMP_DEF: "IMP_DEF" = 77;
term ALL: "all" = 20;
thm ALL_T: "allT" = 90;
thm ALL_DEF: "FORALL_DEF" = 91;
term EX: "ex" = 22;
thm EX_T: "exT" = 105;
thm EX_DEF: "EXISTS_DEF" = 106;
term OR: "or" = 24;
thm OR_T: "orT" = 127;
thm OR_DEF: "OR_DEF" = 128;
term FALSE: "F" = 26;
thm FALSE_T: "FT" = 138;
thm FALSE_DEF: "F_DEF" = 139;
term NOT: "not" = 27;
thm NOT_T: "notT" = 142;
thm NOT_DEF: "NOT_DEF" = 143;
term EU: "eu" = 29;
thm EU_T: "euT" = 153;
thm EU_DEF: "EU_DEF" = 154;
thm ETA_AX: "ETA_AX" = 104;
term SEL: "sel" = 15;
thm SEL_T: "selT" = 63;
thm SELECT_AX: "SELECT_AX" = 157;
term COND: "COND" = 30;
thm COND_T: "condT" = 163;
thm COND_DEF: "COND_DEF" = 164;
thm CONJ: "CONJ" = 72;
thm CONJ_PAIR: "CONJ_PAIR" = 73;
thm CONJUNCT1: "CONJUNCT1" = 74;
thm CONJUNCT2: "CONJUNCT2" = 75;
thm REFL: "refl" = 33;
thm AEQ: "aeq" = 34;
thm AEQ1: "aeq1" = 35;
thm AEQ2: "aeq2" = 36;
thm MP: "MP" = 81;
thm DISCH: "DISCH" = 82;
thm UNDISCH: "UNDISCH" = 84;
thm IMP_ANTISYM: "IMP_ANTISYM" = 85;
thm EQ_IMP1: "EQ_IMP1" = 86;
thm EQ_IMP2: "EQ_IMP2" = 87;
thm IMP_ID: "IMP_ID" = 88;
thm IMP_TRANS: "IMP_TRANS" = 89;
thm SPEC: "SPEC" = 97;
thm GEN: "GEN" = 98;
thm CHOOSE: "CHOOSE" = 111;
thm EXISTS: "EXISTS" = 113;
thm DISJ1: "DISJ1" = 132;
thm DISJ2: "DISJ2" = 134;
thm DISJ_CASES: "DISJ_CASES" = 136;
thm CONTR: "CONTR" = 140;
thm NOT_ELIM: "NOT_ELIM" = 147;
thm NOT_INTRO: "NOT_INTRO" = 148;
thm EQF_INTRO: "EQF_INTRO" = 149;
thm EQF_ELIM: "EQF_ELIM" = 150;
thm NOT_FALSE: "NOT_FALSE" = 151;
thm CCONTR: "CCONTR" = 162;
thm PROD_TYBIJ1: "prod_tybij1" = 172;
thm PROD_TYBIJ2: "prod_tybij2" = 173;
thm ONE_ONE_THM: "ONE_ONE" = 192;
thm ONTO_THM: "ONTO" = 197;
thm INF: "inf" = 198;
term MK_PAIR: "mk_pair" = 31;
thm MK_PAIR_T: "mk_pairT" = 165;
thm MK_PAIR_DEF: "mk_pair_DEF" = 166;
term PROD: "prod" = 32;
thm PROD_THM: "PROD_THM" = 168;
term ABS_PROD: "ABS_prod" = 33;
thm ABS_PROD_T: "ABS_prodT" = 170;
term REP_PROD: "REP_prod" = 34;
thm REP_PROD_T: "REP_prodT" = 171;
thm PROD_BIJ1: "PROD_BIJ1" = 174;
thm PROD_BIJ2: "PROD_BIJ2" = 175;
tydef PROD_TYDEF: (PROD_THM, PROD,
[(ABS_PROD, ABS_PROD_T), (REP_PROD, REP_PROD_T)], [PROD_BIJ1, PROD_BIJ2]) = 0;
term PAIR: "pr" = 35;
thm PAIR_T: "prT" = 176;
thm PAIR_DEF: "PAIR_DEF" = 177;
term FST: "fst" = 36;
thm FST_T: "fstT" = 180;
thm FST_DEF: "FST_DEF" = 181;
term SND: "snd" = 37;
thm SND_T: "sndT" = 184;
thm SND_DEF: "SND_DEF" = 185;
term IND: "ind" = 40;
term ONE_ONE: "one_one" = 38;
thm ONE_ONE_T: "one_one_T" = 188;
thm ONE_ONE_BD: "one_one_BD" = 189;
thm ONE_ONE_DEF: "one_one_DEF" = 191;
term ONTO: "onto" = 39;
thm ONTO_T: "onto_T" = 193;
thm ONTO_BD: "onto_BD" = 194;
thm ONTO_DEF: "onto_DEF" = 196;
thm INFINITY_AX: "inf" = 198;
term IND_SUC: "IND_SUC" = 41;
thm IND_SUC_T: "IND_SUC_T" = 199;
thm IND_SUC_DEF: "IND_SUC_DEF" = 200;
term IND_0: "IND_0" = 42;
thm IND_0_T: "IND_0_T" = 203;
thm IND_0_DEF: "IND_0_DEF" = 204;
term NUM_REP: "NUM_REP" = 43;
thm NUM_REP_T: "NUM_REP_T" = 205;
thm NUM_REP_DEF: "NUM_REP_DEF" = 206;
term NUM: "num" = 44;
thm NUM_THM: "NUM_THM" = 207;
term MK_NUM: "mk_num" = 45;
thm MK_NUM_T: "mk_numT" = 208;
term DEST_NUM: "dest_num" = 46;
thm DEST_NUM_T: "dest_numT" = 209;
thm NUM_BIJ1: "NUM_BIJ1" = 212;
thm NUM_BIJ2: "NUM_BIJ2" = 213;
tydef NUM_TYDEF: (NUM_THM, NUM,
[(MK_NUM, MK_NUM_T), (DEST_NUM, DEST_NUM_T)], [NUM_BIJ1, NUM_BIJ2]) = 1;
term ZERO: "_0" = 47;
thm ZERO_T: "_0T" = 214;
thm ZERO_DEF: "_0_DEF" = 215;
term SUC: "suc" = 48;
thm SUC_T: "sucT" = 216;
thm SUC_BD: "suc_BD" = 218;
thm SUC_DEF: "suc_DEF" = 219;
term NUMERAL: "NUMERAL" = 49;
thm NUMERAL_T: "NUMERAL_T" = 220;
thm NUMERAL_BD: "NUMERAL_BD" = 222;
thm NUMERAL_DEF: "NUMERAL_DEF" = 223;
term BIT0: "bit0" = 51;
thm BIT0_T: "bit0T" = 230;
thm BIT0_DEF: "bit0_DEF" = 232;
term BIT1: "bit1" = 52;
thm BIT1_T: "bit1T" = 233;
thm BIT1_BD: "bit1_BD" = 235;
thm BIT1_DEF: "bit1_DEF" = 236;
term PRE: "pre" = 54;
thm PRE_T: "preT" = 238;
thm PRE_DEF: "pre_DEF" = 239;
thm PRE_SPEC: "PRE" = 240;
term ADD: "add" = 55;
thm ADD_T: "addT" = 241;
thm ADD_DEF: "add_DEF" = 243;
thm ADD_SPEC: "ADD" = 244;
term MUL: "mul" = 57;
thm MUL_T: "mulT" = 245;
thm MUL_DEF: "mul_DEF" = 247;
thm MUL_SPEC: "MUL" = 248;
term EXP: "exp" = 59;
thm EXP_T: "expT" = 250;
thm EXP_DEF: "exp_DEF" = 252;
thm EXP_SPEC: "EXP" = 253;
term LE: "le" = 60;
thm LE_T: "leT" = 254;
thm LE_DEF: "le_DEF" = 256;
thm LE_SPEC: "LE" = 257;
term LT: "lt" = 62;
thm LT_T: "ltT" = 258;
thm LT_DEF: "lt_DEF" = 260;
thm LT_SPEC: "LT" = 261;
term GE: "ge" = 64;
thm GE_T: "geT" = 263;
thm GE_BD: "ge_BD" = 264;
thm GE_DEF: "ge_DEF" = 265;
term GT: "gt" = 65;
thm GT_T: "gtT" = 266;
thm GT_BD: "gt_BD" = 267;
thm GT_DEF: "gt_DEF" = 268;
term EVEN: "even" = 66;
thm EVEN_T: "evenT" = 269;
thm EVEN_DEF: "even_DEF" = 270;
thm EVEN_SPEC: "EVEN" = 271;
term ODD: "odd" = 67;
thm ODD_T: "oddT" = 272;
thm ODD_DEF: "odd_DEF" = 273;
thm ODD_SPEC: "ODD" = 274;
term SUB: "sub" = 68;
thm SUB_T: "subT" = 276;
thm SUB_DEF: "sub_DEF" = 278;
thm SUB_SPEC: "SUB" = 279;
term TYPEDEF: "TYPEDEF" = 70;
thm TYPEDEF_T: "TYPEDEF_T" = 280;
thm TYPEDEF_DEF: "TYPEDEF_DEF" = 281;
thm AND_DEF1: "AND_DEF1" = 102;
thm EXISTS_THM: "EXISTS_THM" = 158;
thm EU_DEF1: "EU_DEF1" = 156;
thm IMP_ANTISYM_AX: "IMP_ANTISYM_AX" = 103;
thm BOOL_CASES_AX: "BOOL_CASES_AX" = 161;
thm TRUTH: "TRUTH" = 53;
thm NOT_TRUE: "NOT_TRUE" = 152;
thm EM: "em" = 159;
thm PAIR_EQ: "PAIR_EQ" = 178;
thm PAIR_SURJ: "PAIR_SURJ" = 179;
thm FST_THM: "FST" = 183;
thm SND_THM: "SND" = 187;
thm IND_SUC_0: "IND_SUC_0" = 201;
thm IND_SUC_INJ: "IND_SUC_INJ" = 202;
thm NOT_SUC: "NOT_SUC" = 225;
thm SUC_INJ: "SUC_INJ" = 226;
thm NUM_CASES: "num_CASES" = 227;
thm NUM_IND: "num_INDUCTION" = 228;
thm NUM_REC: "num_RECURSION" = 229;
thm MUL1: "MUL1" = 249;
thm LE1: "LE1" = 262;
thm ODD1: "ODD1" = 275;
}
pub fn hol_writer(out: PathBuf, temp: PathBuf) -> io::Result<Mm0Writer> {
#[repr(C, align(8))]
pub struct | <T:?Sized>(T);
static HOL_MMB: &Aligned<[u8]> = &Aligned(*include_bytes!("../../hol.mmb"));
let mmb = MmbFile::parse(&HOL_MMB.0).unwrap();
#[cfg(debug_assertions)] check_consts(&mmb);
Mm0Writer::new(out, temp, &mmb)
}
| Aligned | identifier_name |
import.rs | use std::{io, path::PathBuf};
use std::collections::HashMap;
use crate::mm0::{SortId, TermId, ThmId};
#[cfg(debug_assertions)] use mm0b_parser::BasicMmbFile as MmbFile;
#[cfg(not(debug_assertions))] use mm0b_parser::BareMmbFile as MmbFile;
use super::Mm0Writer;
macro_rules! const_assert { ($cond:expr) => { let _ = [(); 0 - (!($cond) as usize)]; } }
macro_rules! build_consts {
(@CONST sort $name:ident $e:expr) => { pub const $name: SortId = SortId($e); };
(@CONST term $name:ident $e:expr) => { pub const $name: TermId = TermId($e); };
(@CONST thm $name:ident $e:expr) => { pub const $name: ThmId = ThmId($e); };
(@CONST tydef $name:ident $e:expr) => { pub const $name: TydefId = TydefId($e); };
(@INDEX $mmb:expr, sort $name:ident) => { $mmb.sort_index(mm0_const::$name) };
(@INDEX $mmb:expr, term $name:ident) => { $mmb.term_index(mm0_const::$name) };
(@INDEX $mmb:expr, thm $name:ident) => { $mmb.thm_index(mm0_const::$name) };
(@CONJ $mmb:expr, tydef $name:ident: $s:tt) => {};
(@CONJ $mmb:expr, $ty:ident $name:ident: $s:tt) => {
if build_consts!(@INDEX $mmb, $ty $name)?.value()?!= $s { return None }
};
(@GET $index:expr, sort $s:tt) => { $index.sorts.get($s).map(|n| n.0 as u32) };
(@GET $index:expr, term $s:tt) => { $index.terms.get($s).map(|n| n.0) };
(@GET $index:expr, thm $s:tt) => { $index.thms.get($s).map(|n| n.0) };
(@PRINT $print1:ident, $out:expr, $index:expr, $name:expr, tydef $s:tt = $e:expr) => {
writeln!($out, " tydef {}: {} = {};", $name, stringify!($s), $e).unwrap()
};
(@PRINT $print1:ident, $out:expr, $index:expr, $name:expr, $ty:ident $s:tt = $e:expr) => {
$print1(&mut $out, stringify!($ty), $name, $s, build_consts!(@GET $index, $ty $s), $e)
};
(@TYDEFS $(($nil:tt ($name:ident $s:expr, $e:expr)))*) => {
pub const TYDEFS: [
(ThmId, TermId, [(TermId, ThmId); 2], [ThmId; 2]);
[$($nil),*].len()
] = {
const_assert!({
let mut n = 0;
($($e == (n, n += 1).0 &&)* true, n).0
});
use mm0_const::*;
[$($s),*]
};
};
(@FILTER_TYDEFS $(($(tydef $($tydef:literal)?)? $(sort)? $(term)? $(thm)?: $t:tt))*) => {
build_consts! { @TYDEFS $($($($tydef)? (() $t))?)* }
};
($($ty:ident $name:ident: $s:tt = $e:expr;)*) => {
pub mod mm0_const {
use crate::mm0::{SortId, TermId, ThmId, TydefId};
$(build_consts!(@CONST $ty $name $e);)*
}
build_consts! { @FILTER_TYDEFS $(($ty: ($name $s, $e)))* }
#[cfg(debug_assertions)]
fn check_consts(mmb: &MmbFile<'_>) {
#[derive(Default)]
struct Index<'a> {
sorts: HashMap<&'a str, SortId>,
terms: HashMap<&'a str, TermId>,
thms: HashMap<&'a str, ThmId>,
}
#[cold] fn rebuild_consts(mmb: &MmbFile<'_>) {
use std::fmt::Write;
let mut index = Index::default();
for n in (0..mmb.header.num_sorts).map(SortId) {
if let Some(s) = mmb.sort_index(n).and_then(|ix| ix.value()) {
index.sorts.insert(s, n);
}
}
for n in (0..mmb.header.num_terms.get()).map(TermId) {
if let Some(s) = mmb.term_index(n).and_then(|ix| ix.value()) {
index.terms.insert(s, n);
}
}
for n in (0..mmb.header.num_thms.get()).map(ThmId) {
if let Some(s) = mmb.thm_index(n).and_then(|ix| ix.value()) {
index.thms.insert(s, n);
}
}
let mut out = String::new();
fn print1(out: &mut String, ty: &str, name: &str, s: &str, o: Option<u32>, e: u32) {
if let Some(n) = o {
write!(out, " {} {}: {:?} = {};", ty, name, s, n).unwrap();
if n == e { *out += "\n" } else { write!(out, " // not {}\n", e).unwrap() }
} else {
eprintln!("{} {:?} not found", ty, s);
write!(out, " {} {}: {:?} =???;\n", ty, name, s).unwrap();
}
}
$(build_consts!(@PRINT print1, out, index, stringify!($name), $ty $s = $e);)*
eprintln!("build_consts! {{\n{}}}", out);
panic!("Rebuild needed. Put this in'mm0/import.rs'");
}
#[inline]
fn check_consts(mmb: &MmbFile<'_>) -> Option<()> {
$(build_consts!(@CONJ mmb, $ty $name: $s);)*
Some(())
}
if check_consts(mmb).is_none() { rebuild_consts(mmb) }
}
}
}
// This is a list of indexes into the hol.mmb file; it is checked during startup,
// and if any of the indexes are wrong then it will print the appropriate replacement.
// (TODO: use code generation)
build_consts! {
sort WFF: "wff" = 0;
sort TYPE: "type" = 1;
sort TERM: "term" = 2;
term MM0_IM: "im" = 0;
term MM0_AN: "an" = 1;
term TY: "ty" = 4;
term THM: "thm" = 9;
term APP: "app" = 5;
term LAM: "lam" = 6;
term BOOL: "bool" = 2;
term FUN: "fun" = 3;
term EQ: "eq" = 7;
thm EQ_T: "eqT" = 27;
term TRUE: "T" = 11;
thm TRUE_T: "TT" = 51;
thm TRUE_DEF: "T_DEF" = 52;
term AND: "and" = 16;
thm AND_T: "andT" = 68;
thm AND_DEF: "AND_DEF" = 69;
term IMP: "imp" = 18;
thm IMP_T: "impT" = 76;
thm IMP_DEF: "IMP_DEF" = 77;
term ALL: "all" = 20;
thm ALL_T: "allT" = 90;
thm ALL_DEF: "FORALL_DEF" = 91;
term EX: "ex" = 22;
thm EX_T: "exT" = 105;
thm EX_DEF: "EXISTS_DEF" = 106;
term OR: "or" = 24;
thm OR_T: "orT" = 127;
thm OR_DEF: "OR_DEF" = 128;
term FALSE: "F" = 26;
thm FALSE_T: "FT" = 138;
thm FALSE_DEF: "F_DEF" = 139;
term NOT: "not" = 27;
thm NOT_T: "notT" = 142;
thm NOT_DEF: "NOT_DEF" = 143;
term EU: "eu" = 29;
thm EU_T: "euT" = 153;
thm EU_DEF: "EU_DEF" = 154;
thm ETA_AX: "ETA_AX" = 104;
term SEL: "sel" = 15;
thm SEL_T: "selT" = 63;
thm SELECT_AX: "SELECT_AX" = 157;
term COND: "COND" = 30;
thm COND_T: "condT" = 163;
thm COND_DEF: "COND_DEF" = 164;
thm CONJ: "CONJ" = 72;
thm CONJ_PAIR: "CONJ_PAIR" = 73;
thm CONJUNCT1: "CONJUNCT1" = 74;
thm CONJUNCT2: "CONJUNCT2" = 75;
thm REFL: "refl" = 33;
thm AEQ: "aeq" = 34;
thm AEQ1: "aeq1" = 35;
thm AEQ2: "aeq2" = 36;
thm MP: "MP" = 81;
thm DISCH: "DISCH" = 82;
thm UNDISCH: "UNDISCH" = 84;
thm IMP_ANTISYM: "IMP_ANTISYM" = 85;
thm EQ_IMP1: "EQ_IMP1" = 86;
thm EQ_IMP2: "EQ_IMP2" = 87;
thm IMP_ID: "IMP_ID" = 88;
thm IMP_TRANS: "IMP_TRANS" = 89;
thm SPEC: "SPEC" = 97;
thm GEN: "GEN" = 98;
thm CHOOSE: "CHOOSE" = 111;
thm EXISTS: "EXISTS" = 113;
thm DISJ1: "DISJ1" = 132;
thm DISJ2: "DISJ2" = 134;
thm DISJ_CASES: "DISJ_CASES" = 136;
thm CONTR: "CONTR" = 140;
thm NOT_ELIM: "NOT_ELIM" = 147;
thm NOT_INTRO: "NOT_INTRO" = 148;
thm EQF_INTRO: "EQF_INTRO" = 149;
thm EQF_ELIM: "EQF_ELIM" = 150;
thm NOT_FALSE: "NOT_FALSE" = 151;
thm CCONTR: "CCONTR" = 162;
thm PROD_TYBIJ1: "prod_tybij1" = 172;
thm PROD_TYBIJ2: "prod_tybij2" = 173;
thm ONE_ONE_THM: "ONE_ONE" = 192;
thm ONTO_THM: "ONTO" = 197;
thm INF: "inf" = 198;
term MK_PAIR: "mk_pair" = 31;
thm MK_PAIR_T: "mk_pairT" = 165;
thm MK_PAIR_DEF: "mk_pair_DEF" = 166;
term PROD: "prod" = 32;
thm PROD_THM: "PROD_THM" = 168;
term ABS_PROD: "ABS_prod" = 33;
thm ABS_PROD_T: "ABS_prodT" = 170;
term REP_PROD: "REP_prod" = 34;
thm REP_PROD_T: "REP_prodT" = 171;
thm PROD_BIJ1: "PROD_BIJ1" = 174;
thm PROD_BIJ2: "PROD_BIJ2" = 175;
tydef PROD_TYDEF: (PROD_THM, PROD,
[(ABS_PROD, ABS_PROD_T), (REP_PROD, REP_PROD_T)], [PROD_BIJ1, PROD_BIJ2]) = 0;
term PAIR: "pr" = 35;
thm PAIR_T: "prT" = 176;
thm PAIR_DEF: "PAIR_DEF" = 177;
term FST: "fst" = 36;
thm FST_T: "fstT" = 180;
thm FST_DEF: "FST_DEF" = 181;
term SND: "snd" = 37;
thm SND_T: "sndT" = 184;
thm SND_DEF: "SND_DEF" = 185;
term IND: "ind" = 40;
term ONE_ONE: "one_one" = 38;
thm ONE_ONE_T: "one_one_T" = 188;
thm ONE_ONE_BD: "one_one_BD" = 189;
thm ONE_ONE_DEF: "one_one_DEF" = 191;
term ONTO: "onto" = 39;
thm ONTO_T: "onto_T" = 193;
thm ONTO_BD: "onto_BD" = 194;
thm ONTO_DEF: "onto_DEF" = 196;
thm INFINITY_AX: "inf" = 198;
term IND_SUC: "IND_SUC" = 41;
thm IND_SUC_T: "IND_SUC_T" = 199;
thm IND_SUC_DEF: "IND_SUC_DEF" = 200;
term IND_0: "IND_0" = 42;
thm IND_0_T: "IND_0_T" = 203;
thm IND_0_DEF: "IND_0_DEF" = 204;
term NUM_REP: "NUM_REP" = 43;
thm NUM_REP_T: "NUM_REP_T" = 205;
thm NUM_REP_DEF: "NUM_REP_DEF" = 206;
term NUM: "num" = 44;
thm NUM_THM: "NUM_THM" = 207;
term MK_NUM: "mk_num" = 45;
thm MK_NUM_T: "mk_numT" = 208;
term DEST_NUM: "dest_num" = 46;
thm DEST_NUM_T: "dest_numT" = 209;
thm NUM_BIJ1: "NUM_BIJ1" = 212;
thm NUM_BIJ2: "NUM_BIJ2" = 213;
tydef NUM_TYDEF: (NUM_THM, NUM,
[(MK_NUM, MK_NUM_T), (DEST_NUM, DEST_NUM_T)], [NUM_BIJ1, NUM_BIJ2]) = 1;
term ZERO: "_0" = 47;
thm ZERO_T: "_0T" = 214;
thm ZERO_DEF: "_0_DEF" = 215;
term SUC: "suc" = 48;
thm SUC_T: "sucT" = 216;
thm SUC_BD: "suc_BD" = 218;
thm SUC_DEF: "suc_DEF" = 219;
term NUMERAL: "NUMERAL" = 49;
thm NUMERAL_T: "NUMERAL_T" = 220;
thm NUMERAL_BD: "NUMERAL_BD" = 222;
thm NUMERAL_DEF: "NUMERAL_DEF" = 223;
term BIT0: "bit0" = 51;
thm BIT0_T: "bit0T" = 230;
thm BIT0_DEF: "bit0_DEF" = 232;
term BIT1: "bit1" = 52;
thm BIT1_T: "bit1T" = 233;
thm BIT1_BD: "bit1_BD" = 235;
thm BIT1_DEF: "bit1_DEF" = 236;
term PRE: "pre" = 54;
thm PRE_T: "preT" = 238;
thm PRE_DEF: "pre_DEF" = 239;
thm PRE_SPEC: "PRE" = 240;
term ADD: "add" = 55;
thm ADD_T: "addT" = 241;
thm ADD_DEF: "add_DEF" = 243;
thm ADD_SPEC: "ADD" = 244;
term MUL: "mul" = 57;
thm MUL_T: "mulT" = 245;
thm MUL_DEF: "mul_DEF" = 247;
thm MUL_SPEC: "MUL" = 248;
term EXP: "exp" = 59;
thm EXP_T: "expT" = 250;
thm EXP_DEF: "exp_DEF" = 252;
thm EXP_SPEC: "EXP" = 253;
term LE: "le" = 60;
thm LE_T: "leT" = 254;
thm LE_DEF: "le_DEF" = 256;
thm LE_SPEC: "LE" = 257;
term LT: "lt" = 62;
thm LT_T: "ltT" = 258;
thm LT_DEF: "lt_DEF" = 260;
thm LT_SPEC: "LT" = 261;
term GE: "ge" = 64;
thm GE_T: "geT" = 263;
thm GE_BD: "ge_BD" = 264;
thm GE_DEF: "ge_DEF" = 265;
term GT: "gt" = 65;
thm GT_T: "gtT" = 266;
thm GT_BD: "gt_BD" = 267;
thm GT_DEF: "gt_DEF" = 268;
term EVEN: "even" = 66;
thm EVEN_T: "evenT" = 269;
thm EVEN_DEF: "even_DEF" = 270;
thm EVEN_SPEC: "EVEN" = 271;
term ODD: "odd" = 67;
thm ODD_T: "oddT" = 272;
thm ODD_DEF: "odd_DEF" = 273;
thm ODD_SPEC: "ODD" = 274;
term SUB: "sub" = 68;
thm SUB_T: "subT" = 276;
thm SUB_DEF: "sub_DEF" = 278;
thm SUB_SPEC: "SUB" = 279;
term TYPEDEF: "TYPEDEF" = 70;
thm TYPEDEF_T: "TYPEDEF_T" = 280;
thm TYPEDEF_DEF: "TYPEDEF_DEF" = 281;
thm AND_DEF1: "AND_DEF1" = 102;
thm EXISTS_THM: "EXISTS_THM" = 158;
thm EU_DEF1: "EU_DEF1" = 156;
thm IMP_ANTISYM_AX: "IMP_ANTISYM_AX" = 103;
thm BOOL_CASES_AX: "BOOL_CASES_AX" = 161;
thm TRUTH: "TRUTH" = 53;
thm NOT_TRUE: "NOT_TRUE" = 152;
thm EM: "em" = 159;
thm PAIR_EQ: "PAIR_EQ" = 178;
thm PAIR_SURJ: "PAIR_SURJ" = 179;
thm FST_THM: "FST" = 183;
thm SND_THM: "SND" = 187;
thm IND_SUC_0: "IND_SUC_0" = 201;
thm IND_SUC_INJ: "IND_SUC_INJ" = 202;
thm NOT_SUC: "NOT_SUC" = 225;
thm SUC_INJ: "SUC_INJ" = 226;
thm NUM_CASES: "num_CASES" = 227;
thm NUM_IND: "num_INDUCTION" = 228;
thm NUM_REC: "num_RECURSION" = 229;
thm MUL1: "MUL1" = 249;
thm LE1: "LE1" = 262;
thm ODD1: "ODD1" = 275;
}
pub fn hol_writer(out: PathBuf, temp: PathBuf) -> io::Result<Mm0Writer> {
#[repr(C, align(8))]
pub struct Aligned<T:?Sized>(T);
static HOL_MMB: &Aligned<[u8]> = &Aligned(*include_bytes!("../../hol.mmb")); | Mm0Writer::new(out, temp, &mmb)
} | let mmb = MmbFile::parse(&HOL_MMB.0).unwrap();
#[cfg(debug_assertions)] check_consts(&mmb); | random_line_split |
lib.rs | //! Async concurrent hashmap built on top of [dashmap](https://docs.rs/dashmap/).
//!
//! # Wait
//! [`WaitMap`](crate::WaitMap) is a concurrent hashmap with an asynchronous `wait` operation.
//! ```
//! # extern crate async_std;
//! # extern crate waitmap;
//! # use async_std::main;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: WaitMap<String, i32> = WaitMap::new();
//! # map.insert(String::from("Rosa Luxemburg"), 1);
//!
//! // This will wait until a value is put under the key "Rosa Luxemburg"
//! if let Some(value) = map.wait("Rosa Luxemburg").await {
//! //...
//! }
//! # Ok(())
//! # }
//! ```
//!
//! Waits are cancellable. Cancelled waits evaluate to `None`.
//! ```
//! # extern crate async_std;
//! # extern crate waitmap;
//! # use async_std::{main, task};
//! # use std::time::Duration;
//! # use std::sync::Arc;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new());
//! let map1 = map.clone();
//!
//! let handle = task::spawn(async move {
//! let result = map.wait("Voltairine de Cleyre").await;
//! assert!(result.is_none());
//! });
//!
//! task::spawn(async move {
//! task::sleep(Duration::from_millis(100)).await; // avoid deadlock
//! map1.cancel("Voltairine de Cleyre");
//! });
//!
//! task::block_on(handle);
//! # Ok(())
//! # }
//! ```
mod wait;
mod waker_set;
use std::borrow::Borrow;
use std::collections::hash_map::RandomState;
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::mem;
use dashmap::mapref::entry::Entry::*;
use dashmap::mapref::one;
use dashmap::DashMap;
use wait::{Wait, WaitMut};
use waker_set::WakerSet;
use WaitEntry::*;
/// An asynchronous concurrent hashmap.
pub struct WaitMap<K, V, S = RandomState> {
map: DashMap<K, WaitEntry<V>, S>,
}
impl<K: Hash + Eq, V> WaitMap<K, V> {
/// Make a new `WaitMap` using the default hasher.
pub fn new() -> WaitMap<K, V> {
WaitMap {
map: DashMap::with_hasher(RandomState::default()),
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> {
/// Make a new `WaitMap` using a custom hasher.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::WaitMap;
/// use std::collections::hash_map::RandomState;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new());
/// # Ok(())
/// # }
/// ```
pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> {
WaitMap {
map: DashMap::with_hasher(hasher),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If there are any pending `wait` calls for this key, they are woken up.
///
/// If the map did have this key present, the value is updated and the old value is returned.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, sync::Arc, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new());
///
/// let insert_fut = async { map.insert("hi".to_string(), 0) };
/// let wait_fut = map.wait("hi");
///
/// let (insert_res, wait_res) = insert_fut.join(wait_fut).await;
/// assert!(insert_res.is_none());
/// assert!(wait_res.is_some());
/// # Ok(())
/// # }
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
match self.map.entry(key) {
Occupied(mut entry) => {
match mem::replace(entry.get_mut(), Filled(value)) {
Waiting(wakers) => {
drop(entry); // drop early to release lock before waking other tasks
wakers.wake();
None
}
Filled(value) => Some(value),
}
}
Vacant(slot) => {
slot.insert(Filled(value));
None
}
}
}
pub fn get<Q:?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(Ref {
inner: self.map.get(key)?,
})
}
pub fn get_mut<Q:?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(RefMut {
inner: self.map.get_mut(key)?,
})
}
pub fn wait<'a: 'f, 'b: 'f, 'f, Q:?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
Wait::new(&self.map, qey)
}
pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q:?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
WaitMut::new(&self.map, qey)
}
pub fn cancel<Q:?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) =
self.map.remove_if(
key,
|_, entry| {
if let Waiting(_) = entry {
true
} else {
false
}
},
)
{
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn remove<Q:?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) = self.map.remove(key) {
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn clear(&self) {
self.map.retain(|_, v| {
if let Waiting(wakers) = v {
mem::replace(wakers, WakerSet::new()).wake();
}
false
});
}
pub fn clear_not_waiting(&self) {
self.map
.retain(|_, v| if let Waiting(_) = v { true } else { false });
}
pub fn len(&self) -> usize {
self.map.len()
}
/// Cancels all outstanding `waits` on the map.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, stream, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let mut waitstream =
/// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]);
///
/// map.cancel_all();
///
/// let mut num_cancelled = 0;
/// while let Some(wait_fut) = waitstream.next().await {
/// assert!(wait_fut.await.is_none());
/// num_cancelled += 1;
/// }
///
/// assert!(num_cancelled == 3);
/// # Ok(())
/// # }
/// ```
pub fn cancel_all(&self) {
self.map.retain(|_, entry| {
if let Waiting(wakers) = entry {
// NB: In theory, there is a deadlock risk: if a task is awoken before the
// retain is completed, it may see a waiting entry with an empty waker set,
// rather than a missing entry.
//
// However, this is prevented by the memory guards already present in DashMap.
// No other task will be able to view this entry until the guard on this shard
// has been dropped, which will not occur until this shard's unretained members
// have actually been removed.
mem::replace(wakers, WakerSet::new()).wake();
false
} else {
true
}
})
}
}
enum WaitEntry<V> {
Waiting(WakerSet),
Filled(V),
}
/// A shared reference to a `WaitMap` key-value pair.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::{Ref, WaitMap};
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new(); | /// assert!(*kv.key() == emma);
/// assert!(*kv.value() == 0);
/// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0));
/// # Ok(())
/// # }
/// ```
pub struct Ref<'a, K, V, S> {
inner: one::Ref<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
}
/// An exclusive reference to a `WaitMap` key-value pair.
pub struct RefMut<'a, K, V, S> {
inner: one::RefMut<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn value_mut(&mut self) -> &mut V {
match self.inner.value_mut() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
pub fn pair_mut(&mut self) -> (&K, &mut V) {
match self.inner.pair_mut() {
(key, Filled(value)) => (key, value),
_ => panic!(),
}
}
} | /// let emma = "Emma Goldman".to_string();
///
/// map.insert(emma.clone(), 0);
/// let kv: Ref<String, i32, _> = map.get(&emma).unwrap();
/// | random_line_split |
lib.rs | //! Async concurrent hashmap built on top of [dashmap](https://docs.rs/dashmap/).
//!
//! # Wait
//! [`WaitMap`](crate::WaitMap) is a concurrent hashmap with an asynchronous `wait` operation.
//! ```
//! # extern crate async_std;
//! # extern crate waitmap;
//! # use async_std::main;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: WaitMap<String, i32> = WaitMap::new();
//! # map.insert(String::from("Rosa Luxemburg"), 1);
//!
//! // This will wait until a value is put under the key "Rosa Luxemburg"
//! if let Some(value) = map.wait("Rosa Luxemburg").await {
//! //...
//! }
//! # Ok(())
//! # }
//! ```
//!
//! Waits are cancellable. Cancelled waits evaluate to `None`.
//! ```
//! # extern crate async_std;
//! # extern crate waitmap;
//! # use async_std::{main, task};
//! # use std::time::Duration;
//! # use std::sync::Arc;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new());
//! let map1 = map.clone();
//!
//! let handle = task::spawn(async move {
//! let result = map.wait("Voltairine de Cleyre").await;
//! assert!(result.is_none());
//! });
//!
//! task::spawn(async move {
//! task::sleep(Duration::from_millis(100)).await; // avoid deadlock
//! map1.cancel("Voltairine de Cleyre");
//! });
//!
//! task::block_on(handle);
//! # Ok(())
//! # }
//! ```
mod wait;
mod waker_set;
use std::borrow::Borrow;
use std::collections::hash_map::RandomState;
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::mem;
use dashmap::mapref::entry::Entry::*;
use dashmap::mapref::one;
use dashmap::DashMap;
use wait::{Wait, WaitMut};
use waker_set::WakerSet;
use WaitEntry::*;
/// An asynchronous concurrent hashmap.
pub struct WaitMap<K, V, S = RandomState> {
map: DashMap<K, WaitEntry<V>, S>,
}
impl<K: Hash + Eq, V> WaitMap<K, V> {
/// Make a new `WaitMap` using the default hasher.
pub fn new() -> WaitMap<K, V> {
WaitMap {
map: DashMap::with_hasher(RandomState::default()),
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> {
/// Make a new `WaitMap` using a custom hasher.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::WaitMap;
/// use std::collections::hash_map::RandomState;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new());
/// # Ok(())
/// # }
/// ```
pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> {
WaitMap {
map: DashMap::with_hasher(hasher),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If there are any pending `wait` calls for this key, they are woken up.
///
/// If the map did have this key present, the value is updated and the old value is returned.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, sync::Arc, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new());
///
/// let insert_fut = async { map.insert("hi".to_string(), 0) };
/// let wait_fut = map.wait("hi");
///
/// let (insert_res, wait_res) = insert_fut.join(wait_fut).await;
/// assert!(insert_res.is_none());
/// assert!(wait_res.is_some());
/// # Ok(())
/// # }
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
match self.map.entry(key) {
Occupied(mut entry) => {
match mem::replace(entry.get_mut(), Filled(value)) {
Waiting(wakers) => {
drop(entry); // drop early to release lock before waking other tasks
wakers.wake();
None
}
Filled(value) => Some(value),
}
}
Vacant(slot) => {
slot.insert(Filled(value));
None
}
}
}
pub fn get<Q:?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(Ref {
inner: self.map.get(key)?,
})
}
pub fn get_mut<Q:?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(RefMut {
inner: self.map.get_mut(key)?,
})
}
pub fn wait<'a: 'f, 'b: 'f, 'f, Q:?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
Wait::new(&self.map, qey)
}
pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q:?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
WaitMut::new(&self.map, qey)
}
pub fn cancel<Q:?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) =
self.map.remove_if(
key,
|_, entry| {
if let Waiting(_) = entry {
true
} else {
false
}
},
)
{
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn remove<Q:?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) = self.map.remove(key) {
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn clear(&self) {
self.map.retain(|_, v| {
if let Waiting(wakers) = v {
mem::replace(wakers, WakerSet::new()).wake();
}
false
});
}
pub fn clear_not_waiting(&self) {
self.map
.retain(|_, v| if let Waiting(_) = v { true } else { false });
}
pub fn len(&self) -> usize {
self.map.len()
}
/// Cancels all outstanding `waits` on the map.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, stream, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let mut waitstream =
/// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]);
///
/// map.cancel_all();
///
/// let mut num_cancelled = 0;
/// while let Some(wait_fut) = waitstream.next().await {
/// assert!(wait_fut.await.is_none());
/// num_cancelled += 1;
/// }
///
/// assert!(num_cancelled == 3);
/// # Ok(())
/// # }
/// ```
pub fn cancel_all(&self) |
}
enum WaitEntry<V> {
Waiting(WakerSet),
Filled(V),
}
/// A shared reference to a `WaitMap` key-value pair.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::{Ref, WaitMap};
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let emma = "Emma Goldman".to_string();
///
/// map.insert(emma.clone(), 0);
/// let kv: Ref<String, i32, _> = map.get(&emma).unwrap();
///
/// assert!(*kv.key() == emma);
/// assert!(*kv.value() == 0);
/// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0));
/// # Ok(())
/// # }
/// ```
pub struct Ref<'a, K, V, S> {
inner: one::Ref<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
}
/// An exclusive reference to a `WaitMap` key-value pair.
pub struct RefMut<'a, K, V, S> {
inner: one::RefMut<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn value_mut(&mut self) -> &mut V {
match self.inner.value_mut() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
pub fn pair_mut(&mut self) -> (&K, &mut V) {
match self.inner.pair_mut() {
(key, Filled(value)) => (key, value),
_ => panic!(),
}
}
}
| {
self.map.retain(|_, entry| {
if let Waiting(wakers) = entry {
// NB: In theory, there is a deadlock risk: if a task is awoken before the
// retain is completed, it may see a waiting entry with an empty waker set,
// rather than a missing entry.
//
// However, this is prevented by the memory guards already present in DashMap.
// No other task will be able to view this entry until the guard on this shard
// has been dropped, which will not occur until this shard's unretained members
// have actually been removed.
mem::replace(wakers, WakerSet::new()).wake();
false
} else {
true
}
})
} | identifier_body |
lib.rs | //! Async concurrent hashmap built on top of [dashmap](https://docs.rs/dashmap/).
//!
//! # Wait
//! [`WaitMap`](crate::WaitMap) is a concurrent hashmap with an asynchronous `wait` operation.
//! ```
//! # extern crate async_std;
//! # extern crate waitmap;
//! # use async_std::main;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: WaitMap<String, i32> = WaitMap::new();
//! # map.insert(String::from("Rosa Luxemburg"), 1);
//!
//! // This will wait until a value is put under the key "Rosa Luxemburg"
//! if let Some(value) = map.wait("Rosa Luxemburg").await {
//! //...
//! }
//! # Ok(())
//! # }
//! ```
//!
//! Waits are cancellable. Cancelled waits evaluate to `None`.
//! ```
//! # extern crate async_std;
//! # extern crate waitmap;
//! # use async_std::{main, task};
//! # use std::time::Duration;
//! # use std::sync::Arc;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new());
//! let map1 = map.clone();
//!
//! let handle = task::spawn(async move {
//! let result = map.wait("Voltairine de Cleyre").await;
//! assert!(result.is_none());
//! });
//!
//! task::spawn(async move {
//! task::sleep(Duration::from_millis(100)).await; // avoid deadlock
//! map1.cancel("Voltairine de Cleyre");
//! });
//!
//! task::block_on(handle);
//! # Ok(())
//! # }
//! ```
mod wait;
mod waker_set;
use std::borrow::Borrow;
use std::collections::hash_map::RandomState;
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::mem;
use dashmap::mapref::entry::Entry::*;
use dashmap::mapref::one;
use dashmap::DashMap;
use wait::{Wait, WaitMut};
use waker_set::WakerSet;
use WaitEntry::*;
/// An asynchronous concurrent hashmap.
pub struct WaitMap<K, V, S = RandomState> {
map: DashMap<K, WaitEntry<V>, S>,
}
impl<K: Hash + Eq, V> WaitMap<K, V> {
/// Make a new `WaitMap` using the default hasher.
pub fn new() -> WaitMap<K, V> {
WaitMap {
map: DashMap::with_hasher(RandomState::default()),
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> {
/// Make a new `WaitMap` using a custom hasher.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::WaitMap;
/// use std::collections::hash_map::RandomState;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new());
/// # Ok(())
/// # }
/// ```
pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> {
WaitMap {
map: DashMap::with_hasher(hasher),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If there are any pending `wait` calls for this key, they are woken up.
///
/// If the map did have this key present, the value is updated and the old value is returned.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, sync::Arc, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new());
///
/// let insert_fut = async { map.insert("hi".to_string(), 0) };
/// let wait_fut = map.wait("hi");
///
/// let (insert_res, wait_res) = insert_fut.join(wait_fut).await;
/// assert!(insert_res.is_none());
/// assert!(wait_res.is_some());
/// # Ok(())
/// # }
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
match self.map.entry(key) {
Occupied(mut entry) => {
match mem::replace(entry.get_mut(), Filled(value)) {
Waiting(wakers) => {
drop(entry); // drop early to release lock before waking other tasks
wakers.wake();
None
}
Filled(value) => Some(value),
}
}
Vacant(slot) => {
slot.insert(Filled(value));
None
}
}
}
pub fn get<Q:?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(Ref {
inner: self.map.get(key)?,
})
}
pub fn get_mut<Q:?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(RefMut {
inner: self.map.get_mut(key)?,
})
}
pub fn wait<'a: 'f, 'b: 'f, 'f, Q:?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
Wait::new(&self.map, qey)
}
pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q:?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
WaitMut::new(&self.map, qey)
}
pub fn cancel<Q:?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) =
self.map.remove_if(
key,
|_, entry| {
if let Waiting(_) = entry {
true
} else {
false
}
},
)
{
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn remove<Q:?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) = self.map.remove(key) {
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn clear(&self) {
self.map.retain(|_, v| {
if let Waiting(wakers) = v {
mem::replace(wakers, WakerSet::new()).wake();
}
false
});
}
pub fn clear_not_waiting(&self) {
self.map
.retain(|_, v| if let Waiting(_) = v { true } else { false });
}
pub fn len(&self) -> usize {
self.map.len()
}
/// Cancels all outstanding `waits` on the map.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, stream, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let mut waitstream =
/// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]);
///
/// map.cancel_all();
///
/// let mut num_cancelled = 0;
/// while let Some(wait_fut) = waitstream.next().await {
/// assert!(wait_fut.await.is_none());
/// num_cancelled += 1;
/// }
///
/// assert!(num_cancelled == 3);
/// # Ok(())
/// # }
/// ```
pub fn cancel_all(&self) {
self.map.retain(|_, entry| {
if let Waiting(wakers) = entry {
// NB: In theory, there is a deadlock risk: if a task is awoken before the
// retain is completed, it may see a waiting entry with an empty waker set,
// rather than a missing entry.
//
// However, this is prevented by the memory guards already present in DashMap.
// No other task will be able to view this entry until the guard on this shard
// has been dropped, which will not occur until this shard's unretained members
// have actually been removed.
mem::replace(wakers, WakerSet::new()).wake();
false
} else {
true
}
})
}
}
enum WaitEntry<V> {
Waiting(WakerSet),
Filled(V),
}
/// A shared reference to a `WaitMap` key-value pair.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::{Ref, WaitMap};
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let emma = "Emma Goldman".to_string();
///
/// map.insert(emma.clone(), 0);
/// let kv: Ref<String, i32, _> = map.get(&emma).unwrap();
///
/// assert!(*kv.key() == emma);
/// assert!(*kv.value() == 0);
/// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0));
/// # Ok(())
/// # }
/// ```
pub struct Ref<'a, K, V, S> {
inner: one::Ref<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
}
/// An exclusive reference to a `WaitMap` key-value pair.
pub struct RefMut<'a, K, V, S> {
inner: one::RefMut<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn | (&mut self) -> &mut V {
match self.inner.value_mut() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
pub fn pair_mut(&mut self) -> (&K, &mut V) {
match self.inner.pair_mut() {
(key, Filled(value)) => (key, value),
_ => panic!(),
}
}
}
| value_mut | identifier_name |
lib.rs | //! Async concurrent hashmap built on top of [dashmap](https://docs.rs/dashmap/).
//!
//! # Wait
//! [`WaitMap`](crate::WaitMap) is a concurrent hashmap with an asynchronous `wait` operation.
//! ```
//! # extern crate async_std;
//! # extern crate waitmap;
//! # use async_std::main;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: WaitMap<String, i32> = WaitMap::new();
//! # map.insert(String::from("Rosa Luxemburg"), 1);
//!
//! // This will wait until a value is put under the key "Rosa Luxemburg"
//! if let Some(value) = map.wait("Rosa Luxemburg").await {
//! //...
//! }
//! # Ok(())
//! # }
//! ```
//!
//! Waits are cancellable. Cancelled waits evaluate to `None`.
//! ```
//! # extern crate async_std;
//! # extern crate waitmap;
//! # use async_std::{main, task};
//! # use std::time::Duration;
//! # use std::sync::Arc;
//! # use waitmap::WaitMap;
//! # #[async_std::main]
//! # async fn main() -> std::io::Result<()> {
//! let map: Arc<WaitMap<String, String>> = Arc::new(WaitMap::new());
//! let map1 = map.clone();
//!
//! let handle = task::spawn(async move {
//! let result = map.wait("Voltairine de Cleyre").await;
//! assert!(result.is_none());
//! });
//!
//! task::spawn(async move {
//! task::sleep(Duration::from_millis(100)).await; // avoid deadlock
//! map1.cancel("Voltairine de Cleyre");
//! });
//!
//! task::block_on(handle);
//! # Ok(())
//! # }
//! ```
mod wait;
mod waker_set;
use std::borrow::Borrow;
use std::collections::hash_map::RandomState;
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::mem;
use dashmap::mapref::entry::Entry::*;
use dashmap::mapref::one;
use dashmap::DashMap;
use wait::{Wait, WaitMut};
use waker_set::WakerSet;
use WaitEntry::*;
/// An asynchronous concurrent hashmap.
pub struct WaitMap<K, V, S = RandomState> {
map: DashMap<K, WaitEntry<V>, S>,
}
impl<K: Hash + Eq, V> WaitMap<K, V> {
/// Make a new `WaitMap` using the default hasher.
pub fn new() -> WaitMap<K, V> {
WaitMap {
map: DashMap::with_hasher(RandomState::default()),
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Clone> WaitMap<K, V, S> {
/// Make a new `WaitMap` using a custom hasher.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::WaitMap;
/// use std::collections::hash_map::RandomState;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<i32, String> = WaitMap::with_hasher(RandomState::new());
/// # Ok(())
/// # }
/// ```
pub fn with_hasher(hasher: S) -> WaitMap<K, V, S> {
WaitMap {
map: DashMap::with_hasher(hasher),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If there are any pending `wait` calls for this key, they are woken up.
///
/// If the map did have this key present, the value is updated and the old value is returned.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, sync::Arc, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: Arc<WaitMap<String, i32>> = Arc::new(WaitMap::new());
///
/// let insert_fut = async { map.insert("hi".to_string(), 0) };
/// let wait_fut = map.wait("hi");
///
/// let (insert_res, wait_res) = insert_fut.join(wait_fut).await;
/// assert!(insert_res.is_none());
/// assert!(wait_res.is_some());
/// # Ok(())
/// # }
/// ```
pub fn insert(&self, key: K, value: V) -> Option<V> {
match self.map.entry(key) {
Occupied(mut entry) => {
match mem::replace(entry.get_mut(), Filled(value)) {
Waiting(wakers) => {
drop(entry); // drop early to release lock before waking other tasks
wakers.wake();
None
}
Filled(value) => Some(value),
}
}
Vacant(slot) => {
slot.insert(Filled(value));
None
}
}
}
pub fn get<Q:?Sized + Hash + Eq>(&self, key: &Q) -> Option<Ref<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(Ref {
inner: self.map.get(key)?,
})
}
pub fn get_mut<Q:?Sized + Hash + Eq>(&self, key: &Q) -> Option<RefMut<'_, K, V, S>>
where
K: Borrow<Q>,
{
Some(RefMut {
inner: self.map.get_mut(key)?,
})
}
pub fn wait<'a: 'f, 'b: 'f, 'f, Q:?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<Ref<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
Wait::new(&self.map, qey)
}
pub fn wait_mut<'a: 'f, 'b: 'f, 'f, Q:?Sized + Hash + Eq>(
&'a self,
qey: &'b Q,
) -> impl Future<Output = Option<RefMut<'a, K, V, S>>> + 'f
where
K: Borrow<Q> + From<&'b Q>,
{
let key = K::from(qey);
self.map.entry(key).or_insert(Waiting(WakerSet::new()));
WaitMut::new(&self.map, qey)
}
pub fn cancel<Q:?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) =
self.map.remove_if(
key,
|_, entry| {
if let Waiting(_) = entry {
true
} else {
false
}
},
)
{
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn remove<Q:?Sized + Hash + Eq>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
{
if let Some((_, entry)) = self.map.remove(key) {
if let Waiting(wakers) = entry {
wakers.wake();
}
true
} else {
false
}
}
pub fn clear(&self) {
self.map.retain(|_, v| {
if let Waiting(wakers) = v {
mem::replace(wakers, WakerSet::new()).wake();
}
false
});
}
pub fn clear_not_waiting(&self) {
self.map
.retain(|_, v| if let Waiting(_) = v | else { false });
}
pub fn len(&self) -> usize {
self.map.len()
}
/// Cancels all outstanding `waits` on the map.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::{main, stream, prelude::*};
/// # use waitmap::WaitMap;
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let mut waitstream =
/// stream::from_iter(vec![map.wait("we"), map.wait("are"), map.wait("waiting")]);
///
/// map.cancel_all();
///
/// let mut num_cancelled = 0;
/// while let Some(wait_fut) = waitstream.next().await {
/// assert!(wait_fut.await.is_none());
/// num_cancelled += 1;
/// }
///
/// assert!(num_cancelled == 3);
/// # Ok(())
/// # }
/// ```
pub fn cancel_all(&self) {
self.map.retain(|_, entry| {
if let Waiting(wakers) = entry {
// NB: In theory, there is a deadlock risk: if a task is awoken before the
// retain is completed, it may see a waiting entry with an empty waker set,
// rather than a missing entry.
//
// However, this is prevented by the memory guards already present in DashMap.
// No other task will be able to view this entry until the guard on this shard
// has been dropped, which will not occur until this shard's unretained members
// have actually been removed.
mem::replace(wakers, WakerSet::new()).wake();
false
} else {
true
}
})
}
}
enum WaitEntry<V> {
Waiting(WakerSet),
Filled(V),
}
/// A shared reference to a `WaitMap` key-value pair.
/// ```
/// # extern crate async_std;
/// # extern crate waitmap;
/// # use async_std::main;
/// # use waitmap::{Ref, WaitMap};
/// # #[async_std::main]
/// # async fn main() -> std::io::Result<()> {
/// let map: WaitMap<String, i32> = WaitMap::new();
/// let emma = "Emma Goldman".to_string();
///
/// map.insert(emma.clone(), 0);
/// let kv: Ref<String, i32, _> = map.get(&emma).unwrap();
///
/// assert!(*kv.key() == emma);
/// assert!(*kv.value() == 0);
/// assert!(kv.pair() == (&"Emma Goldman".to_string(), &0));
/// # Ok(())
/// # }
/// ```
pub struct Ref<'a, K, V, S> {
inner: one::Ref<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
}
/// An exclusive reference to a `WaitMap` key-value pair.
pub struct RefMut<'a, K, V, S> {
inner: one::RefMut<'a, K, WaitEntry<V>, S>,
}
impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> {
pub fn key(&self) -> &K {
self.inner.key()
}
pub fn value(&self) -> &V {
match self.inner.value() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn value_mut(&mut self) -> &mut V {
match self.inner.value_mut() {
Filled(value) => value,
_ => panic!(),
}
}
pub fn pair(&self) -> (&K, &V) {
(self.key(), self.value())
}
pub fn pair_mut(&mut self) -> (&K, &mut V) {
match self.inner.pair_mut() {
(key, Filled(value)) => (key, value),
_ => panic!(),
}
}
}
| { true } | conditional_block |
maimemo_client.rs | use crate::client::*;
use crate::config::*;
use chrono::Local;
use cookie_store::CookieStore;
use reqwest::Client;
use scraper::{Html, Selector};
use serde::{Deserialize, Serialize};
use std::fmt;
/// notepad包含必要的header info和内容detail
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Notepad { | brief: String,
created_time: Option<String>,
updated_time: Option<String>,
contents: Option<String>,
}
impl Notepad {
pub fn get_notepad_id(&self) -> &str {
&self.notepad_id
}
pub fn set_contents(&mut self, contents: Option<String>) {
self.contents = contents;
}
pub fn get_contents(&self) -> Option<&str> {
self.contents.as_deref()
}
pub fn get_contents_mut(&mut self) -> Option<&mut String> {
self.contents.as_mut()
}
}
impl fmt::Display for Notepad {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut temp = self.clone();
// 仅输出第一行 与 total length
let contents = temp.contents.as_mut().unwrap();
let total_len = contents.len();
contents.drain(contents.find("\n").unwrap_or(total_len)..);
contents.push_str("... total length: ");
contents.push_str(&total_len.to_string());
write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap())
}
}
#[derive(Debug, Serialize, Deserialize)]
struct ResponseResult {
error: String,
valid: i32,
total: usize,
notepad: Option<Vec<Notepad>>,
}
/// maimemo提供一些访问操作。
pub struct MaimemoClient {
client: Client,
config: AppConfig,
cookie_store: CookieStore,
user_token_name: String,
}
impl std::ops::Drop for MaimemoClient {
/// 在退出时保存cookie store
fn drop(&mut self) {
if let Some(path) = self.config.get_cookie_path() {
if let Err(e) = save_cookie_store(path, &self.cookie_store) {
error!("save cookie store failed: {}", e);
}
}
}
}
impl MaimemoClient {
/// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。
pub fn new(config: AppConfig) -> Result<Self, String> {
let cookie_store = build_cookie_store(config.get_cookie_path())?;
Ok(Self {
client: build_general_client()?,
config,
cookie_store: cookie_store,
user_token_name: "userToken".to_string(),
})
}
pub fn get_user_token_val(&self) -> Option<&str> {
self.cookie_store
.get("www.maimemo.com", "/", &self.user_token_name)
.map(|c| c.value())
}
pub fn has_logged(&self) -> bool {
self.get_user_token_val().is_some()
}
/// 登录并更新config.cookies
pub async fn login(&mut self) -> Result<(), String> {
let req_name = "login";
let form = [
("email", self.config.get_username()),
("password", self.config.get_password()),
];
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?;
// login failed
// Check if the user token exists
update_set_cookies(&mut self.cookie_store, &resp);
if!self.has_logged() {
error!(
"update cookie store failed. not found cookie: [{}] in cookie_store",
self.user_token_name
);
Err("login failed. not found cookie store".to_string())
} else {
debug!("login successful");
Ok(())
}
}
/// 提供完整的notepad list调用get_notepad_list与get_notepad_contents
pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> {
let mut notepads = self.get_notepad_list().await?;
for notepad in &mut notepads {
let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?;
notepad.set_contents(Some(contents));
}
Ok(notepads)
}
/// 获取notepad list
pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-search";
//?token={user_token}
let url_handler = |url: &str| {
let user_token = self.get_user_token_val().expect("not found user token");
url.to_string() + user_token
};
let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1});
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
Some(&payload),
)
.await?;
let result = resp
.json::<ResponseResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(notepad) = result.notepad {
debug!("got notepad list. len: {}", notepad.len());
Ok(notepad)
} else {
error!("get notepad failed: {:?}", result);
Err("get notepad failed".to_string())
}
}
/// 获取notepad中单词文本
pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-detail";
let url_handler = |url: &str| url.to_string() + notepad_id;
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await?;
Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?)
}
/// 刷新下载notepad对应的captcha返回文件全路径。
pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "service-captcha";
let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string();
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await
.map_err(|e| format!("{:?}", e))?;
let contents = resp
.bytes()
.await
.map(|body| body.to_vec())
.map_err(|e| format!("{:?}", e))?;
Ok(contents)
}
/// 保存notepad
///
/// 注意:maimemo要求先获取验证码,再保存。并且要求是同一机器发送的。在win host浏览器刷新验证码,
/// 但在wsl2 保存则不会生效,很可能是对比的发送的数据包是否来自同一机器
pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-save";
if notepad.contents.is_none() {
return Err("notepad contents is none".to_string());
}
// form
let mut form = std::collections::HashMap::new();
form.insert("id".to_string(), notepad.notepad_id);
form.insert("title".to_string(), notepad.title);
form.insert("brief".to_string(), notepad.brief);
form.insert("content".to_string(), notepad.contents.unwrap());
form.insert(
"is_private".to_string(),
(notepad.is_private == 1).to_string(),
);
form.insert("captcha".to_string(), captcha);
let form = form
.iter()
.map(|(key, val)| (key.as_str(), val.as_str()))
.collect::<Vec<_>>();
#[derive(Debug, Serialize, Deserialize)]
struct RespResult {
valid: i8,
#[serde(rename = "errorCode")]
error: Option<String>,
}
let result: RespResult = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?
.json::<RespResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(e) = &result.error {
error!("save notepad failed: {:?}", result);
return Err(format!("save notepad failed: {}", e));
}
debug!("save_notepad successful");
Ok(())
}
/// 从response html body中取出单词文本
fn parse_notepad_text(html: &str) -> Result<String, String> {
if html.is_empty() {
return Err("html is empty".to_string());
}
debug!("parsing notepad html");
let id = "#content";
let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?;
let document = Html::parse_document(html);
document
.select(&id_selector)
.next()
.map(|e| e.inner_html())
.ok_or_else(|| {
error!("not found element {} in html: \n{}", id, html);
format!("not found element {} in html", id)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const CONFIG_PATH: &str = "config.yml";
#[tokio::test]
async fn try_login() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
client.login().await.map_err(|e| format!("{:?}", e))?;
Ok(())
}
#[tokio::test]
async fn get_notepad_list() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await?;
assert!(notepads.len() > 0);
Ok(())
}
#[tokio::test]
async fn get_notepad_contents() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await?;
// for notepad in notepads {
let contents = client.get_notepad_contents(¬epads[0].notepad_id).await?;
assert!(contents.len() > 0);
assert!(contents.contains("\n"));
// }
Ok(())
}
#[allow(dead_code)]
fn init_log() {
pretty_env_logger::formatted_builder()
//.format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args()))
.filter_module("dict", log::LevelFilter::Trace)
.init();
}
#[tokio::test]
async fn refresh_captcha() -> Result<(), String> {
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let data = client.refresh_captcha().await?;
assert!(data.len() > 0);
// assert!(path.is_file());
// let new_file = std::fs::read(path).map_err(|e| format!("{:?}", e))?;
// if let Ok(old_file) = old_file {
// assert_ne!(old_file, new_file);
// }
Ok(())
}
} | is_private: u8,
notepad_id: String,
title: String, | random_line_split |
maimemo_client.rs | use crate::client::*;
use crate::config::*;
use chrono::Local;
use cookie_store::CookieStore;
use reqwest::Client;
use scraper::{Html, Selector};
use serde::{Deserialize, Serialize};
use std::fmt;
/// notepad包含必要的header info和内容detail
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Notepad {
is_private: u8,
notepad_id: String,
title: String,
brief: String,
created_time: Option<String>,
updated_time: Option<String>,
contents: Option<String>,
}
impl Notepad {
pub fn get_notepad_id(&self) -> &str {
&self.notepad_id
}
pub fn set_contents(&mut self, contents: Option<String>) {
self.contents = contents;
}
pub fn get_contents(&self) -> Option<&str> {
self.contents.as_deref()
}
pub fn get_contents_mut(&mut self) -> Option<&mut String> {
self.contents.as_mut()
}
}
impl fmt::Display for Notepad {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut temp = self.clone();
// 仅输出第一行 与 total length
let contents = temp.contents.as_mut().unwrap();
let total_len = contents.len();
contents.drain(contents.find("\n").unwrap_or(total_len)..);
contents.push_str("... total length: ");
contents.push_str(&total_len.to_string());
write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap())
}
}
#[derive(Debug, Serialize, Deserialize)]
struct ResponseResult {
error: String,
valid: i32,
total: usize,
notepad: Option<Vec<Notepad>>,
}
/// maimemo提供一些访问操作。
pub struct MaimemoClient {
client: Client,
config: AppConfig,
cookie_store: CookieStore,
user_token_name: String,
}
impl std::ops::Drop for MaimemoClient {
/// 在退出时保存cookie store
fn drop(&mut self) {
if let Some(path) = self.config.get_cookie_path() {
if let Err(e) = save_cookie_store(path, &self.cookie_store) {
error!("save cookie store failed: {}", e);
}
}
}
}
impl MaimemoClient {
/// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。
pub fn new(config: AppConfig) -> Result<Self, String> {
let cookie_store = build_cookie_store(config.get_cookie_path())?;
Ok(Self {
client: build_general_client()?,
config,
cookie_store: cookie_store,
user_token_name: "userToken".to_string(),
})
}
pub fn get_user_token_val(&self) -> Option<&str> {
self.cookie_store
.get("www.maimemo.com", "/", &self.user_token_name)
.map(|c| c.value())
}
pub fn has_logged(&self) -> bool {
self.get_user_token_val().is_some()
}
/// 登录并更新config.cookies
pub async fn login(&mut self) -> Result<(), String> {
let req_name = "login";
let form = [
("email", self.config.get_username()),
("password", self.config.get_password()),
];
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?;
// login failed
// Check if the user token exists
update_set_cookies(&mut self.cookie_store, &resp);
if!self.has_logged() {
error!(
"update cookie store failed. not found cookie: [{}] in cookie_store",
self.user_token_name
);
Err("login failed. not found cookie store".to_string())
} else {
debug!("login successful");
Ok(())
}
}
/// 提供完整的notepad list调用get_notepad_list与get_notepad_contents
pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> {
let mut notepads = self.get_notepad_list().await?;
for notepad in &mut notepads {
let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?;
notepad.set_contents(Some(contents));
}
Ok(notepads)
}
/// 获取notepad list
pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-search";
//?token={user_token}
let url_handler = |url: &str| {
let user_token = self.get_user_token_val().expect("not found user token");
url.to_string() + user_token
};
let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1});
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
Some(&payload),
)
.await?;
let result = resp
.json::<ResponseResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(notepad) = result.notepad {
debug!("got notepad list. len: {}", notepad.len());
Ok(notepad)
} else {
error!("get notepad failed: {:?}", result);
Err("get notepad failed".to_string())
}
}
/// 获取notepad中单词文本
pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-detail";
let url_handler = |url: &str| url.to_string() + notepad_id;
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await?;
Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?)
}
/// 刷新下载notepad对应的captcha返回文件全路径。
pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "service-captcha";
let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string();
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await
.map_err(|e| format!("{:?}", e))?;
let contents = resp
.bytes()
.await
.map(|body| body.to_vec())
.map_err(|e| format!("{:?}", e))?;
Ok(contents)
}
/// 保存notepad
///
/// 注意:maimemo要求先获取验证码,再保存。并且要求是同一机器发送的。在win host浏览器刷新验证码,
/// 但在wsl2 保存则不会生效,很可能是对比的发送的数据包是否来自同一机器
pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-save";
if notepad.contents.is_none() {
return Err("notepad contents is none".to_string());
}
// form
let mut form = std::collections::HashMap::new();
form.insert("id".to_string(), notepad.notepad_id);
form.insert("title".to_string(), notepad.title);
form.insert("brief".to_string(), notepad.brief);
form.insert("content".to_string(), notepad.contents.unwrap());
form.insert(
"is_private".to_string(),
(notepad.is_private == 1).to_string(),
);
form.insert("captcha".to_string(), captcha);
let form = form
.iter()
.map(|(key, val)| (key.as_str(), val.as_str()))
.collect::<Vec<_>>();
#[derive(Debug, Serialize, Deserialize)]
struct RespResult {
valid: i8,
#[serde(rename = "errorCode")]
error: Option<String>,
}
let result: RespResult = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?
.json::<RespResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(e) = &result.error {
error!("save notepad failed: {:?}", result);
return Err(format!("save notepad failed: {}", e));
}
debug!("save_notepad successful");
Ok(())
}
/// 从response html body中取出单词文本
fn parse_notepad_text(html: &str) -> Result<String, String> {
if html.is_empty() {
return Err("html is empty".to_string());
}
debug!("parsing notepad html");
let id = "#content";
let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?;
let document = Html::parse_document(html);
document
.select(&id | gin().await.map_err(|e| format!("{:?}", e))?;
Ok(())
}
#[tokio::test]
async fn get_notepad_list() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await?;
assert!(notepads.len() > 0);
Ok(())
}
#[tokio::test]
async fn get_notepad_contents() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await?;
// for notepad in notepads {
let contents = client.get_notepad_contents(¬epads[0].notepad_id).await?;
assert!(contents.len() > 0);
assert!(contents.contains("\n"));
// }
Ok(())
}
#[allow(dead_code)]
fn init_log() {
pretty_env_logger::formatted_builder()
//.format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args()))
.filter_module("dict", log::LevelFilter::Trace)
.init();
}
#[tokio::test]
async fn refresh_captcha() -> Result<(), String> {
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let data = client.refresh_captcha().await?;
assert!(data.len() > 0);
// assert!(path.is_file());
// let new_file = std::fs::read(path).map_err(|e| format!("{:?}", e))?;
// if let Ok(old_file) = old_file {
// assert_ne!(old_file, new_file);
// }
Ok(())
}
}
| _selector)
.next()
.map(|e| e.inner_html())
.ok_or_else(|| {
error!("not found element {} in html: \n{}", id, html);
format!("not found element {} in html", id)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const CONFIG_PATH: &str = "config.yml";
#[tokio::test]
async fn try_login() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
client.lo | identifier_body |
maimemo_client.rs | use crate::client::*;
use crate::config::*;
use chrono::Local;
use cookie_store::CookieStore;
use reqwest::Client;
use scraper::{Html, Selector};
use serde::{Deserialize, Serialize};
use std::fmt;
/// notepad包含必要的header info和内容detail
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Notepad {
is_private: u8,
notepad_id: String,
title: String,
brief: String,
created_time: Option<String>,
updated_time: Option<String>,
contents: Option<String>,
}
impl Notepad {
pub fn get_notepad_id(&self) -> &str {
&self.notepad_id
}
pub fn set_contents(&mut self, contents: Option<String>) {
self.contents = contents;
}
pub fn get_contents(&self) -> Option<&str> {
self.contents.as_deref()
}
pub fn get_contents_mut(&mut self) -> Option<&mut String> {
self.contents.as_mut()
}
}
impl fmt::Display for Notepad {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut temp = self.clone();
// 仅输出第一行 与 total length
let contents = temp.contents.as_mut().unwrap();
let total_len = contents.len();
contents.drain(contents.find("\n").unwrap_or(total_len)..);
contents.push_str("... total length: ");
contents.push_str(&total_len.to_string());
write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap())
}
}
#[derive(Debug, Serialize, Deserialize)]
struct ResponseResult {
error: String,
valid: i32,
total: usize,
notepad: Option<Vec<Notepad>>,
}
/// maimemo提供一些访问操作。
pub struct MaimemoClient {
client: Client,
config: AppConfig,
cookie_store: CookieStore,
user_token_name: String,
}
impl std::ops::Drop for MaimemoClient {
/// 在退出时保存cookie store
fn drop(&mut self) {
if let Some(path) = self.config.get_cookie_path() {
if let Err(e) = save_cookie_store(path, &self.cookie_store) {
error!("save cookie store failed: {}", e);
}
}
}
}
impl MaimemoClient {
/// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。
pub fn new(config: AppConfig) -> Result<Self, String> {
let cookie_store = build_cookie_store(config.get_cookie_path())?;
Ok(Self {
client: build_general_client()?,
config,
cookie_store: cookie_store,
user_token_name: "userToken".to_string(),
})
}
pub fn get_user_token_val(&self) -> Option<&str> {
self.cookie_store
.get("www.maimemo.com", "/", &self.user_token_name)
.map(|c| c.value())
}
pub fn has_logged(&self) -> bool {
self.get_user_token_val().is_some()
}
/// 登录并更新config.cookies
pub async fn login(&mut self) -> Result<(), String> {
let req_name = "login";
let form = [
("email", self.config.get_username()),
("password", self.config.get_password()),
];
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?;
// login failed
// Check if the user token exists
update_set_cookies(&mut self.cookie_store, &resp);
if!self.has_logged() {
error!(
"update cookie store failed. not found cookie: [{}] in cookie_store",
self.user_token_name
);
Err("login failed. not found cookie store".to_string())
} else {
debug!("login successful");
Ok(())
}
}
/// 提供完整的notepad list调用get_notepad_list与get_notepad_contents
pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> {
let mut notepads = self.get_notepad_list().await?;
for notepad in &mut notepads {
let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?;
notepad.set_contents(Some(contents));
}
Ok(notepads)
}
/// 获取notepad list
pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-search";
//?token={user_ | ser_token = self.get_user_token_val().expect("not found user token");
url.to_string() + user_token
};
let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1});
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
Some(&payload),
)
.await?;
let result = resp
.json::<ResponseResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(notepad) = result.notepad {
debug!("got notepad list. len: {}", notepad.len());
Ok(notepad)
} else {
error!("get notepad failed: {:?}", result);
Err("get notepad failed".to_string())
}
}
/// 获取notepad中单词文本
pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-detail";
let url_handler = |url: &str| url.to_string() + notepad_id;
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await?;
Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?)
}
/// 刷新下载notepad对应的captcha返回文件全路径。
pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "service-captcha";
let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string();
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await
.map_err(|e| format!("{:?}", e))?;
let contents = resp
.bytes()
.await
.map(|body| body.to_vec())
.map_err(|e| format!("{:?}", e))?;
Ok(contents)
}
/// 保存notepad
///
/// 注意:maimemo要求先获取验证码,再保存。并且要求是同一机器发送的。在win host浏览器刷新验证码,
/// 但在wsl2 保存则不会生效,很可能是对比的发送的数据包是否来自同一机器
pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-save";
if notepad.contents.is_none() {
return Err("notepad contents is none".to_string());
}
// form
let mut form = std::collections::HashMap::new();
form.insert("id".to_string(), notepad.notepad_id);
form.insert("title".to_string(), notepad.title);
form.insert("brief".to_string(), notepad.brief);
form.insert("content".to_string(), notepad.contents.unwrap());
form.insert(
"is_private".to_string(),
(notepad.is_private == 1).to_string(),
);
form.insert("captcha".to_string(), captcha);
let form = form
.iter()
.map(|(key, val)| (key.as_str(), val.as_str()))
.collect::<Vec<_>>();
#[derive(Debug, Serialize, Deserialize)]
struct RespResult {
valid: i8,
#[serde(rename = "errorCode")]
error: Option<String>,
}
let result: RespResult = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?
.json::<RespResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(e) = &result.error {
error!("save notepad failed: {:?}", result);
return Err(format!("save notepad failed: {}", e));
}
debug!("save_notepad successful");
Ok(())
}
/// 从response html body中取出单词文本
fn parse_notepad_text(html: &str) -> Result<String, String> {
if html.is_empty() {
return Err("html is empty".to_string());
}
debug!("parsing notepad html");
let id = "#content";
let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?;
let document = Html::parse_document(html);
document
.select(&id_selector)
.next()
.map(|e| e.inner_html())
.ok_or_else(|| {
error!("not found element {} in html: \n{}", id, html);
format!("not found element {} in html", id)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const CONFIG_PATH: &str = "config.yml";
#[tokio::test]
async fn try_login() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
client.login().await.map_err(|e| format!("{:?}", e))?;
Ok(())
}
#[tokio::test]
async fn get_notepad_list() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await?;
assert!(notepads.len() > 0);
Ok(())
}
#[tokio::test]
async fn get_notepad_contents() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await?;
// for notepad in notepads {
let contents = client.get_notepad_contents(¬epads[0].notepad_id).await?;
assert!(contents.len() > 0);
assert!(contents.contains("\n"));
// }
Ok(())
}
#[allow(dead_code)]
fn init_log() {
pretty_env_logger::formatted_builder()
//.format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args()))
.filter_module("dict", log::LevelFilter::Trace)
.init();
}
#[tokio::test]
async fn refresh_captcha() -> Result<(), String> {
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let data = client.refresh_captcha().await?;
assert!(data.len() > 0);
// assert!(path.is_file());
// let new_file = std::fs::read(path).map_err(|e| format!("{:?}", e))?;
// if let Ok(old_file) = old_file {
// assert_ne!(old_file, new_file);
// }
Ok(())
}
}
| token}
let url_handler = |url: &str| {
let u | conditional_block |
maimemo_client.rs | use crate::client::*;
use crate::config::*;
use chrono::Local;
use cookie_store::CookieStore;
use reqwest::Client;
use scraper::{Html, Selector};
use serde::{Deserialize, Serialize};
use std::fmt;
/// notepad包含必要的header info和内容detail
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Notepad {
is_private: u8,
notepad_id: String,
title: String,
brief: String,
created_time: Option<String>,
updated_time: Option<String>,
contents: Option<String>,
}
impl Notepad {
pub fn get_notepad_id(&self) -> &str {
&self.notepad_id
}
pub fn set_contents(&mut self, contents: Option<String>) {
self.contents = contents;
}
pub fn get_contents(&self) -> Option<&str> {
self.contents.as_deref()
}
pub fn get_contents_mut(&mut self) -> Option<&mut String> {
self.contents.as_mut()
}
}
impl fmt::Display for Notepad {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut temp = self.clone();
// 仅输出第一行 与 total length
let contents = temp.contents.as_mut().unwrap();
let total_len = contents.len();
contents.drain(contents.find("\n").unwrap_or(total_len)..);
contents.push_str("... total length: ");
contents.push_str(&total_len.to_string());
write!(f, "{}", serde_json::to_string_pretty(&temp).unwrap())
}
}
#[derive(Debug, Serialize, Deserialize)]
struct ResponseResult {
error: String,
valid: i32,
total: usize,
notepad: Option<Vec<Notepad>>,
}
/// maimemo提供一些访问操作。
pub struct MaimemoClient {
client: Client,
config: AppConfig,
cookie_store: CookieStore,
user_token_name: String,
}
impl std::ops::Drop for MaimemoClient {
/// 在退出时保存cookie store
fn drop(&mut self) {
if let Some(path) = self.config.get_cookie_path() {
if let Err(e) = save_cookie_store(path, &self.cookie_store) {
error!("save cookie store failed: {}", e);
}
}
}
}
impl MaimemoClient {
/// 用config构造一个client。如果config.cookie_path存在则加载,否则使用in memory的cookie store。
pub fn new(config: AppConfig) -> Result<Self, String> {
let cookie_store = build_cookie_store(config.get_cookie_path())?;
Ok(Self {
client: build_general_client()?,
config,
cookie_store: cookie_store,
user_token_name: "userToken".to_string(),
})
}
pub fn get_user_token_val(&self) -> Option<&str> {
self.cookie_store
.get("www.maimemo.com", "/", &self.user_token_name)
.map(|c| c.value())
}
pub fn has_logged(&self) -> bool {
self.get_user_token_val().is_some()
}
/// 登录并更新config.cookies
pub async fn login(&mut self) -> Result<(), String> {
let req_name = "login";
let form = [
("email", self.config.get_username()),
("password", self.config.get_password()),
];
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?;
// login failed
// Check if the user token exists
update_set_cookies(&mut self.cookie_store, &resp);
if!self.has_logged() {
error!(
"update cookie store failed. not found cookie: [{}] in cookie_store",
self.user_token_name
);
Err("login failed. not found cookie store".to_string())
} else {
debug!("login successful");
Ok(())
}
}
/// 提供完整的notepad list调用get_notepad_list与get_notepad_contents
pub async fn get_notepads(&mut self) -> Result<Vec<Notepad>, String> {
let mut notepads = self.get_notepad_list().await?;
for notepad in &mut notepads {
let contents = self.get_notepad_contents(notepad.get_notepad_id()).await?;
notepad.set_contents(Some(contents));
}
Ok(notepads)
}
/// 获取notepad list
pub async fn get_notepad_list(&mut self) -> Result<Vec<Notepad>, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-search";
//?token={user_token}
let url_handler = |url: &str| {
let user_token = self.get_user_token_val().expect("not found user token");
url.to_string() + user_token
};
let payload = serde_json::json!({"keyword":null,"scope":"MINE","recommend":false,"offset":0,"limit":30,"total":-1});
let resp = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
Some(&payload),
)
.await?;
let result = resp
.json::<ResponseResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(notepad) = result.notepad {
debug!("got notepad list. len: {}", notepad.len());
Ok(notepad)
} else {
error!("get notepad failed: {:?}", result);
Err("get notepad failed".to_string())
}
}
/// 获取notepad中单词文本
pub async fn get_notepad_contents(&self, notepad_id: &str) -> Result<String, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-detail";
let url_handler = |url: &str| url.to_string() + notepad_id;
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await?;
Self::parse_notepad_text(&resp.text().await.map_err(|e| format!("{:?}", e))?)
}
/// 刷新下载notepad对应的captcha返回文件全路径。
pub async fn refresh_captcha(&self) -> Result<Vec<u8>, String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "service-captcha";
let url_handler = |url: &str| url.to_owned() + &Local::now().timestamp_nanos().to_string();
let resp = send_request_nobody(
&self.config,
&self.client,
&self.cookie_store,
req_name,
url_handler,
)
.await
.map_err(|e| format!("{:?}", e))?;
let contents = resp
.bytes()
.await
.map(|body| body.to_vec())
.map_err(|e| format!("{:?}", e))?;
Ok(contents)
}
/// 保存notepad
///
/// 注意:maimemo要求先获取验证码,再保存。并且要求是同一机器发送的。在win host浏览器刷新验证码,
/// 但在wsl2 保存则不会生效,很可能是对比的发送的数据包是否来自同一机器
pub async fn save_notepad(&self, notepad: Notepad, captcha: String) -> Result<(), String> {
if!self.has_logged() {
return Err("not logged in".to_string());
}
let req_name = "notepad-save";
if notepad.contents.is_none() {
return Err("notepad contents is none".to_string());
}
// form
let mut form = std::collections::HashMap::new();
form.insert("id".to_string(), notepad.notepad_id);
form.insert("title".to_string(), notepad.title);
form.insert("brief".to_string(), notepad.brief);
form.insert("content".to_string(), notepad.contents.unwrap());
form.insert(
"is_private".to_string(),
(notepad.is_private == 1).to_string(),
);
form.insert("captcha".to_string(), captcha);
let form = form
.iter()
.map(|(key, val)| (key.as_str(), val.as_str()))
.collect::<Vec<_>>();
#[derive(Debug, Serialize, Deserialize)]
struct RespResult {
valid: i8,
#[serde(rename = "errorCode")]
error: Option<String>,
}
let result: RespResult = send_request(
&self.config,
&self.client,
&self.cookie_store,
req_name,
|url| url.to_string(),
Some(&form),
)
.await?
.json::<RespResult>()
.await
.map_err(|e| format!("{:?}", e))?;
if let Some(e) = &result.error {
error!("save notepad failed: {:?}", result);
return Err(format!("save notepad failed: {}", e));
}
debug!("save_notepad successful");
Ok(())
}
/// 从response html body中取出单词文本
fn parse_notepad_text(html: &str) -> Result<String, String> {
if html.is_empty() {
return Err("html is empty".to_string());
}
debug!("parsing notepad html");
let id = "#content";
let id_selector = Selector::parse(id).map_err(|e| format!("{:?}", e))?;
let document = Html::parse_document(html);
document
.select(&id_selector)
.next()
.map(|e| e.inner_html())
.ok_or_else(|| {
error!("not found element {} in html: \n{}", id, html);
format!("not found element {} in html", id)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const CONFIG_PATH: &str = "config.yml";
#[tokio::test]
async fn try_login() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
client.login().await.map_err(|e| format!("{:?}", e))?;
Ok(())
}
#[tokio::test]
async fn get_notepad_list() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await | t!(notepads.len() > 0);
Ok(())
}
#[tokio::test]
async fn get_notepad_contents() -> Result<(), String> {
init_log();
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let notepads = client.get_notepad_list().await?;
// for notepad in notepads {
let contents = client.get_notepad_contents(¬epads[0].notepad_id).await?;
assert!(contents.len() > 0);
assert!(contents.contains("\n"));
// }
Ok(())
}
#[allow(dead_code)]
fn init_log() {
pretty_env_logger::formatted_builder()
//.format(|buf, record| writeln!(buf, "{}: {}", record.level(), record.args()))
.filter_module("dict", log::LevelFilter::Trace)
.init();
}
#[tokio::test]
async fn refresh_captcha() -> Result<(), String> {
let config = Config::from_yaml_file(CONFIG_PATH).unwrap();
let mut client = MaimemoClient::new(config.maimemo.unwrap())?;
if!client.has_logged() {
client.login().await?;
}
let data = client.refresh_captcha().await?;
assert!(data.len() > 0);
// assert!(path.is_file());
// let new_file = std::fs::read(path).map_err(|e| format!("{:?}", e))?;
// if let Ok(old_file) = old_file {
// assert_ne!(old_file, new_file);
// }
Ok(())
}
}
| ?;
asser | identifier_name |
process_packet.rs | 120*1000*1000*1000; // 120 seconds
// The jumping off point for all of our logic. This function inspects a packet
// that has come in the tap interface. We do not yet have any idea if we care
// about it; it might not even be TLS. It might not even be TCP!
#[no_mangle]
pub extern "C" fn rust_process_packet(ptr: *mut PerCoreGlobal,
raw_ethframe: *mut c_void,
frame_len: size_t)
{
let mut global = unsafe { &mut *ptr };
let rust_view_len = frame_len as usize;
let rust_view = unsafe {
slice::from_raw_parts_mut(raw_ethframe as *mut u8, frame_len as usize)
};
global.stats.packets_this_period += 1;
global.stats.bytes_this_period += rust_view_len as u64;
let eth_pkt = match EthernetPacket::new(rust_view) {
Some(pkt) => pkt,
None => return,
};
let eth_payload = eth_pkt.payload();
let ip_data = match eth_pkt.get_ethertype() {
EtherTypes::Vlan => {
if eth_payload[2] == 0x08 && eth_payload[3] == 0x00 {
//let vlan_id: u16 = (eth_payload[0] as u16)*256
// + (eth_payload[1] as u16);
ð_payload[4..]
} else {
return
}
},
EtherTypes::Ipv4 => ð_payload[0..],
_ => return,
};
match Ipv4Packet::new(ip_data) {
Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len),
None => return,
}
}
fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool
{
let payload = tcp_pkt.payload();
payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA
}
impl PerCoreGlobal
{
// frame_len is supposed to be the length of the whole Ethernet frame. We're
// only passing it here for plumbing reasons, and just for stat reporting.
fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize)
{
// Ignore packets that aren't TCP
if ip_pkt.get_next_level_protocol()!= IpNextHeaderProtocols::Tcp {
return;
}
let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) {
Some(pkt) => pkt,
None => return,
};
self.stats.tcp_packets_this_period += 1;
// Ignore packets that aren't -> 443.
// libpnet getters all return host order. Ignore the "u16be" in their
// docs; interactions with pnet are purely host order.
if tcp_pkt.get_destination()!= 443 {
return;
}
self.stats.tls_packets_this_period += 1; // (HTTPS, really)
self.stats.tls_bytes_this_period += frame_len as u64;
self.process_tls_pkt(&ip_pkt, &tcp_pkt);
}
// Takes an IPv4 packet
// Assumes (for now) that TLS records are in a single TCP packet
// (no fragmentation).
// Fragments could be stored in the flow_tracker if needed.
pub fn process_tls_pkt(&mut self,
ip_pkt: &Ipv4Packet,
tcp_pkt: &TcpPacket)
{
let flow = Flow::new(ip_pkt, tcp_pkt);
if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() {
return;
}
let tcp_flags = tcp_pkt.get_flags();
if (tcp_flags & TcpFlags::SYN)!= 0 && (tcp_flags & TcpFlags::ACK) == 0
{
self.stats.port_443_syns_this_period += 1;
self.flow_tracker.begin_tracking_flow(&flow,
tcp_pkt.packet().to_vec());
return;
}
if!self.flow_tracker.tracking_at_all(&flow) {
return;
}
// Note that FINs and RSTs are welcome in consume_tcp_pkt() as well.
if!self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) {
// EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow.
self.flow_tracker.drop(&flow);
}
else if self.flow_tracker.is_td(&flow) {
// Forward packets from established overt flows into the tun
// interface, so that they'll reach forge_socket.
self.forward_to_forge_socket(ip_pkt);
if (tcp_flags & TcpFlags::FIN)!= 0 {
// This stream (overt flow) is ending. The client might come and
// resume the TapDance session with a new stream, so leave the
// overall session state intact. The is_hup event's processing
// takes care of starting the BufferableSSL's cleanup.
// FlowTracker::notice_fin() will schedule a RST to be sent to
// the decoy server; forge_socket handles the FIN handshake.
self.flow_tracker.notice_fin(&flow);
}
}
else if (tcp_flags & TcpFlags::FIN)!= 0 { // non-TD flow FINd => drop
self.flow_tracker.drop(&flow);
return;
}
if (tcp_flags & TcpFlags::RST)!= 0 {
// End connection, remove any relevant state.
// TODO clean up TapDance session state, if any
// (TODO i believe that the earlier forward_to_forge_socket would
// cause a clien is_error event to fire, which would then clean up
// the session. should confirm.)
self.flow_tracker.drop(&flow);
return;
}
// This is a non-RST/FIN packet of a flow we are tracking, but that is
// not known to be TapDance. That means this might be a tag-bearing
// first TLS app data packet: establish a TD session if so.
if!self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) {
//...buuut don't bother checking these known-irrelevant addresses:
// coming from U. Michigan (35.0.0.0/9)
// going to Google CDN servers in Michigan (192.122.185.0/24)
// coming from windyegret's internet connection (192.122.200.253)
// coming from more of U. Michigan (141.212.0.0/14)
let src = ip_pkt.get_source().octets();
let dest = ip_pkt.get_destination().octets();
if src[0] == 35 && (src[1] & 128 == 0) ||
dest[0] == 192 && dest[1] == 122 && dest[2] == 185 ||
src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 ||
src[0] == 141 && (src[2] & 252) == 212 ||
!self.try_establish_tapdance(&flow, tcp_pkt)
{
// No tag in first TLS app data packet ==> definitely not TD.
self.flow_tracker.drop(&flow);
}
}
}
fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet)
{
let ip_len = ip_pkt.packet().len();
// TODO: see if the PI flag to the TUN interface can actually take care
// of this for us
let mut tun_pkt = Vec::with_capacity(ip_len+4);
// These mystery bytes are a link-layer header; the kernel "receives"
// tun packets as if they were really physically "received". Since they
// weren't physically received, they do not have an Ethernet header. It
// looks like the tun setup has its own type of header, rather than just
// making up a fake Ethernet header.
tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]);
tun_pkt.extend_from_slice(ip_pkt.packet());
// Send into tun device (can fail, but these are best-effort IP packets)
self.tun.send(tun_pkt).unwrap_or_else(|e|{
warn!("failed to send packet into tun: {}", e); 0});
self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64;
}
// Inspects a TLS app data packet for a TapDance tag. If found, establishes
// the flow as a TapDance stream (and starts a new session, if 1st stream).
// Returns true iff a TapDance stream was successfully established.
fn try_establish_tapdance(&mut self,
flow: &Flow,
tcp_pkt: &TcpPacket) -> bool
{
let tag_payload = elligator::extract_telex_tag(&self.priv_key,
&tcp_pkt.payload());
self.stats.elligator_this_period += 1;
if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN +
TAG_CLI_RND_LEN + TAG_CON_ID_LEN
{
return false;
}
if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS
// Decoy will ACK current packet with this value. (Host-order).
let expect_ack =
tcp_pkt.get_sequence()
.wrapping_add(tcp_pkt.payload().len() as u32);
let wscale_and_mss =
self.flow_tracker.mark_tapdance_flow(flow, expect_ack);
self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss)
} else |
}
pub fn establish_bidi(&mut self,
tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>,
wscale_and_mss: WscaleAndMSS) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt);
let mut client_ssl = BufferableSSL::new(session_id);
let ssl_success =
client_ssl.construct_forged_ssl(
tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr,
master_key, client_random, server_random);
if ssl_success {
let (is_a_reconnect, rc, cov_error) =
self.create_or_recall_tapdance_session(session_id);
let ref mut td = rc.borrow_mut();
let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone());
if!td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) {
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_bidi_reconnect = false;
if let Some(cov_err) = cov_error {
td.end_whole_session_error(cov_err);
self.cli_ssl_driver
.sessions_to_drop.push_back(td.session_id);
}
let src_oct1: u8 = ((flow.src_ip & 0xff000000u32) >> 24) as u8;
let src_oct2: u8 = ((flow.src_ip & 0x00ff0000u32) >> 16) as u8;
if is_a_reconnect {
self.stats.reconns_this_period += 1;
td.send_reconnect_to_client();
td.cov_read_cli_write(&mut self.cov_tcp_driver.rereg_queuer,
false);
if td.both_half_closed() { // if errored, must mark for drop
self.cli_ssl_driver.sessions_to_drop
.push_back(td.session_id);
}
} else {
let decoy_ip_str = util::inet_htoa(flow.dst_ip);
info!("newsession {} {}.{}.x.x:{} -> {}:{}",
session_id, src_oct1, src_oct2, flow.src_port,
decoy_ip_str, flow.dst_port);
td.decoy_ip = decoy_ip_str;
if self.overloaded_decoys.contains(&flow.dst_ip) {
td.end_whole_session_error(SessionError::DecoyOverload);
self.cli_ssl_driver.sessions_to_drop
.push_back(td.session_id);
}
}
self.cli_ssl_driver.stream_timeouts.push_back(
SchedSessionTimeout {
drop_time: precise_time_ns() + STREAM_TIMEOUT_NS,
id: session_id,
stream_count: td.cli_pair.stream_count() });
info!("newstream {} {}.{}.x.x:{} -> {}:{}", session_id,
src_oct1, src_oct2, flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
true
} else {
error!("make_forged_tls() returned 0! Tagged TLS not picked up \
as a TapDance stream :(");
false
}
}
pub fn establish_upload_only(&mut self, tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let mut passive_ssl = EventedSSLEavesdropper::new(session_id);
let ssl_success = passive_ssl.construct_eavesdropped_ssl(
tcp_pkt, master_key, client_random, server_random);
if ssl_success
{
if let Some(rc) = self.id2sess.get(&session_id)
{
let inserted_tok =
self.cli_psv_driver.tok2sess.insert(rc.clone());
let ref mut td = rc.borrow_mut();
if!td.cli_pair.set_passive_uploader(passive_ssl, inserted_tok,
&self.cli_psv_poll)
{
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_uploader_reconnect = false;
// TODO? self.stats.reconns_UPL_this_period += 1;
// TODO? (goes thru bidi) td.send_UPL_reconnect_to_client();
self.flow_tracker.mark_passive_td(flow, rc.clone());
self.cli_ssl_driver.stream_timeouts.push_back(
SchedSessionTimeout {
drop_time: precise_time_ns() + STREAM_TIMEOUT_NS,
id: session_id,
stream_count: td.cli_pair.stream_count() });
report!("newuploader {} {}:{} -> {}:{}", session_id,
util::inet_htoa(flow.src_ip), flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
true
}
else
{
error!("This new upload-only stream does not belong to an \
ongoing session. A session's first stream must be \
bidi. Session ID: {}", session_id);
report!("newuploader {} {}:{} -> {}:{}", session_id,
util::inet_htoa(flow.src_ip), flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
report!("error {} {}", session_id,
SessionError::ClientProtocol.to_string());
// (passive_ssl goes out of scope, "deluploader")
false
}
}
else
{
error!("make_forged_memory_tls() returned 0! Tagged TLS not picked \
up as a passive TapDance stream :(");
false
}
}
// Lookup the ongoing session with ID session_id, if it exists. If it does
// not, make a new one (including initiating the Squid TCP connection).
// Returns: Bool is whether the session was already there.
// Option<SessionError> is to be filled if session creation failed.
fn create_or_recall_tapdance_session(&mut self, session_id: SessionId)
-> (bool, Rc<RefCell<TapdanceSession>>, Option<SessionError>)
{
let ref mut cov_tcp_poll = self.cov_tcp_poll;
let ref mut tok_map = self.cov_tcp_driver.tok2sess;
let recalled = self.id2sess.contains_key(&session_id);
let mut cov_err = None;
let rc = self.id2sess.entry(session_id).or_insert_with(||
{
let td_rc =
Rc::new(RefCell::new(TapdanceSession::new(session_id)));
// New proxy connection to local proxy. unwrap() relies on
// SQUID_PROXY_ADDR being a valid constant.
let dest = IpAddr::from_str(SQUID_PROXY_ADDR).unwrap();
let sock_addr = SocketAddr::new(dest, SQUID_PROXY_PORT);
// NOTE: this mio version of TcpStream is nonblocking!
if let Ok(sock) = ::mio::tcp::TcpStream::connect(&sock_addr) {
let ref mut td = td_rc.borrow_mut();
td.cov.set_stream(BufferableTCP::new(sock));
let inserted_tok = tok_map.insert(td_rc.clone());
td.cov.register(cov_tcp_poll, inserted_tok.val(),
util::all_unix_events(), PollOpt::edge())
.unwrap_or_else(|e|{error!("tcp_driver 1st reg: {}", | { // upload-only eavesdropped TLS
// (don't mark as TD in FlowTracker until you have the Rc<RefCell>)
self.establish_upload_only(tcp_pkt, flow, &tag_payload)
} | conditional_block |
process_packet.rs | 120*1000*1000*1000; // 120 seconds
// The jumping off point for all of our logic. This function inspects a packet
// that has come in the tap interface. We do not yet have any idea if we care
// about it; it might not even be TLS. It might not even be TCP!
#[no_mangle]
pub extern "C" fn rust_process_packet(ptr: *mut PerCoreGlobal,
raw_ethframe: *mut c_void,
frame_len: size_t)
{
let mut global = unsafe { &mut *ptr };
let rust_view_len = frame_len as usize;
let rust_view = unsafe {
slice::from_raw_parts_mut(raw_ethframe as *mut u8, frame_len as usize)
};
global.stats.packets_this_period += 1;
global.stats.bytes_this_period += rust_view_len as u64;
let eth_pkt = match EthernetPacket::new(rust_view) {
Some(pkt) => pkt,
None => return,
};
let eth_payload = eth_pkt.payload();
let ip_data = match eth_pkt.get_ethertype() {
EtherTypes::Vlan => {
if eth_payload[2] == 0x08 && eth_payload[3] == 0x00 {
//let vlan_id: u16 = (eth_payload[0] as u16)*256
// + (eth_payload[1] as u16);
ð_payload[4..]
} else {
return
}
},
EtherTypes::Ipv4 => ð_payload[0..],
_ => return,
};
match Ipv4Packet::new(ip_data) {
Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len),
None => return,
}
}
fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool
{
let payload = tcp_pkt.payload();
payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA
}
impl PerCoreGlobal
{
// frame_len is supposed to be the length of the whole Ethernet frame. We're
// only passing it here for plumbing reasons, and just for stat reporting.
fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize)
{
// Ignore packets that aren't TCP
if ip_pkt.get_next_level_protocol()!= IpNextHeaderProtocols::Tcp {
return;
}
let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) {
Some(pkt) => pkt,
None => return,
};
self.stats.tcp_packets_this_period += 1;
// Ignore packets that aren't -> 443.
// libpnet getters all return host order. Ignore the "u16be" in their
// docs; interactions with pnet are purely host order.
if tcp_pkt.get_destination()!= 443 {
return;
}
self.stats.tls_packets_this_period += 1; // (HTTPS, really)
self.stats.tls_bytes_this_period += frame_len as u64;
self.process_tls_pkt(&ip_pkt, &tcp_pkt);
}
// Takes an IPv4 packet
// Assumes (for now) that TLS records are in a single TCP packet
// (no fragmentation).
// Fragments could be stored in the flow_tracker if needed.
pub fn process_tls_pkt(&mut self,
ip_pkt: &Ipv4Packet,
tcp_pkt: &TcpPacket)
{
let flow = Flow::new(ip_pkt, tcp_pkt);
if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() {
return;
}
let tcp_flags = tcp_pkt.get_flags();
if (tcp_flags & TcpFlags::SYN)!= 0 && (tcp_flags & TcpFlags::ACK) == 0
{
self.stats.port_443_syns_this_period += 1;
self.flow_tracker.begin_tracking_flow(&flow,
tcp_pkt.packet().to_vec());
return;
}
if!self.flow_tracker.tracking_at_all(&flow) {
return;
}
// Note that FINs and RSTs are welcome in consume_tcp_pkt() as well.
if!self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) {
// EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow.
self.flow_tracker.drop(&flow);
}
else if self.flow_tracker.is_td(&flow) {
// Forward packets from established overt flows into the tun
// interface, so that they'll reach forge_socket.
self.forward_to_forge_socket(ip_pkt);
if (tcp_flags & TcpFlags::FIN)!= 0 {
// This stream (overt flow) is ending. The client might come and
// resume the TapDance session with a new stream, so leave the
// overall session state intact. The is_hup event's processing
// takes care of starting the BufferableSSL's cleanup.
// FlowTracker::notice_fin() will schedule a RST to be sent to
// the decoy server; forge_socket handles the FIN handshake.
self.flow_tracker.notice_fin(&flow);
}
}
else if (tcp_flags & TcpFlags::FIN)!= 0 { // non-TD flow FINd => drop
self.flow_tracker.drop(&flow);
return;
}
if (tcp_flags & TcpFlags::RST)!= 0 {
// End connection, remove any relevant state.
// TODO clean up TapDance session state, if any
// (TODO i believe that the earlier forward_to_forge_socket would
// cause a clien is_error event to fire, which would then clean up
// the session. should confirm.)
self.flow_tracker.drop(&flow);
return;
}
// This is a non-RST/FIN packet of a flow we are tracking, but that is
// not known to be TapDance. That means this might be a tag-bearing
// first TLS app data packet: establish a TD session if so.
if!self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) {
//...buuut don't bother checking these known-irrelevant addresses:
// coming from U. Michigan (35.0.0.0/9)
// going to Google CDN servers in Michigan (192.122.185.0/24)
// coming from windyegret's internet connection (192.122.200.253)
// coming from more of U. Michigan (141.212.0.0/14)
let src = ip_pkt.get_source().octets();
let dest = ip_pkt.get_destination().octets();
if src[0] == 35 && (src[1] & 128 == 0) ||
dest[0] == 192 && dest[1] == 122 && dest[2] == 185 ||
src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 ||
src[0] == 141 && (src[2] & 252) == 212 ||
!self.try_establish_tapdance(&flow, tcp_pkt)
{
// No tag in first TLS app data packet ==> definitely not TD.
self.flow_tracker.drop(&flow);
}
}
}
fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet)
{
let ip_len = ip_pkt.packet().len();
// TODO: see if the PI flag to the TUN interface can actually take care
// of this for us
let mut tun_pkt = Vec::with_capacity(ip_len+4);
// These mystery bytes are a link-layer header; the kernel "receives"
// tun packets as if they were really physically "received". Since they
// weren't physically received, they do not have an Ethernet header. It
// looks like the tun setup has its own type of header, rather than just
// making up a fake Ethernet header.
tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]);
tun_pkt.extend_from_slice(ip_pkt.packet());
// Send into tun device (can fail, but these are best-effort IP packets)
self.tun.send(tun_pkt).unwrap_or_else(|e|{
warn!("failed to send packet into tun: {}", e); 0});
self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64;
}
// Inspects a TLS app data packet for a TapDance tag. If found, establishes
// the flow as a TapDance stream (and starts a new session, if 1st stream).
// Returns true iff a TapDance stream was successfully established.
fn try_establish_tapdance(&mut self,
flow: &Flow,
tcp_pkt: &TcpPacket) -> bool
{
let tag_payload = elligator::extract_telex_tag(&self.priv_key,
&tcp_pkt.payload());
self.stats.elligator_this_period += 1;
if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN +
TAG_CLI_RND_LEN + TAG_CON_ID_LEN
{
return false;
}
if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS
// Decoy will ACK current packet with this value. (Host-order).
let expect_ack =
tcp_pkt.get_sequence()
.wrapping_add(tcp_pkt.payload().len() as u32);
let wscale_and_mss =
self.flow_tracker.mark_tapdance_flow(flow, expect_ack);
self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss)
} else { // upload-only eavesdropped TLS
// (don't mark as TD in FlowTracker until you have the Rc<RefCell>)
self.establish_upload_only(tcp_pkt, flow, &tag_payload)
}
}
pub fn establish_bidi(&mut self,
tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>,
wscale_and_mss: WscaleAndMSS) -> bool
| return false;
}
td.expect_bidi_reconnect = false;
if let Some(cov_err) = cov_error {
td.end_whole_session_error(cov_err);
self.cli_ssl_driver
.sessions_to_drop.push_back(td.session_id);
}
let src_oct1: u8 = ((flow.src_ip & 0xff000000u32) >> 24) as u8;
let src_oct2: u8 = ((flow.src_ip & 0x00ff0000u32) >> 16) as u8;
if is_a_reconnect {
self.stats.reconns_this_period += 1;
td.send_reconnect_to_client();
td.cov_read_cli_write(&mut self.cov_tcp_driver.rereg_queuer,
false);
if td.both_half_closed() { // if errored, must mark for drop
self.cli_ssl_driver.sessions_to_drop
.push_back(td.session_id);
}
} else {
let decoy_ip_str = util::inet_htoa(flow.dst_ip);
info!("newsession {} {}.{}.x.x:{} -> {}:{}",
session_id, src_oct1, src_oct2, flow.src_port,
decoy_ip_str, flow.dst_port);
td.decoy_ip = decoy_ip_str;
if self.overloaded_decoys.contains(&flow.dst_ip) {
td.end_whole_session_error(SessionError::DecoyOverload);
self.cli_ssl_driver.sessions_to_drop
.push_back(td.session_id);
}
}
self.cli_ssl_driver.stream_timeouts.push_back(
SchedSessionTimeout {
drop_time: precise_time_ns() + STREAM_TIMEOUT_NS,
id: session_id,
stream_count: td.cli_pair.stream_count() });
info!("newstream {} {}.{}.x.x:{} -> {}:{}", session_id,
src_oct1, src_oct2, flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
true
} else {
error!("make_forged_tls() returned 0! Tagged TLS not picked up \
as a TapDance stream :(");
false
}
}
pub fn establish_upload_only(&mut self, tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let mut passive_ssl = EventedSSLEavesdropper::new(session_id);
let ssl_success = passive_ssl.construct_eavesdropped_ssl(
tcp_pkt, master_key, client_random, server_random);
if ssl_success
{
if let Some(rc) = self.id2sess.get(&session_id)
{
let inserted_tok =
self.cli_psv_driver.tok2sess.insert(rc.clone());
let ref mut td = rc.borrow_mut();
if!td.cli_pair.set_passive_uploader(passive_ssl, inserted_tok,
&self.cli_psv_poll)
{
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_uploader_reconnect = false;
// TODO? self.stats.reconns_UPL_this_period += 1;
// TODO? (goes thru bidi) td.send_UPL_reconnect_to_client();
self.flow_tracker.mark_passive_td(flow, rc.clone());
self.cli_ssl_driver.stream_timeouts.push_back(
SchedSessionTimeout {
drop_time: precise_time_ns() + STREAM_TIMEOUT_NS,
id: session_id,
stream_count: td.cli_pair.stream_count() });
report!("newuploader {} {}:{} -> {}:{}", session_id,
util::inet_htoa(flow.src_ip), flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
true
}
else
{
error!("This new upload-only stream does not belong to an \
ongoing session. A session's first stream must be \
bidi. Session ID: {}", session_id);
report!("newuploader {} {}:{} -> {}:{}", session_id,
util::inet_htoa(flow.src_ip), flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
report!("error {} {}", session_id,
SessionError::ClientProtocol.to_string());
// (passive_ssl goes out of scope, "deluploader")
false
}
}
else
{
error!("make_forged_memory_tls() returned 0! Tagged TLS not picked \
up as a passive TapDance stream :(");
false
}
}
// Lookup the ongoing session with ID session_id, if it exists. If it does
// not, make a new one (including initiating the Squid TCP connection).
// Returns: Bool is whether the session was already there.
// Option<SessionError> is to be filled if session creation failed.
fn create_or_recall_tapdance_session(&mut self, session_id: SessionId)
-> (bool, Rc<RefCell<TapdanceSession>>, Option<SessionError>)
{
let ref mut cov_tcp_poll = self.cov_tcp_poll;
let ref mut tok_map = self.cov_tcp_driver.tok2sess;
let recalled = self.id2sess.contains_key(&session_id);
let mut cov_err = None;
let rc = self.id2sess.entry(session_id).or_insert_with(||
{
let td_rc =
Rc::new(RefCell::new(TapdanceSession::new(session_id)));
// New proxy connection to local proxy. unwrap() relies on
// SQUID_PROXY_ADDR being a valid constant.
let dest = IpAddr::from_str(SQUID_PROXY_ADDR).unwrap();
let sock_addr = SocketAddr::new(dest, SQUID_PROXY_PORT);
// NOTE: this mio version of TcpStream is nonblocking!
if let Ok(sock) = ::mio::tcp::TcpStream::connect(&sock_addr) {
let ref mut td = td_rc.borrow_mut();
td.cov.set_stream(BufferableTCP::new(sock));
let inserted_tok = tok_map.insert(td_rc.clone());
td.cov.register(cov_tcp_poll, inserted_tok.val(),
util::all_unix_events(), PollOpt::edge())
.unwrap_or_else(|e|{error!("tcp_driver 1st reg: {}", | {
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt);
let mut client_ssl = BufferableSSL::new(session_id);
let ssl_success =
client_ssl.construct_forged_ssl(
tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr,
master_key, client_random, server_random);
if ssl_success {
let (is_a_reconnect, rc, cov_error) =
self.create_or_recall_tapdance_session(session_id);
let ref mut td = rc.borrow_mut();
let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone());
if !td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) {
td.end_whole_session_error(SessionError::ClientProtocol); | identifier_body |
process_packet.rs | = 120*1000*1000*1000; // 120 seconds
// The jumping off point for all of our logic. This function inspects a packet
// that has come in the tap interface. We do not yet have any idea if we care
// about it; it might not even be TLS. It might not even be TCP!
#[no_mangle]
pub extern "C" fn rust_process_packet(ptr: *mut PerCoreGlobal,
raw_ethframe: *mut c_void,
frame_len: size_t)
{
let mut global = unsafe { &mut *ptr };
let rust_view_len = frame_len as usize;
let rust_view = unsafe {
slice::from_raw_parts_mut(raw_ethframe as *mut u8, frame_len as usize)
};
global.stats.packets_this_period += 1;
global.stats.bytes_this_period += rust_view_len as u64;
let eth_pkt = match EthernetPacket::new(rust_view) {
Some(pkt) => pkt,
None => return,
};
let eth_payload = eth_pkt.payload();
let ip_data = match eth_pkt.get_ethertype() {
EtherTypes::Vlan => {
if eth_payload[2] == 0x08 && eth_payload[3] == 0x00 {
//let vlan_id: u16 = (eth_payload[0] as u16)*256 | },
EtherTypes::Ipv4 => ð_payload[0..],
_ => return,
};
match Ipv4Packet::new(ip_data) {
Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len),
None => return,
}
}
fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool
{
let payload = tcp_pkt.payload();
payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA
}
impl PerCoreGlobal
{
// frame_len is supposed to be the length of the whole Ethernet frame. We're
// only passing it here for plumbing reasons, and just for stat reporting.
fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize)
{
// Ignore packets that aren't TCP
if ip_pkt.get_next_level_protocol()!= IpNextHeaderProtocols::Tcp {
return;
}
let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) {
Some(pkt) => pkt,
None => return,
};
self.stats.tcp_packets_this_period += 1;
// Ignore packets that aren't -> 443.
// libpnet getters all return host order. Ignore the "u16be" in their
// docs; interactions with pnet are purely host order.
if tcp_pkt.get_destination()!= 443 {
return;
}
self.stats.tls_packets_this_period += 1; // (HTTPS, really)
self.stats.tls_bytes_this_period += frame_len as u64;
self.process_tls_pkt(&ip_pkt, &tcp_pkt);
}
// Takes an IPv4 packet
// Assumes (for now) that TLS records are in a single TCP packet
// (no fragmentation).
// Fragments could be stored in the flow_tracker if needed.
pub fn process_tls_pkt(&mut self,
ip_pkt: &Ipv4Packet,
tcp_pkt: &TcpPacket)
{
let flow = Flow::new(ip_pkt, tcp_pkt);
if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() {
return;
}
let tcp_flags = tcp_pkt.get_flags();
if (tcp_flags & TcpFlags::SYN)!= 0 && (tcp_flags & TcpFlags::ACK) == 0
{
self.stats.port_443_syns_this_period += 1;
self.flow_tracker.begin_tracking_flow(&flow,
tcp_pkt.packet().to_vec());
return;
}
if!self.flow_tracker.tracking_at_all(&flow) {
return;
}
// Note that FINs and RSTs are welcome in consume_tcp_pkt() as well.
if!self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) {
// EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow.
self.flow_tracker.drop(&flow);
}
else if self.flow_tracker.is_td(&flow) {
// Forward packets from established overt flows into the tun
// interface, so that they'll reach forge_socket.
self.forward_to_forge_socket(ip_pkt);
if (tcp_flags & TcpFlags::FIN)!= 0 {
// This stream (overt flow) is ending. The client might come and
// resume the TapDance session with a new stream, so leave the
// overall session state intact. The is_hup event's processing
// takes care of starting the BufferableSSL's cleanup.
// FlowTracker::notice_fin() will schedule a RST to be sent to
// the decoy server; forge_socket handles the FIN handshake.
self.flow_tracker.notice_fin(&flow);
}
}
else if (tcp_flags & TcpFlags::FIN)!= 0 { // non-TD flow FINd => drop
self.flow_tracker.drop(&flow);
return;
}
if (tcp_flags & TcpFlags::RST)!= 0 {
// End connection, remove any relevant state.
// TODO clean up TapDance session state, if any
// (TODO i believe that the earlier forward_to_forge_socket would
// cause a clien is_error event to fire, which would then clean up
// the session. should confirm.)
self.flow_tracker.drop(&flow);
return;
}
// This is a non-RST/FIN packet of a flow we are tracking, but that is
// not known to be TapDance. That means this might be a tag-bearing
// first TLS app data packet: establish a TD session if so.
if!self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) {
//...buuut don't bother checking these known-irrelevant addresses:
// coming from U. Michigan (35.0.0.0/9)
// going to Google CDN servers in Michigan (192.122.185.0/24)
// coming from windyegret's internet connection (192.122.200.253)
// coming from more of U. Michigan (141.212.0.0/14)
let src = ip_pkt.get_source().octets();
let dest = ip_pkt.get_destination().octets();
if src[0] == 35 && (src[1] & 128 == 0) ||
dest[0] == 192 && dest[1] == 122 && dest[2] == 185 ||
src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 ||
src[0] == 141 && (src[2] & 252) == 212 ||
!self.try_establish_tapdance(&flow, tcp_pkt)
{
// No tag in first TLS app data packet ==> definitely not TD.
self.flow_tracker.drop(&flow);
}
}
}
fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet)
{
let ip_len = ip_pkt.packet().len();
// TODO: see if the PI flag to the TUN interface can actually take care
// of this for us
let mut tun_pkt = Vec::with_capacity(ip_len+4);
// These mystery bytes are a link-layer header; the kernel "receives"
// tun packets as if they were really physically "received". Since they
// weren't physically received, they do not have an Ethernet header. It
// looks like the tun setup has its own type of header, rather than just
// making up a fake Ethernet header.
tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]);
tun_pkt.extend_from_slice(ip_pkt.packet());
// Send into tun device (can fail, but these are best-effort IP packets)
self.tun.send(tun_pkt).unwrap_or_else(|e|{
warn!("failed to send packet into tun: {}", e); 0});
self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64;
}
// Inspects a TLS app data packet for a TapDance tag. If found, establishes
// the flow as a TapDance stream (and starts a new session, if 1st stream).
// Returns true iff a TapDance stream was successfully established.
fn try_establish_tapdance(&mut self,
flow: &Flow,
tcp_pkt: &TcpPacket) -> bool
{
let tag_payload = elligator::extract_telex_tag(&self.priv_key,
&tcp_pkt.payload());
self.stats.elligator_this_period += 1;
if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN +
TAG_CLI_RND_LEN + TAG_CON_ID_LEN
{
return false;
}
if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS
// Decoy will ACK current packet with this value. (Host-order).
let expect_ack =
tcp_pkt.get_sequence()
.wrapping_add(tcp_pkt.payload().len() as u32);
let wscale_and_mss =
self.flow_tracker.mark_tapdance_flow(flow, expect_ack);
self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss)
} else { // upload-only eavesdropped TLS
// (don't mark as TD in FlowTracker until you have the Rc<RefCell>)
self.establish_upload_only(tcp_pkt, flow, &tag_payload)
}
}
pub fn establish_bidi(&mut self,
tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>,
wscale_and_mss: WscaleAndMSS) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt);
let mut client_ssl = BufferableSSL::new(session_id);
let ssl_success =
client_ssl.construct_forged_ssl(
tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr,
master_key, client_random, server_random);
if ssl_success {
let (is_a_reconnect, rc, cov_error) =
self.create_or_recall_tapdance_session(session_id);
let ref mut td = rc.borrow_mut();
let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone());
if!td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) {
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_bidi_reconnect = false;
if let Some(cov_err) = cov_error {
td.end_whole_session_error(cov_err);
self.cli_ssl_driver
.sessions_to_drop.push_back(td.session_id);
}
let src_oct1: u8 = ((flow.src_ip & 0xff000000u32) >> 24) as u8;
let src_oct2: u8 = ((flow.src_ip & 0x00ff0000u32) >> 16) as u8;
if is_a_reconnect {
self.stats.reconns_this_period += 1;
td.send_reconnect_to_client();
td.cov_read_cli_write(&mut self.cov_tcp_driver.rereg_queuer,
false);
if td.both_half_closed() { // if errored, must mark for drop
self.cli_ssl_driver.sessions_to_drop
.push_back(td.session_id);
}
} else {
let decoy_ip_str = util::inet_htoa(flow.dst_ip);
info!("newsession {} {}.{}.x.x:{} -> {}:{}",
session_id, src_oct1, src_oct2, flow.src_port,
decoy_ip_str, flow.dst_port);
td.decoy_ip = decoy_ip_str;
if self.overloaded_decoys.contains(&flow.dst_ip) {
td.end_whole_session_error(SessionError::DecoyOverload);
self.cli_ssl_driver.sessions_to_drop
.push_back(td.session_id);
}
}
self.cli_ssl_driver.stream_timeouts.push_back(
SchedSessionTimeout {
drop_time: precise_time_ns() + STREAM_TIMEOUT_NS,
id: session_id,
stream_count: td.cli_pair.stream_count() });
info!("newstream {} {}.{}.x.x:{} -> {}:{}", session_id,
src_oct1, src_oct2, flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
true
} else {
error!("make_forged_tls() returned 0! Tagged TLS not picked up \
as a TapDance stream :(");
false
}
}
pub fn establish_upload_only(&mut self, tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let mut passive_ssl = EventedSSLEavesdropper::new(session_id);
let ssl_success = passive_ssl.construct_eavesdropped_ssl(
tcp_pkt, master_key, client_random, server_random);
if ssl_success
{
if let Some(rc) = self.id2sess.get(&session_id)
{
let inserted_tok =
self.cli_psv_driver.tok2sess.insert(rc.clone());
let ref mut td = rc.borrow_mut();
if!td.cli_pair.set_passive_uploader(passive_ssl, inserted_tok,
&self.cli_psv_poll)
{
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_uploader_reconnect = false;
// TODO? self.stats.reconns_UPL_this_period += 1;
// TODO? (goes thru bidi) td.send_UPL_reconnect_to_client();
self.flow_tracker.mark_passive_td(flow, rc.clone());
self.cli_ssl_driver.stream_timeouts.push_back(
SchedSessionTimeout {
drop_time: precise_time_ns() + STREAM_TIMEOUT_NS,
id: session_id,
stream_count: td.cli_pair.stream_count() });
report!("newuploader {} {}:{} -> {}:{}", session_id,
util::inet_htoa(flow.src_ip), flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
true
}
else
{
error!("This new upload-only stream does not belong to an \
ongoing session. A session's first stream must be \
bidi. Session ID: {}", session_id);
report!("newuploader {} {}:{} -> {}:{}", session_id,
util::inet_htoa(flow.src_ip), flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
report!("error {} {}", session_id,
SessionError::ClientProtocol.to_string());
// (passive_ssl goes out of scope, "deluploader")
false
}
}
else
{
error!("make_forged_memory_tls() returned 0! Tagged TLS not picked \
up as a passive TapDance stream :(");
false
}
}
// Lookup the ongoing session with ID session_id, if it exists. If it does
// not, make a new one (including initiating the Squid TCP connection).
// Returns: Bool is whether the session was already there.
// Option<SessionError> is to be filled if session creation failed.
fn create_or_recall_tapdance_session(&mut self, session_id: SessionId)
-> (bool, Rc<RefCell<TapdanceSession>>, Option<SessionError>)
{
let ref mut cov_tcp_poll = self.cov_tcp_poll;
let ref mut tok_map = self.cov_tcp_driver.tok2sess;
let recalled = self.id2sess.contains_key(&session_id);
let mut cov_err = None;
let rc = self.id2sess.entry(session_id).or_insert_with(||
{
let td_rc =
Rc::new(RefCell::new(TapdanceSession::new(session_id)));
// New proxy connection to local proxy. unwrap() relies on
// SQUID_PROXY_ADDR being a valid constant.
let dest = IpAddr::from_str(SQUID_PROXY_ADDR).unwrap();
let sock_addr = SocketAddr::new(dest, SQUID_PROXY_PORT);
// NOTE: this mio version of TcpStream is nonblocking!
if let Ok(sock) = ::mio::tcp::TcpStream::connect(&sock_addr) {
let ref mut td = td_rc.borrow_mut();
td.cov.set_stream(BufferableTCP::new(sock));
let inserted_tok = tok_map.insert(td_rc.clone());
td.cov.register(cov_tcp_poll, inserted_tok.val(),
util::all_unix_events(), PollOpt::edge())
.unwrap_or_else(|e|{error!("tcp_driver 1st reg: {}", e); | // + (eth_payload[1] as u16);
ð_payload[4..]
} else {
return
} | random_line_split |
process_packet.rs | 120*1000*1000*1000; // 120 seconds
// The jumping off point for all of our logic. This function inspects a packet
// that has come in the tap interface. We do not yet have any idea if we care
// about it; it might not even be TLS. It might not even be TCP!
#[no_mangle]
pub extern "C" fn rust_process_packet(ptr: *mut PerCoreGlobal,
raw_ethframe: *mut c_void,
frame_len: size_t)
{
let mut global = unsafe { &mut *ptr };
let rust_view_len = frame_len as usize;
let rust_view = unsafe {
slice::from_raw_parts_mut(raw_ethframe as *mut u8, frame_len as usize)
};
global.stats.packets_this_period += 1;
global.stats.bytes_this_period += rust_view_len as u64;
let eth_pkt = match EthernetPacket::new(rust_view) {
Some(pkt) => pkt,
None => return,
};
let eth_payload = eth_pkt.payload();
let ip_data = match eth_pkt.get_ethertype() {
EtherTypes::Vlan => {
if eth_payload[2] == 0x08 && eth_payload[3] == 0x00 {
//let vlan_id: u16 = (eth_payload[0] as u16)*256
// + (eth_payload[1] as u16);
ð_payload[4..]
} else {
return
}
},
EtherTypes::Ipv4 => ð_payload[0..],
_ => return,
};
match Ipv4Packet::new(ip_data) {
Some(pkt) => global.process_ipv4_packet(pkt, rust_view_len),
None => return,
}
}
fn is_tls_app_pkt(tcp_pkt: &TcpPacket) -> bool
{
let payload = tcp_pkt.payload();
payload.len() > 5 && payload[0] == TLS_TYPE_APPLICATION_DATA
}
impl PerCoreGlobal
{
// frame_len is supposed to be the length of the whole Ethernet frame. We're
// only passing it here for plumbing reasons, and just for stat reporting.
fn process_ipv4_packet(&mut self, ip_pkt: Ipv4Packet, frame_len: usize)
{
// Ignore packets that aren't TCP
if ip_pkt.get_next_level_protocol()!= IpNextHeaderProtocols::Tcp {
return;
}
let tcp_pkt = match TcpPacket::new(ip_pkt.payload()) {
Some(pkt) => pkt,
None => return,
};
self.stats.tcp_packets_this_period += 1;
// Ignore packets that aren't -> 443.
// libpnet getters all return host order. Ignore the "u16be" in their
// docs; interactions with pnet are purely host order.
if tcp_pkt.get_destination()!= 443 {
return;
}
self.stats.tls_packets_this_period += 1; // (HTTPS, really)
self.stats.tls_bytes_this_period += frame_len as u64;
self.process_tls_pkt(&ip_pkt, &tcp_pkt);
}
// Takes an IPv4 packet
// Assumes (for now) that TLS records are in a single TCP packet
// (no fragmentation).
// Fragments could be stored in the flow_tracker if needed.
pub fn process_tls_pkt(&mut self,
ip_pkt: &Ipv4Packet,
tcp_pkt: &TcpPacket)
{
let flow = Flow::new(ip_pkt, tcp_pkt);
if panic::catch_unwind(||{ tcp_pkt.payload(); }).is_err() {
return;
}
let tcp_flags = tcp_pkt.get_flags();
if (tcp_flags & TcpFlags::SYN)!= 0 && (tcp_flags & TcpFlags::ACK) == 0
{
self.stats.port_443_syns_this_period += 1;
self.flow_tracker.begin_tracking_flow(&flow,
tcp_pkt.packet().to_vec());
return;
}
if!self.flow_tracker.tracking_at_all(&flow) {
return;
}
// Note that FINs and RSTs are welcome in consume_tcp_pkt() as well.
if!self.flow_tracker.consume_tcp_pkt_if_passive(&flow, tcp_pkt) {
// EventedSSLEavesdropped::consume_tcp_pkt() said to drop the flow.
self.flow_tracker.drop(&flow);
}
else if self.flow_tracker.is_td(&flow) {
// Forward packets from established overt flows into the tun
// interface, so that they'll reach forge_socket.
self.forward_to_forge_socket(ip_pkt);
if (tcp_flags & TcpFlags::FIN)!= 0 {
// This stream (overt flow) is ending. The client might come and
// resume the TapDance session with a new stream, so leave the
// overall session state intact. The is_hup event's processing
// takes care of starting the BufferableSSL's cleanup.
// FlowTracker::notice_fin() will schedule a RST to be sent to
// the decoy server; forge_socket handles the FIN handshake.
self.flow_tracker.notice_fin(&flow);
}
}
else if (tcp_flags & TcpFlags::FIN)!= 0 { // non-TD flow FINd => drop
self.flow_tracker.drop(&flow);
return;
}
if (tcp_flags & TcpFlags::RST)!= 0 {
// End connection, remove any relevant state.
// TODO clean up TapDance session state, if any
// (TODO i believe that the earlier forward_to_forge_socket would
// cause a clien is_error event to fire, which would then clean up
// the session. should confirm.)
self.flow_tracker.drop(&flow);
return;
}
// This is a non-RST/FIN packet of a flow we are tracking, but that is
// not known to be TapDance. That means this might be a tag-bearing
// first TLS app data packet: establish a TD session if so.
if!self.flow_tracker.is_td(&flow) && is_tls_app_pkt(tcp_pkt) {
//...buuut don't bother checking these known-irrelevant addresses:
// coming from U. Michigan (35.0.0.0/9)
// going to Google CDN servers in Michigan (192.122.185.0/24)
// coming from windyegret's internet connection (192.122.200.253)
// coming from more of U. Michigan (141.212.0.0/14)
let src = ip_pkt.get_source().octets();
let dest = ip_pkt.get_destination().octets();
if src[0] == 35 && (src[1] & 128 == 0) ||
dest[0] == 192 && dest[1] == 122 && dest[2] == 185 ||
src[0]==192 && src[1]==122 && src[2]==200 && src[3]==253 ||
src[0] == 141 && (src[2] & 252) == 212 ||
!self.try_establish_tapdance(&flow, tcp_pkt)
{
// No tag in first TLS app data packet ==> definitely not TD.
self.flow_tracker.drop(&flow);
}
}
}
fn forward_to_forge_socket(&mut self, ip_pkt: &Ipv4Packet)
{
let ip_len = ip_pkt.packet().len();
// TODO: see if the PI flag to the TUN interface can actually take care
// of this for us
let mut tun_pkt = Vec::with_capacity(ip_len+4);
// These mystery bytes are a link-layer header; the kernel "receives"
// tun packets as if they were really physically "received". Since they
// weren't physically received, they do not have an Ethernet header. It
// looks like the tun setup has its own type of header, rather than just
// making up a fake Ethernet header.
tun_pkt.extend_from_slice(&[0x00, 0x01, 0x08, 0x00]);
tun_pkt.extend_from_slice(ip_pkt.packet());
// Send into tun device (can fail, but these are best-effort IP packets)
self.tun.send(tun_pkt).unwrap_or_else(|e|{
warn!("failed to send packet into tun: {}", e); 0});
self.stats.cli2cov_raw_etherbytes_this_period += ip_len as u64;
}
// Inspects a TLS app data packet for a TapDance tag. If found, establishes
// the flow as a TapDance stream (and starts a new session, if 1st stream).
// Returns true iff a TapDance stream was successfully established.
fn | (&mut self,
flow: &Flow,
tcp_pkt: &TcpPacket) -> bool
{
let tag_payload = elligator::extract_telex_tag(&self.priv_key,
&tcp_pkt.payload());
self.stats.elligator_this_period += 1;
if tag_payload.len() < TAG_FLAGS_LEN + TAG_M_KEY_LEN + TAG_SRV_RND_LEN +
TAG_CLI_RND_LEN + TAG_CON_ID_LEN
{
return false;
}
if tag_payload[0] & 128u8 == 0 { // traditional bidi forged TLS
// Decoy will ACK current packet with this value. (Host-order).
let expect_ack =
tcp_pkt.get_sequence()
.wrapping_add(tcp_pkt.payload().len() as u32);
let wscale_and_mss =
self.flow_tracker.mark_tapdance_flow(flow, expect_ack);
self.establish_bidi(tcp_pkt, flow, &tag_payload, wscale_and_mss)
} else { // upload-only eavesdropped TLS
// (don't mark as TD in FlowTracker until you have the Rc<RefCell>)
self.establish_upload_only(tcp_pkt, flow, &tag_payload)
}
}
pub fn establish_bidi(&mut self,
tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>,
wscale_and_mss: WscaleAndMSS) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let (tcp_ts, tcp_ts_ecr) = util::get_tcp_timestamps(tcp_pkt);
let mut client_ssl = BufferableSSL::new(session_id);
let ssl_success =
client_ssl.construct_forged_ssl(
tcp_pkt, flow, &wscale_and_mss, tcp_ts, tcp_ts_ecr,
master_key, client_random, server_random);
if ssl_success {
let (is_a_reconnect, rc, cov_error) =
self.create_or_recall_tapdance_session(session_id);
let ref mut td = rc.borrow_mut();
let tok = self.cli_ssl_driver.tok2sess.insert(rc.clone());
if!td.cli_pair.set_bidi(client_ssl, tok, &mut self.cli_ssl_poll) {
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_bidi_reconnect = false;
if let Some(cov_err) = cov_error {
td.end_whole_session_error(cov_err);
self.cli_ssl_driver
.sessions_to_drop.push_back(td.session_id);
}
let src_oct1: u8 = ((flow.src_ip & 0xff000000u32) >> 24) as u8;
let src_oct2: u8 = ((flow.src_ip & 0x00ff0000u32) >> 16) as u8;
if is_a_reconnect {
self.stats.reconns_this_period += 1;
td.send_reconnect_to_client();
td.cov_read_cli_write(&mut self.cov_tcp_driver.rereg_queuer,
false);
if td.both_half_closed() { // if errored, must mark for drop
self.cli_ssl_driver.sessions_to_drop
.push_back(td.session_id);
}
} else {
let decoy_ip_str = util::inet_htoa(flow.dst_ip);
info!("newsession {} {}.{}.x.x:{} -> {}:{}",
session_id, src_oct1, src_oct2, flow.src_port,
decoy_ip_str, flow.dst_port);
td.decoy_ip = decoy_ip_str;
if self.overloaded_decoys.contains(&flow.dst_ip) {
td.end_whole_session_error(SessionError::DecoyOverload);
self.cli_ssl_driver.sessions_to_drop
.push_back(td.session_id);
}
}
self.cli_ssl_driver.stream_timeouts.push_back(
SchedSessionTimeout {
drop_time: precise_time_ns() + STREAM_TIMEOUT_NS,
id: session_id,
stream_count: td.cli_pair.stream_count() });
info!("newstream {} {}.{}.x.x:{} -> {}:{}", session_id,
src_oct1, src_oct2, flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
true
} else {
error!("make_forged_tls() returned 0! Tagged TLS not picked up \
as a TapDance stream :(");
false
}
}
pub fn establish_upload_only(&mut self, tcp_pkt: &TcpPacket, flow: &Flow,
tag_payload: &Vec<u8>) -> bool
{
let (_, master_key, server_random, client_random, session_id) =
parse_tag_payload(tag_payload);
let mut passive_ssl = EventedSSLEavesdropper::new(session_id);
let ssl_success = passive_ssl.construct_eavesdropped_ssl(
tcp_pkt, master_key, client_random, server_random);
if ssl_success
{
if let Some(rc) = self.id2sess.get(&session_id)
{
let inserted_tok =
self.cli_psv_driver.tok2sess.insert(rc.clone());
let ref mut td = rc.borrow_mut();
if!td.cli_pair.set_passive_uploader(passive_ssl, inserted_tok,
&self.cli_psv_poll)
{
td.end_whole_session_error(SessionError::ClientProtocol);
return false;
}
td.expect_uploader_reconnect = false;
// TODO? self.stats.reconns_UPL_this_period += 1;
// TODO? (goes thru bidi) td.send_UPL_reconnect_to_client();
self.flow_tracker.mark_passive_td(flow, rc.clone());
self.cli_ssl_driver.stream_timeouts.push_back(
SchedSessionTimeout {
drop_time: precise_time_ns() + STREAM_TIMEOUT_NS,
id: session_id,
stream_count: td.cli_pair.stream_count() });
report!("newuploader {} {}:{} -> {}:{}", session_id,
util::inet_htoa(flow.src_ip), flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
true
}
else
{
error!("This new upload-only stream does not belong to an \
ongoing session. A session's first stream must be \
bidi. Session ID: {}", session_id);
report!("newuploader {} {}:{} -> {}:{}", session_id,
util::inet_htoa(flow.src_ip), flow.src_port,
util::inet_htoa(flow.dst_ip), flow.dst_port);
report!("error {} {}", session_id,
SessionError::ClientProtocol.to_string());
// (passive_ssl goes out of scope, "deluploader")
false
}
}
else
{
error!("make_forged_memory_tls() returned 0! Tagged TLS not picked \
up as a passive TapDance stream :(");
false
}
}
// Lookup the ongoing session with ID session_id, if it exists. If it does
// not, make a new one (including initiating the Squid TCP connection).
// Returns: Bool is whether the session was already there.
// Option<SessionError> is to be filled if session creation failed.
fn create_or_recall_tapdance_session(&mut self, session_id: SessionId)
-> (bool, Rc<RefCell<TapdanceSession>>, Option<SessionError>)
{
let ref mut cov_tcp_poll = self.cov_tcp_poll;
let ref mut tok_map = self.cov_tcp_driver.tok2sess;
let recalled = self.id2sess.contains_key(&session_id);
let mut cov_err = None;
let rc = self.id2sess.entry(session_id).or_insert_with(||
{
let td_rc =
Rc::new(RefCell::new(TapdanceSession::new(session_id)));
// New proxy connection to local proxy. unwrap() relies on
// SQUID_PROXY_ADDR being a valid constant.
let dest = IpAddr::from_str(SQUID_PROXY_ADDR).unwrap();
let sock_addr = SocketAddr::new(dest, SQUID_PROXY_PORT);
// NOTE: this mio version of TcpStream is nonblocking!
if let Ok(sock) = ::mio::tcp::TcpStream::connect(&sock_addr) {
let ref mut td = td_rc.borrow_mut();
td.cov.set_stream(BufferableTCP::new(sock));
let inserted_tok = tok_map.insert(td_rc.clone());
td.cov.register(cov_tcp_poll, inserted_tok.val(),
util::all_unix_events(), PollOpt::edge())
.unwrap_or_else(|e|{error!("tcp_driver 1st reg: {}", | try_establish_tapdance | identifier_name |
agent.rs | use crate::agent::Capabilities;
use crate::experiments::{Assignee, Experiment};
use crate::prelude::*;
use crate::results::{DatabaseDB, EncodingType, ProgressData};
use crate::server::api_types::{AgentConfig, ApiResponse};
use crate::server::auth::{auth_filter, AuthDetails, TokenType};
use crate::server::messages::Message;
use crate::server::{Data, GithubData, HttpError};
use crossbeam_channel::Sender;
use failure::Compat;
use http::Response;
use hyper::Body;
use std::collections::HashMap;
use std::sync::{Arc, Condvar, Mutex};
use warp::{self, Filter, Rejection};
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct ExperimentData<T> {
experiment_name: String,
#[serde(flatten)]
data: T,
}
pub fn routes(
data: Arc<Data>,
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
) -> impl Filter<Extract = (Response<Body>,), Error = Rejection> + Clone {
let data_cloned = data.clone();
let data_filter = warp::any().map(move || data_cloned.clone());
let mutex_filter = warp::any().map(move || mutex.clone());
let github_data_filter = warp::any().map(move || github_data.clone());
let config = warp::post()
.and(warp::path("config"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_config);
let next_experiment = warp::post()
.and(warp::path("next-experiment"))
.and(warp::path::end())
.and(mutex_filter.clone())
.and(github_data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_experiment);
let next_crate = warp::post()
.and(warp::path("next-crate"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_crate);
let record_progress = warp::post()
.and(warp::path("record-progress"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_record_progress);
let heartbeat = warp::post()
.and(warp::path("heartbeat"))
.and(warp::path::end())
.and(data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_heartbeat);
let error = warp::post()
.and(warp::path("error"))
.and(warp::path::end())
.and(warp::body::json())
.and(mutex_filter)
.and(auth_filter(data, TokenType::Agent))
.map(endpoint_error);
warp::any()
.and(
config
.or(next_experiment)
.unify()
.or(next_crate)
.unify()
.or(record_progress)
.unify()
.or(heartbeat)
.unify()
.or(error)
.unify(),
)
.map(handle_results)
.recover(handle_errors)
.unify()
}
fn endpoint_config(
caps: Capabilities,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
data.agents.add_capabilities(&auth.name, &caps)?;
Ok(ApiResponse::Success {
result: AgentConfig {
agent_name: auth.name,
crater_config: data.config.clone(),
},
}
.into_response()?)
}
fn endpoint_next_experiment(
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
//we need to make sure that Experiment::next executes uninterrupted
let data = mutex.lock().unwrap();
let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?;
let result = if let Some((new, ex)) = next {
if new {
if let Some(github_data) = github_data.as_ref() {
if let Some(ref github_issue) = ex.github_issue {
Message::new()
.line(
"construction",
format!("Experiment **`{}`** is now **running**", ex.name,),
)
.send(&github_issue.api_url, &data, github_data)?;
}
}
}
Some(ex)
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
fn endpoint_next_crate(
experiment: String,
data: Arc<Data>,
_auth: AuthDetails,
) -> Fallible<Response<Body>> {
let result: Option<crate::crates::Crate> =
if let Some(ex) = Experiment::get(&data.db, &experiment)? {
let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?;
if crates.is_empty() {
None
} else {
Some(crates.remove(0))
}
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
#[derive(Clone)]
pub struct RecordProgressThread {
// String is the worker name
queue: Sender<(ExperimentData<ProgressData>, String)>,
in_flight_requests: Arc<(Mutex<usize>, Condvar)>,
}
impl RecordProgressThread {
pub fn new(
db: crate::db::Database,
metrics: crate::server::metrics::Metrics,
) -> RecordProgressThread {
// 64 message queue, after which we start load shedding automatically.
let (tx, rx) = crossbeam_channel::bounded(64);
let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new()));
let this = RecordProgressThread {
queue: tx,
in_flight_requests,
};
let ret = this.clone();
std::thread::spawn(move || loop {
// Panics should already be logged and otherwise there's not much we
// can/should do.
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let (result, worker_name) = rx.recv().unwrap();
this.block_until_idle();
let start = std::time::Instant::now();
if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() {
let db = DatabaseDB::new(&db);
if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) {
// Failing to record a result is basically fine -- this
// just means that we'll have to re-try this job.
log::error!("Failed to store result into database: {:?}", e);
crate::utils::report_failure(&e);
}
metrics.record_completed_jobs(
&worker_name,
&ex.name,
result.data.results.len() as i64,
);
if let Err(e) = db.clear_stale_records() {
// Not a hard failure. We can continue even if we failed
// to clear records from already completed runs...
log::error!("Failed to clear stale records: {:?}", e);
crate::utils::report_failure(&e);
}
metrics
.crater_endpoint_time
.with_label_values(&["record_progress"])
.observe(start.elapsed().as_secs_f64());
}
}));
});
ret
}
pub fn block_until_idle(&self) {
// Wait until there are zero in-flight requests.
//
// Note: We do **not** keep the lock here for the subsequent
// computation. That means that if we ever observe zero, then we're
// going to kick off the below computation; obviously requests may keep
// coming in -- we don't want to block those requests.
//
// The expectation that we will see zero here also implies that
// the server is *sometimes* idle (i.e., we are not constantly
// processing requests at 100% load). It's not clear that's 100%
// a valid assumption, but if we are at 100% load in terms of
// requests coming in, that's a problem in and of itself (since
// the majority of expected requests are record-progress, which
// should be *very* fast now that the work for them is async and
// offloaded to this thread).
//
// Ignore the mutex guard (see above).
drop(
self.in_flight_requests
.1
.wait_while(
self.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()),
|g| *g!= 0,
)
.unwrap_or_else(|g| g.into_inner()),
);
}
pub fn start_request(&self) -> RequestGuard {
*self
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) += 1;
RequestGuard {
thread: self.clone(),
}
}
}
pub struct RequestGuard {
thread: RecordProgressThread,
}
impl Drop for RequestGuard {
fn drop(&mut self) {
*self
.thread
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) -= 1;
self.thread.in_flight_requests.1.notify_one();
}
}
// This endpoint does not use the mutex data wrapper to exclude running in
// parallel with other endpoints, which may mean that we (for example) are
// recording results for an abort'd experiment. This should generally be fine --
// the database already has foreign_keys enabled and that should ensure
// appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug
// elsewhere, not here).
//
// In practice it's pretty likely that we won't fully run in parallel anyway,
// but this lets some of the work proceed without the lock being held, which is
// generally positive.
fn endpoint_record_progress(
result: ExperimentData<ProgressData>,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
match data
.record_progress_worker
.queue
.try_send((result, auth.name))
{
Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?),
Err(crossbeam_channel::TrySendError::Full(_)) => {
data.metrics.crater_bounced_record_progress.inc_by(1);
Ok(ApiResponse::<()>::SlowDown.into_response()?)
}
Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(),
}
}
fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> {
if let Some(rev) = auth.git_revision {
data.agents.set_git_revision(&auth.name, &rev)?;
}
data.agents.record_heartbeat(&auth.name)?;
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn endpoint_error(
error: ExperimentData<HashMap<String, String>>,
mutex: Arc<Mutex<Data>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
log::error!(
"agent {} failed while running {}: {:?}",
auth.name,
error.experiment_name,
error.data.get("error")
);
let data = mutex.lock().unwrap();
let ex = Experiment::get(&data.db, &error.experiment_name)?
.ok_or_else(|| err_msg("no experiment run by this agent"))?;
data.metrics.record_error(&auth.name, &ex.name);
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> {
match resp {
Ok(resp) => resp,
Err(err) => ApiResponse::internal_error(err.to_string())
.into_response()
.unwrap(),
}
}
async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection> | {
let error = if let Some(compat) = err.find::<Compat<HttpError>>() {
Some(*compat.get_ref())
} else if err.is_not_found() {
Some(HttpError::NotFound)
} else {
None
};
match error {
Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()),
Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()),
None => Err(err),
}
} | identifier_body |
|
agent.rs | use crate::agent::Capabilities;
use crate::experiments::{Assignee, Experiment};
use crate::prelude::*;
use crate::results::{DatabaseDB, EncodingType, ProgressData};
use crate::server::api_types::{AgentConfig, ApiResponse};
use crate::server::auth::{auth_filter, AuthDetails, TokenType};
use crate::server::messages::Message;
use crate::server::{Data, GithubData, HttpError};
use crossbeam_channel::Sender;
use failure::Compat;
use http::Response;
use hyper::Body;
use std::collections::HashMap;
use std::sync::{Arc, Condvar, Mutex};
use warp::{self, Filter, Rejection};
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct ExperimentData<T> {
experiment_name: String,
#[serde(flatten)]
data: T,
}
pub fn routes(
data: Arc<Data>,
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
) -> impl Filter<Extract = (Response<Body>,), Error = Rejection> + Clone {
let data_cloned = data.clone();
let data_filter = warp::any().map(move || data_cloned.clone());
let mutex_filter = warp::any().map(move || mutex.clone());
let github_data_filter = warp::any().map(move || github_data.clone());
let config = warp::post()
.and(warp::path("config"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_config);
let next_experiment = warp::post()
.and(warp::path("next-experiment"))
.and(warp::path::end())
.and(mutex_filter.clone())
.and(github_data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_experiment);
let next_crate = warp::post()
.and(warp::path("next-crate"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_crate);
let record_progress = warp::post()
.and(warp::path("record-progress"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_record_progress);
let heartbeat = warp::post()
.and(warp::path("heartbeat"))
.and(warp::path::end())
.and(data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_heartbeat);
let error = warp::post()
.and(warp::path("error"))
.and(warp::path::end())
.and(warp::body::json())
.and(mutex_filter)
.and(auth_filter(data, TokenType::Agent))
.map(endpoint_error);
warp::any()
.and(
config
.or(next_experiment)
.unify()
.or(next_crate)
.unify()
.or(record_progress)
.unify()
.or(heartbeat)
.unify()
.or(error)
.unify(),
)
.map(handle_results)
.recover(handle_errors)
.unify()
}
fn endpoint_config(
caps: Capabilities,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
data.agents.add_capabilities(&auth.name, &caps)?;
Ok(ApiResponse::Success {
result: AgentConfig {
agent_name: auth.name,
crater_config: data.config.clone(),
},
}
.into_response()?)
}
fn endpoint_next_experiment(
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
//we need to make sure that Experiment::next executes uninterrupted
let data = mutex.lock().unwrap();
let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?;
let result = if let Some((new, ex)) = next {
if new {
if let Some(github_data) = github_data.as_ref() {
if let Some(ref github_issue) = ex.github_issue {
Message::new()
.line(
"construction",
format!("Experiment **`{}`** is now **running**", ex.name,),
)
.send(&github_issue.api_url, &data, github_data)?;
}
}
}
Some(ex)
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
fn endpoint_next_crate(
experiment: String,
data: Arc<Data>,
_auth: AuthDetails,
) -> Fallible<Response<Body>> {
let result: Option<crate::crates::Crate> =
if let Some(ex) = Experiment::get(&data.db, &experiment)? {
let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?;
if crates.is_empty() {
None
} else {
Some(crates.remove(0))
}
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
#[derive(Clone)]
pub struct RecordProgressThread {
// String is the worker name
queue: Sender<(ExperimentData<ProgressData>, String)>,
in_flight_requests: Arc<(Mutex<usize>, Condvar)>,
}
impl RecordProgressThread {
pub fn new(
db: crate::db::Database,
metrics: crate::server::metrics::Metrics,
) -> RecordProgressThread {
// 64 message queue, after which we start load shedding automatically.
let (tx, rx) = crossbeam_channel::bounded(64);
let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new()));
let this = RecordProgressThread {
queue: tx,
in_flight_requests,
};
let ret = this.clone();
std::thread::spawn(move || loop {
// Panics should already be logged and otherwise there's not much we
// can/should do.
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let (result, worker_name) = rx.recv().unwrap();
this.block_until_idle();
let start = std::time::Instant::now();
if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() {
let db = DatabaseDB::new(&db);
if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) {
// Failing to record a result is basically fine -- this
// just means that we'll have to re-try this job.
log::error!("Failed to store result into database: {:?}", e);
crate::utils::report_failure(&e);
}
metrics.record_completed_jobs(
&worker_name,
&ex.name,
result.data.results.len() as i64,
);
if let Err(e) = db.clear_stale_records() {
// Not a hard failure. We can continue even if we failed
// to clear records from already completed runs...
log::error!("Failed to clear stale records: {:?}", e);
crate::utils::report_failure(&e);
}
metrics
.crater_endpoint_time
.with_label_values(&["record_progress"])
.observe(start.elapsed().as_secs_f64());
}
}));
});
ret
}
pub fn block_until_idle(&self) {
// Wait until there are zero in-flight requests.
//
// Note: We do **not** keep the lock here for the subsequent
// computation. That means that if we ever observe zero, then we're
// going to kick off the below computation; obviously requests may keep
// coming in -- we don't want to block those requests.
//
// The expectation that we will see zero here also implies that
// the server is *sometimes* idle (i.e., we are not constantly
// processing requests at 100% load). It's not clear that's 100%
// a valid assumption, but if we are at 100% load in terms of
// requests coming in, that's a problem in and of itself (since
// the majority of expected requests are record-progress, which
// should be *very* fast now that the work for them is async and
// offloaded to this thread).
// | .1
.wait_while(
self.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()),
|g| *g!= 0,
)
.unwrap_or_else(|g| g.into_inner()),
);
}
pub fn start_request(&self) -> RequestGuard {
*self
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) += 1;
RequestGuard {
thread: self.clone(),
}
}
}
pub struct RequestGuard {
thread: RecordProgressThread,
}
impl Drop for RequestGuard {
fn drop(&mut self) {
*self
.thread
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) -= 1;
self.thread.in_flight_requests.1.notify_one();
}
}
// This endpoint does not use the mutex data wrapper to exclude running in
// parallel with other endpoints, which may mean that we (for example) are
// recording results for an abort'd experiment. This should generally be fine --
// the database already has foreign_keys enabled and that should ensure
// appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug
// elsewhere, not here).
//
// In practice it's pretty likely that we won't fully run in parallel anyway,
// but this lets some of the work proceed without the lock being held, which is
// generally positive.
fn endpoint_record_progress(
result: ExperimentData<ProgressData>,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
match data
.record_progress_worker
.queue
.try_send((result, auth.name))
{
Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?),
Err(crossbeam_channel::TrySendError::Full(_)) => {
data.metrics.crater_bounced_record_progress.inc_by(1);
Ok(ApiResponse::<()>::SlowDown.into_response()?)
}
Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(),
}
}
fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> {
if let Some(rev) = auth.git_revision {
data.agents.set_git_revision(&auth.name, &rev)?;
}
data.agents.record_heartbeat(&auth.name)?;
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn endpoint_error(
error: ExperimentData<HashMap<String, String>>,
mutex: Arc<Mutex<Data>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
log::error!(
"agent {} failed while running {}: {:?}",
auth.name,
error.experiment_name,
error.data.get("error")
);
let data = mutex.lock().unwrap();
let ex = Experiment::get(&data.db, &error.experiment_name)?
.ok_or_else(|| err_msg("no experiment run by this agent"))?;
data.metrics.record_error(&auth.name, &ex.name);
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> {
match resp {
Ok(resp) => resp,
Err(err) => ApiResponse::internal_error(err.to_string())
.into_response()
.unwrap(),
}
}
async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection> {
let error = if let Some(compat) = err.find::<Compat<HttpError>>() {
Some(*compat.get_ref())
} else if err.is_not_found() {
Some(HttpError::NotFound)
} else {
None
};
match error {
Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()),
Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()),
None => Err(err),
}
} | // Ignore the mutex guard (see above).
drop(
self.in_flight_requests | random_line_split |
agent.rs | use crate::agent::Capabilities;
use crate::experiments::{Assignee, Experiment};
use crate::prelude::*;
use crate::results::{DatabaseDB, EncodingType, ProgressData};
use crate::server::api_types::{AgentConfig, ApiResponse};
use crate::server::auth::{auth_filter, AuthDetails, TokenType};
use crate::server::messages::Message;
use crate::server::{Data, GithubData, HttpError};
use crossbeam_channel::Sender;
use failure::Compat;
use http::Response;
use hyper::Body;
use std::collections::HashMap;
use std::sync::{Arc, Condvar, Mutex};
use warp::{self, Filter, Rejection};
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct ExperimentData<T> {
experiment_name: String,
#[serde(flatten)]
data: T,
}
pub fn routes(
data: Arc<Data>,
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
) -> impl Filter<Extract = (Response<Body>,), Error = Rejection> + Clone {
let data_cloned = data.clone();
let data_filter = warp::any().map(move || data_cloned.clone());
let mutex_filter = warp::any().map(move || mutex.clone());
let github_data_filter = warp::any().map(move || github_data.clone());
let config = warp::post()
.and(warp::path("config"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_config);
let next_experiment = warp::post()
.and(warp::path("next-experiment"))
.and(warp::path::end())
.and(mutex_filter.clone())
.and(github_data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_experiment);
let next_crate = warp::post()
.and(warp::path("next-crate"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_crate);
let record_progress = warp::post()
.and(warp::path("record-progress"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_record_progress);
let heartbeat = warp::post()
.and(warp::path("heartbeat"))
.and(warp::path::end())
.and(data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_heartbeat);
let error = warp::post()
.and(warp::path("error"))
.and(warp::path::end())
.and(warp::body::json())
.and(mutex_filter)
.and(auth_filter(data, TokenType::Agent))
.map(endpoint_error);
warp::any()
.and(
config
.or(next_experiment)
.unify()
.or(next_crate)
.unify()
.or(record_progress)
.unify()
.or(heartbeat)
.unify()
.or(error)
.unify(),
)
.map(handle_results)
.recover(handle_errors)
.unify()
}
fn endpoint_config(
caps: Capabilities,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
data.agents.add_capabilities(&auth.name, &caps)?;
Ok(ApiResponse::Success {
result: AgentConfig {
agent_name: auth.name,
crater_config: data.config.clone(),
},
}
.into_response()?)
}
fn endpoint_next_experiment(
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
//we need to make sure that Experiment::next executes uninterrupted
let data = mutex.lock().unwrap();
let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?;
let result = if let Some((new, ex)) = next {
if new {
if let Some(github_data) = github_data.as_ref() {
if let Some(ref github_issue) = ex.github_issue {
Message::new()
.line(
"construction",
format!("Experiment **`{}`** is now **running**", ex.name,),
)
.send(&github_issue.api_url, &data, github_data)?;
}
}
}
Some(ex)
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
fn endpoint_next_crate(
experiment: String,
data: Arc<Data>,
_auth: AuthDetails,
) -> Fallible<Response<Body>> {
let result: Option<crate::crates::Crate> =
if let Some(ex) = Experiment::get(&data.db, &experiment)? {
let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?;
if crates.is_empty() {
None
} else {
Some(crates.remove(0))
}
} else | ;
Ok(ApiResponse::Success { result }.into_response()?)
}
#[derive(Clone)]
pub struct RecordProgressThread {
// String is the worker name
queue: Sender<(ExperimentData<ProgressData>, String)>,
in_flight_requests: Arc<(Mutex<usize>, Condvar)>,
}
impl RecordProgressThread {
pub fn new(
db: crate::db::Database,
metrics: crate::server::metrics::Metrics,
) -> RecordProgressThread {
// 64 message queue, after which we start load shedding automatically.
let (tx, rx) = crossbeam_channel::bounded(64);
let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new()));
let this = RecordProgressThread {
queue: tx,
in_flight_requests,
};
let ret = this.clone();
std::thread::spawn(move || loop {
// Panics should already be logged and otherwise there's not much we
// can/should do.
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let (result, worker_name) = rx.recv().unwrap();
this.block_until_idle();
let start = std::time::Instant::now();
if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() {
let db = DatabaseDB::new(&db);
if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) {
// Failing to record a result is basically fine -- this
// just means that we'll have to re-try this job.
log::error!("Failed to store result into database: {:?}", e);
crate::utils::report_failure(&e);
}
metrics.record_completed_jobs(
&worker_name,
&ex.name,
result.data.results.len() as i64,
);
if let Err(e) = db.clear_stale_records() {
// Not a hard failure. We can continue even if we failed
// to clear records from already completed runs...
log::error!("Failed to clear stale records: {:?}", e);
crate::utils::report_failure(&e);
}
metrics
.crater_endpoint_time
.with_label_values(&["record_progress"])
.observe(start.elapsed().as_secs_f64());
}
}));
});
ret
}
pub fn block_until_idle(&self) {
// Wait until there are zero in-flight requests.
//
// Note: We do **not** keep the lock here for the subsequent
// computation. That means that if we ever observe zero, then we're
// going to kick off the below computation; obviously requests may keep
// coming in -- we don't want to block those requests.
//
// The expectation that we will see zero here also implies that
// the server is *sometimes* idle (i.e., we are not constantly
// processing requests at 100% load). It's not clear that's 100%
// a valid assumption, but if we are at 100% load in terms of
// requests coming in, that's a problem in and of itself (since
// the majority of expected requests are record-progress, which
// should be *very* fast now that the work for them is async and
// offloaded to this thread).
//
// Ignore the mutex guard (see above).
drop(
self.in_flight_requests
.1
.wait_while(
self.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()),
|g| *g!= 0,
)
.unwrap_or_else(|g| g.into_inner()),
);
}
pub fn start_request(&self) -> RequestGuard {
*self
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) += 1;
RequestGuard {
thread: self.clone(),
}
}
}
pub struct RequestGuard {
thread: RecordProgressThread,
}
impl Drop for RequestGuard {
fn drop(&mut self) {
*self
.thread
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) -= 1;
self.thread.in_flight_requests.1.notify_one();
}
}
// This endpoint does not use the mutex data wrapper to exclude running in
// parallel with other endpoints, which may mean that we (for example) are
// recording results for an abort'd experiment. This should generally be fine --
// the database already has foreign_keys enabled and that should ensure
// appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug
// elsewhere, not here).
//
// In practice it's pretty likely that we won't fully run in parallel anyway,
// but this lets some of the work proceed without the lock being held, which is
// generally positive.
fn endpoint_record_progress(
result: ExperimentData<ProgressData>,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
match data
.record_progress_worker
.queue
.try_send((result, auth.name))
{
Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?),
Err(crossbeam_channel::TrySendError::Full(_)) => {
data.metrics.crater_bounced_record_progress.inc_by(1);
Ok(ApiResponse::<()>::SlowDown.into_response()?)
}
Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(),
}
}
fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> {
if let Some(rev) = auth.git_revision {
data.agents.set_git_revision(&auth.name, &rev)?;
}
data.agents.record_heartbeat(&auth.name)?;
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn endpoint_error(
error: ExperimentData<HashMap<String, String>>,
mutex: Arc<Mutex<Data>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
log::error!(
"agent {} failed while running {}: {:?}",
auth.name,
error.experiment_name,
error.data.get("error")
);
let data = mutex.lock().unwrap();
let ex = Experiment::get(&data.db, &error.experiment_name)?
.ok_or_else(|| err_msg("no experiment run by this agent"))?;
data.metrics.record_error(&auth.name, &ex.name);
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> {
match resp {
Ok(resp) => resp,
Err(err) => ApiResponse::internal_error(err.to_string())
.into_response()
.unwrap(),
}
}
async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection> {
let error = if let Some(compat) = err.find::<Compat<HttpError>>() {
Some(*compat.get_ref())
} else if err.is_not_found() {
Some(HttpError::NotFound)
} else {
None
};
match error {
Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()),
Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()),
None => Err(err),
}
}
| {
None
} | conditional_block |
agent.rs | use crate::agent::Capabilities;
use crate::experiments::{Assignee, Experiment};
use crate::prelude::*;
use crate::results::{DatabaseDB, EncodingType, ProgressData};
use crate::server::api_types::{AgentConfig, ApiResponse};
use crate::server::auth::{auth_filter, AuthDetails, TokenType};
use crate::server::messages::Message;
use crate::server::{Data, GithubData, HttpError};
use crossbeam_channel::Sender;
use failure::Compat;
use http::Response;
use hyper::Body;
use std::collections::HashMap;
use std::sync::{Arc, Condvar, Mutex};
use warp::{self, Filter, Rejection};
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct ExperimentData<T> {
experiment_name: String,
#[serde(flatten)]
data: T,
}
pub fn routes(
data: Arc<Data>,
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
) -> impl Filter<Extract = (Response<Body>,), Error = Rejection> + Clone {
let data_cloned = data.clone();
let data_filter = warp::any().map(move || data_cloned.clone());
let mutex_filter = warp::any().map(move || mutex.clone());
let github_data_filter = warp::any().map(move || github_data.clone());
let config = warp::post()
.and(warp::path("config"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_config);
let next_experiment = warp::post()
.and(warp::path("next-experiment"))
.and(warp::path::end())
.and(mutex_filter.clone())
.and(github_data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_experiment);
let next_crate = warp::post()
.and(warp::path("next-crate"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_next_crate);
let record_progress = warp::post()
.and(warp::path("record-progress"))
.and(warp::path::end())
.and(warp::body::json())
.and(data_filter.clone())
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_record_progress);
let heartbeat = warp::post()
.and(warp::path("heartbeat"))
.and(warp::path::end())
.and(data_filter)
.and(auth_filter(data.clone(), TokenType::Agent))
.map(endpoint_heartbeat);
let error = warp::post()
.and(warp::path("error"))
.and(warp::path::end())
.and(warp::body::json())
.and(mutex_filter)
.and(auth_filter(data, TokenType::Agent))
.map(endpoint_error);
warp::any()
.and(
config
.or(next_experiment)
.unify()
.or(next_crate)
.unify()
.or(record_progress)
.unify()
.or(heartbeat)
.unify()
.or(error)
.unify(),
)
.map(handle_results)
.recover(handle_errors)
.unify()
}
fn endpoint_config(
caps: Capabilities,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
data.agents.add_capabilities(&auth.name, &caps)?;
Ok(ApiResponse::Success {
result: AgentConfig {
agent_name: auth.name,
crater_config: data.config.clone(),
},
}
.into_response()?)
}
fn endpoint_next_experiment(
mutex: Arc<Mutex<Data>>,
github_data: Option<Arc<GithubData>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
//we need to make sure that Experiment::next executes uninterrupted
let data = mutex.lock().unwrap();
let next = Experiment::next(&data.db, &Assignee::Agent(auth.name))?;
let result = if let Some((new, ex)) = next {
if new {
if let Some(github_data) = github_data.as_ref() {
if let Some(ref github_issue) = ex.github_issue {
Message::new()
.line(
"construction",
format!("Experiment **`{}`** is now **running**", ex.name,),
)
.send(&github_issue.api_url, &data, github_data)?;
}
}
}
Some(ex)
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
fn endpoint_next_crate(
experiment: String,
data: Arc<Data>,
_auth: AuthDetails,
) -> Fallible<Response<Body>> {
let result: Option<crate::crates::Crate> =
if let Some(ex) = Experiment::get(&data.db, &experiment)? {
let mut crates = ex.get_uncompleted_crates(&data.db, Some(1))?;
if crates.is_empty() {
None
} else {
Some(crates.remove(0))
}
} else {
None
};
Ok(ApiResponse::Success { result }.into_response()?)
}
#[derive(Clone)]
pub struct RecordProgressThread {
// String is the worker name
queue: Sender<(ExperimentData<ProgressData>, String)>,
in_flight_requests: Arc<(Mutex<usize>, Condvar)>,
}
impl RecordProgressThread {
pub fn new(
db: crate::db::Database,
metrics: crate::server::metrics::Metrics,
) -> RecordProgressThread {
// 64 message queue, after which we start load shedding automatically.
let (tx, rx) = crossbeam_channel::bounded(64);
let in_flight_requests = Arc::new((Mutex::new(0), Condvar::new()));
let this = RecordProgressThread {
queue: tx,
in_flight_requests,
};
let ret = this.clone();
std::thread::spawn(move || loop {
// Panics should already be logged and otherwise there's not much we
// can/should do.
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
let (result, worker_name) = rx.recv().unwrap();
this.block_until_idle();
let start = std::time::Instant::now();
if let Some(ex) = Experiment::get(&db, &result.experiment_name).unwrap() {
let db = DatabaseDB::new(&db);
if let Err(e) = db.store(&ex, &result.data, EncodingType::Plain) {
// Failing to record a result is basically fine -- this
// just means that we'll have to re-try this job.
log::error!("Failed to store result into database: {:?}", e);
crate::utils::report_failure(&e);
}
metrics.record_completed_jobs(
&worker_name,
&ex.name,
result.data.results.len() as i64,
);
if let Err(e) = db.clear_stale_records() {
// Not a hard failure. We can continue even if we failed
// to clear records from already completed runs...
log::error!("Failed to clear stale records: {:?}", e);
crate::utils::report_failure(&e);
}
metrics
.crater_endpoint_time
.with_label_values(&["record_progress"])
.observe(start.elapsed().as_secs_f64());
}
}));
});
ret
}
pub fn block_until_idle(&self) {
// Wait until there are zero in-flight requests.
//
// Note: We do **not** keep the lock here for the subsequent
// computation. That means that if we ever observe zero, then we're
// going to kick off the below computation; obviously requests may keep
// coming in -- we don't want to block those requests.
//
// The expectation that we will see zero here also implies that
// the server is *sometimes* idle (i.e., we are not constantly
// processing requests at 100% load). It's not clear that's 100%
// a valid assumption, but if we are at 100% load in terms of
// requests coming in, that's a problem in and of itself (since
// the majority of expected requests are record-progress, which
// should be *very* fast now that the work for them is async and
// offloaded to this thread).
//
// Ignore the mutex guard (see above).
drop(
self.in_flight_requests
.1
.wait_while(
self.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()),
|g| *g!= 0,
)
.unwrap_or_else(|g| g.into_inner()),
);
}
pub fn start_request(&self) -> RequestGuard {
*self
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) += 1;
RequestGuard {
thread: self.clone(),
}
}
}
pub struct | {
thread: RecordProgressThread,
}
impl Drop for RequestGuard {
fn drop(&mut self) {
*self
.thread
.in_flight_requests
.0
.lock()
.unwrap_or_else(|l| l.into_inner()) -= 1;
self.thread.in_flight_requests.1.notify_one();
}
}
// This endpoint does not use the mutex data wrapper to exclude running in
// parallel with other endpoints, which may mean that we (for example) are
// recording results for an abort'd experiment. This should generally be fine --
// the database already has foreign_keys enabled and that should ensure
// appropriate synchronization elsewhere. (If it doesn't, that's mostly a bug
// elsewhere, not here).
//
// In practice it's pretty likely that we won't fully run in parallel anyway,
// but this lets some of the work proceed without the lock being held, which is
// generally positive.
fn endpoint_record_progress(
result: ExperimentData<ProgressData>,
data: Arc<Data>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
match data
.record_progress_worker
.queue
.try_send((result, auth.name))
{
Ok(()) => Ok(ApiResponse::Success { result: true }.into_response()?),
Err(crossbeam_channel::TrySendError::Full(_)) => {
data.metrics.crater_bounced_record_progress.inc_by(1);
Ok(ApiResponse::<()>::SlowDown.into_response()?)
}
Err(crossbeam_channel::TrySendError::Disconnected(_)) => unreachable!(),
}
}
fn endpoint_heartbeat(data: Arc<Data>, auth: AuthDetails) -> Fallible<Response<Body>> {
if let Some(rev) = auth.git_revision {
data.agents.set_git_revision(&auth.name, &rev)?;
}
data.agents.record_heartbeat(&auth.name)?;
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn endpoint_error(
error: ExperimentData<HashMap<String, String>>,
mutex: Arc<Mutex<Data>>,
auth: AuthDetails,
) -> Fallible<Response<Body>> {
log::error!(
"agent {} failed while running {}: {:?}",
auth.name,
error.experiment_name,
error.data.get("error")
);
let data = mutex.lock().unwrap();
let ex = Experiment::get(&data.db, &error.experiment_name)?
.ok_or_else(|| err_msg("no experiment run by this agent"))?;
data.metrics.record_error(&auth.name, &ex.name);
Ok(ApiResponse::Success { result: true }.into_response()?)
}
fn handle_results(resp: Fallible<Response<Body>>) -> Response<Body> {
match resp {
Ok(resp) => resp,
Err(err) => ApiResponse::internal_error(err.to_string())
.into_response()
.unwrap(),
}
}
async fn handle_errors(err: Rejection) -> Result<Response<Body>, Rejection> {
let error = if let Some(compat) = err.find::<Compat<HttpError>>() {
Some(*compat.get_ref())
} else if err.is_not_found() {
Some(HttpError::NotFound)
} else {
None
};
match error {
Some(HttpError::NotFound) => Ok(ApiResponse::not_found().into_response().unwrap()),
Some(HttpError::Forbidden) => Ok(ApiResponse::unauthorized().into_response().unwrap()),
None => Err(err),
}
}
| RequestGuard | identifier_name |
message.rs | //! https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-marshaling
use byteorder::{LittleEndian, BigEndian, ReadBytesExt, ByteOrder, WriteBytesExt};
use crate::names::{BusName, InterfaceName, ErrorName, MemberName};
use crate::writer::{DbusWriter, DbusWrite};
use crate::reader::{DbusReader, DbusRead};
use crate::type_system::{ObjectPath, Signature, UnixFd, Serial};
use std::io;
#[cfg(test)]
mod tests {
fn reverse<T: Clone>(xs: &[T]) -> Vec<T> {
let mut rev = vec!();
for x in xs.iter() {
rev.insert(0, x.clone())
}
rev
}
#[cfg(test)]
mod tests {
use crate::message::tests::reverse;
quickcheck! {
fn prop(xs: Vec<u32>) -> bool {
xs == reverse(&reverse(&xs))
}
}
}
}
/// The maximum length of a message, including header, header alignment padding,
/// and body is 2 to the 27th power or 134217728 (128 MiB).
/// Implementations must not send or accept messages exceeding this size.
const MAX_MESSAGE_SIZE: u32 = 2^27;
/// A message consists of a header and a body. If you think of a message as a package,
/// the header is the address, and the body contains the package contents.
/// Both header and body use the D-Bus [type system](https://dbus.freedesktop.org/doc/dbus-specification.html#type-system) and format for serializing data.
struct Message {
/// The message delivery system uses the header information to figure out
/// where to send the message and how to interpret it.
header: Header,
/// The body of the message is made up of zero or more arguments,
/// which are typed values, such as an integer or a byte array.
body: Body,
}
impl Message {
fn write<T>(&self, writer:T) -> Result<(), io::Error>
where T: io::Write
{
let mut writer = DbusWriter::new(writer);
match self.header.endianess_flag {
EndianessFlag::LittleEndian => {
self.header.write::<T, LittleEndian>(&mut writer)?;
self.body.write::<T, LittleEndian>(&mut writer)?;
},
EndianessFlag::BigEndian => {
self.header.write::<T, BigEndian>(&mut writer)?;
self.body.write::<T, BigEndian>(&mut writer)?;
},
};
Ok(())
}
}
/// Endianness flag; ASCII 'l' for little-endian or ASCII 'B' for big-endian.
/// Both header and body are in this endianness.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum EndianessFlag {
LittleEndian,
BigEndian,
}
impl DbusWrite for EndianessFlag {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
EndianessFlag::LittleEndian => writer.write_u8(b'l'),
EndianessFlag::BigEndian => writer.write_u8(b'B'),
}
}
}
impl DbusRead<EndianessFlag> for EndianessFlag {
fn read<T1, T2>(&self, reader: &mut DbusReader<T1>) -> Result<EndianessFlag, io::Error>
where T1: io::Read,
T2: ByteOrder
{
match reader.read_u8()? {
b'l' => Ok(EndianessFlag::LittleEndian),
b'B' => Ok(EndianessFlag::BigEndian),
x => {
let str_err = format!("Invalid endianess `{}`", x);
Err(io::Error::new(io::ErrorKind::InvalidData, str_err))
},
}
}
}
/// Message type. Unknown types must be ignored.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MessageType {
/// This is an invalid type.
Invalid = 0,
/// Method call. This message type may prompt a reply.
MethodCall = 1,
/// Method reply with returned data.
MethodReturn = 2,
/// Error reply. If the first argument exists
/// and is a string, it is an error message.
Error = 3,
/// Signal emission.
Signal = 4,
}
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MajorProtocolVersion(pub u8);
impl DbusWrite for MajorProtocolVersion {
fn | <T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
writer.write_u8(self.0)
}
}
bitflags! {
struct HeaderFlags: u8 {
/// This message does not expect method return replies or error replies,
/// even if it is of a type that can have a reply; the reply should be omitted.
const NO_REPLY_EXPECTED = 0x1;
/// The bus must not launch an owner for the destination name in response to this message.
const NO_AUTO_START = 0x1;
/// This flag may be set on a method call message to inform the receiving side that the caller
/// is prepared to wait for interactive authorization, which might take a considerable time to complete.
const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4;
}
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HeaderFieldCode {
/// Not a valid field name (error if it appears in a message)
Invalid = 0,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path = 1,
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface = 2,
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member = 3,
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName = 4,
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial = 5,
/// The name of the connection this message is intended for.
/// Optional.
Destination = 6,
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender = 7,
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature = 8,
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds = 9,
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
///
#[repr(u8)]
enum HeaderField {
/// Not a valid field name (error if it appears in a message)
Invalid,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path(ObjectPath),
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface(InterfaceName),
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member(MemberName),
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName(ErrorName),
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial(Serial),
/// The name of the connection this message is intended for.
/// Optional.
Destination(String),
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender(String),
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature(Signature),
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds(u32),
}
impl DbusWrite for HeaderField {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")),
HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?,
HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?,
HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?,
HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?,
HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?,
HeaderField::Destination(destination) => writer.write_string::<T2>(destination)?,
HeaderField::Sender(sender) => writer.write_string::<T2>(sender)?,
HeaderField::Signature(signature) => signature.write::<_, T2>(writer)?,
HeaderField::UnixFds(fd) => writer.write_u32::<T2>(*fd)?,
};
Ok(())
}
}
/// The length of the header must be a multiple of 8, allowing the body to begin on
/// an 8-byte boundary when storing the entire message in a single buffer.
/// If the header does not naturally end on an 8-byte boundary up to 7 bytes of
/// nul-initialized alignment padding must be added.
/// https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-header-fields
struct Header {
endianess_flag: EndianessFlag,
/// Message type. Unknown types must be ignored.
message_type: MessageType,
/// Bitwise OR of flags. Unknown flags must be ignored.
flags: HeaderFlags,
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
major_protocol_version: MajorProtocolVersion,
/// Length in bytes of the message body, starting from the end of the header.
/// The header ends after its alignment padding to an 8-boundary.
length_message_body: u32,
/// The serial of this message, used as a cookie by the sender to identify
/// the reply corresponding to this request. This must not be zero.
serial: Serial,
/// An array of zero or more header fields where the byte is the field code,
/// and the variant is the field value. The message type determines which fields are required.
header_fields: Vec<(HeaderFieldCode, HeaderField)>,
}
impl DbusWrite for Header {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
writer.write_u8(self.endianess_flag as u8)?;
writer.write_u8(self.message_type as u8)?;
writer.write_u8(self.flags.bits())?;
writer.write_u8(self.major_protocol_version.0)?;
writer.write_u32::<T2>(self.length_message_body)?;
writer.write_u32::<T2>(self.serial.0)?;
for (ref code, ref field) in self.header_fields.iter().by_ref() {
writer.write_u8(code.clone() as u8)?;
field.write::<T1, T2>(writer)?;
}
Ok(())
}
}
struct Body {
}
impl DbusWrite for Body {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder {
unimplemented!();
}
} | write | identifier_name |
message.rs | //! https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-marshaling
use byteorder::{LittleEndian, BigEndian, ReadBytesExt, ByteOrder, WriteBytesExt};
use crate::names::{BusName, InterfaceName, ErrorName, MemberName};
use crate::writer::{DbusWriter, DbusWrite};
use crate::reader::{DbusReader, DbusRead};
use crate::type_system::{ObjectPath, Signature, UnixFd, Serial};
use std::io;
#[cfg(test)]
mod tests {
fn reverse<T: Clone>(xs: &[T]) -> Vec<T> {
let mut rev = vec!();
for x in xs.iter() {
rev.insert(0, x.clone())
}
rev
}
#[cfg(test)]
mod tests {
use crate::message::tests::reverse;
quickcheck! {
fn prop(xs: Vec<u32>) -> bool {
xs == reverse(&reverse(&xs))
}
}
}
}
/// The maximum length of a message, including header, header alignment padding,
/// and body is 2 to the 27th power or 134217728 (128 MiB).
/// Implementations must not send or accept messages exceeding this size.
const MAX_MESSAGE_SIZE: u32 = 2^27;
/// A message consists of a header and a body. If you think of a message as a package,
/// the header is the address, and the body contains the package contents.
/// Both header and body use the D-Bus [type system](https://dbus.freedesktop.org/doc/dbus-specification.html#type-system) and format for serializing data. | /// The body of the message is made up of zero or more arguments,
/// which are typed values, such as an integer or a byte array.
body: Body,
}
impl Message {
fn write<T>(&self, writer:T) -> Result<(), io::Error>
where T: io::Write
{
let mut writer = DbusWriter::new(writer);
match self.header.endianess_flag {
EndianessFlag::LittleEndian => {
self.header.write::<T, LittleEndian>(&mut writer)?;
self.body.write::<T, LittleEndian>(&mut writer)?;
},
EndianessFlag::BigEndian => {
self.header.write::<T, BigEndian>(&mut writer)?;
self.body.write::<T, BigEndian>(&mut writer)?;
},
};
Ok(())
}
}
/// Endianness flag; ASCII 'l' for little-endian or ASCII 'B' for big-endian.
/// Both header and body are in this endianness.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum EndianessFlag {
LittleEndian,
BigEndian,
}
impl DbusWrite for EndianessFlag {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
EndianessFlag::LittleEndian => writer.write_u8(b'l'),
EndianessFlag::BigEndian => writer.write_u8(b'B'),
}
}
}
impl DbusRead<EndianessFlag> for EndianessFlag {
fn read<T1, T2>(&self, reader: &mut DbusReader<T1>) -> Result<EndianessFlag, io::Error>
where T1: io::Read,
T2: ByteOrder
{
match reader.read_u8()? {
b'l' => Ok(EndianessFlag::LittleEndian),
b'B' => Ok(EndianessFlag::BigEndian),
x => {
let str_err = format!("Invalid endianess `{}`", x);
Err(io::Error::new(io::ErrorKind::InvalidData, str_err))
},
}
}
}
/// Message type. Unknown types must be ignored.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MessageType {
/// This is an invalid type.
Invalid = 0,
/// Method call. This message type may prompt a reply.
MethodCall = 1,
/// Method reply with returned data.
MethodReturn = 2,
/// Error reply. If the first argument exists
/// and is a string, it is an error message.
Error = 3,
/// Signal emission.
Signal = 4,
}
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MajorProtocolVersion(pub u8);
impl DbusWrite for MajorProtocolVersion {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
writer.write_u8(self.0)
}
}
bitflags! {
struct HeaderFlags: u8 {
/// This message does not expect method return replies or error replies,
/// even if it is of a type that can have a reply; the reply should be omitted.
const NO_REPLY_EXPECTED = 0x1;
/// The bus must not launch an owner for the destination name in response to this message.
const NO_AUTO_START = 0x1;
/// This flag may be set on a method call message to inform the receiving side that the caller
/// is prepared to wait for interactive authorization, which might take a considerable time to complete.
const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4;
}
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HeaderFieldCode {
/// Not a valid field name (error if it appears in a message)
Invalid = 0,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path = 1,
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface = 2,
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member = 3,
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName = 4,
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial = 5,
/// The name of the connection this message is intended for.
/// Optional.
Destination = 6,
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender = 7,
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature = 8,
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds = 9,
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
///
#[repr(u8)]
enum HeaderField {
/// Not a valid field name (error if it appears in a message)
Invalid,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path(ObjectPath),
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface(InterfaceName),
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member(MemberName),
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName(ErrorName),
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial(Serial),
/// The name of the connection this message is intended for.
/// Optional.
Destination(String),
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender(String),
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature(Signature),
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds(u32),
}
impl DbusWrite for HeaderField {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")),
HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?,
HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?,
HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?,
HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?,
HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?,
HeaderField::Destination(destination) => writer.write_string::<T2>(destination)?,
HeaderField::Sender(sender) => writer.write_string::<T2>(sender)?,
HeaderField::Signature(signature) => signature.write::<_, T2>(writer)?,
HeaderField::UnixFds(fd) => writer.write_u32::<T2>(*fd)?,
};
Ok(())
}
}
/// The length of the header must be a multiple of 8, allowing the body to begin on
/// an 8-byte boundary when storing the entire message in a single buffer.
/// If the header does not naturally end on an 8-byte boundary up to 7 bytes of
/// nul-initialized alignment padding must be added.
/// https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-header-fields
struct Header {
endianess_flag: EndianessFlag,
/// Message type. Unknown types must be ignored.
message_type: MessageType,
/// Bitwise OR of flags. Unknown flags must be ignored.
flags: HeaderFlags,
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
major_protocol_version: MajorProtocolVersion,
/// Length in bytes of the message body, starting from the end of the header.
/// The header ends after its alignment padding to an 8-boundary.
length_message_body: u32,
/// The serial of this message, used as a cookie by the sender to identify
/// the reply corresponding to this request. This must not be zero.
serial: Serial,
/// An array of zero or more header fields where the byte is the field code,
/// and the variant is the field value. The message type determines which fields are required.
header_fields: Vec<(HeaderFieldCode, HeaderField)>,
}
impl DbusWrite for Header {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
writer.write_u8(self.endianess_flag as u8)?;
writer.write_u8(self.message_type as u8)?;
writer.write_u8(self.flags.bits())?;
writer.write_u8(self.major_protocol_version.0)?;
writer.write_u32::<T2>(self.length_message_body)?;
writer.write_u32::<T2>(self.serial.0)?;
for (ref code, ref field) in self.header_fields.iter().by_ref() {
writer.write_u8(code.clone() as u8)?;
field.write::<T1, T2>(writer)?;
}
Ok(())
}
}
struct Body {
}
impl DbusWrite for Body {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder {
unimplemented!();
}
} | struct Message {
/// The message delivery system uses the header information to figure out
/// where to send the message and how to interpret it.
header: Header, | random_line_split |
message.rs | //! https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-marshaling
use byteorder::{LittleEndian, BigEndian, ReadBytesExt, ByteOrder, WriteBytesExt};
use crate::names::{BusName, InterfaceName, ErrorName, MemberName};
use crate::writer::{DbusWriter, DbusWrite};
use crate::reader::{DbusReader, DbusRead};
use crate::type_system::{ObjectPath, Signature, UnixFd, Serial};
use std::io;
#[cfg(test)]
mod tests {
fn reverse<T: Clone>(xs: &[T]) -> Vec<T> {
let mut rev = vec!();
for x in xs.iter() {
rev.insert(0, x.clone())
}
rev
}
#[cfg(test)]
mod tests {
use crate::message::tests::reverse;
quickcheck! {
fn prop(xs: Vec<u32>) -> bool {
xs == reverse(&reverse(&xs))
}
}
}
}
/// The maximum length of a message, including header, header alignment padding,
/// and body is 2 to the 27th power or 134217728 (128 MiB).
/// Implementations must not send or accept messages exceeding this size.
const MAX_MESSAGE_SIZE: u32 = 2^27;
/// A message consists of a header and a body. If you think of a message as a package,
/// the header is the address, and the body contains the package contents.
/// Both header and body use the D-Bus [type system](https://dbus.freedesktop.org/doc/dbus-specification.html#type-system) and format for serializing data.
struct Message {
/// The message delivery system uses the header information to figure out
/// where to send the message and how to interpret it.
header: Header,
/// The body of the message is made up of zero or more arguments,
/// which are typed values, such as an integer or a byte array.
body: Body,
}
impl Message {
fn write<T>(&self, writer:T) -> Result<(), io::Error>
where T: io::Write
{
let mut writer = DbusWriter::new(writer);
match self.header.endianess_flag {
EndianessFlag::LittleEndian => {
self.header.write::<T, LittleEndian>(&mut writer)?;
self.body.write::<T, LittleEndian>(&mut writer)?;
},
EndianessFlag::BigEndian => {
self.header.write::<T, BigEndian>(&mut writer)?;
self.body.write::<T, BigEndian>(&mut writer)?;
},
};
Ok(())
}
}
/// Endianness flag; ASCII 'l' for little-endian or ASCII 'B' for big-endian.
/// Both header and body are in this endianness.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum EndianessFlag {
LittleEndian,
BigEndian,
}
impl DbusWrite for EndianessFlag {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
EndianessFlag::LittleEndian => writer.write_u8(b'l'),
EndianessFlag::BigEndian => writer.write_u8(b'B'),
}
}
}
impl DbusRead<EndianessFlag> for EndianessFlag {
fn read<T1, T2>(&self, reader: &mut DbusReader<T1>) -> Result<EndianessFlag, io::Error>
where T1: io::Read,
T2: ByteOrder
{
match reader.read_u8()? {
b'l' => Ok(EndianessFlag::LittleEndian),
b'B' => Ok(EndianessFlag::BigEndian),
x => {
let str_err = format!("Invalid endianess `{}`", x);
Err(io::Error::new(io::ErrorKind::InvalidData, str_err))
},
}
}
}
/// Message type. Unknown types must be ignored.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MessageType {
/// This is an invalid type.
Invalid = 0,
/// Method call. This message type may prompt a reply.
MethodCall = 1,
/// Method reply with returned data.
MethodReturn = 2,
/// Error reply. If the first argument exists
/// and is a string, it is an error message.
Error = 3,
/// Signal emission.
Signal = 4,
}
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MajorProtocolVersion(pub u8);
impl DbusWrite for MajorProtocolVersion {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
writer.write_u8(self.0)
}
}
bitflags! {
struct HeaderFlags: u8 {
/// This message does not expect method return replies or error replies,
/// even if it is of a type that can have a reply; the reply should be omitted.
const NO_REPLY_EXPECTED = 0x1;
/// The bus must not launch an owner for the destination name in response to this message.
const NO_AUTO_START = 0x1;
/// This flag may be set on a method call message to inform the receiving side that the caller
/// is prepared to wait for interactive authorization, which might take a considerable time to complete.
const ALLOW_INTERACTIVE_AUTHORIZATION = 0x4;
}
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HeaderFieldCode {
/// Not a valid field name (error if it appears in a message)
Invalid = 0,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path = 1,
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface = 2,
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member = 3,
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName = 4,
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial = 5,
/// The name of the connection this message is intended for.
/// Optional.
Destination = 6,
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender = 7,
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature = 8,
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds = 9,
}
/// The array at the end of the header contains header fields,
/// where each field is a 1-byte field code followed by a field value.
/// A header must contain the required header fields for its message type,
/// and zero or more of any optional header fields.
///
#[repr(u8)]
enum HeaderField {
/// Not a valid field name (error if it appears in a message)
Invalid,
/// The object to send a call to, or the object a signal is emitted from.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Path(ObjectPath),
/// The interface to invoke a method call on, or that a signal is emitted from.
/// Required in `MessageType::Signal`.
Interface(InterfaceName),
/// The member, either the method name or signal name.
/// This header field is controlled by the message sender.
/// Required in `MessageType::MethodCall` and `MessageType::Signal`.
Member(MemberName),
/// The name of the error that occurred, for errors.
/// Required in `MessageType::Error`.
ErrorName(ErrorName),
/// The serial number of the message this message is a reply to.
/// Required in `MessageType::Error` and `MessageType::MethodReturn`.
ReplySerial(Serial),
/// The name of the connection this message is intended for.
/// Optional.
Destination(String),
/// Unique name of the sending connection. This field is usually only meaningful
/// in combination with the message bus, but other servers may define their own meanings for it.
/// Optional.
Sender(String),
/// The signature of the message body. If omitted, it is assumed to be the empty signature "".
/// Optional.
Signature(Signature),
/// The number of Unix file descriptors that accompany the message.
/// If omitted, it is assumed that no Unix file descriptors accompany the message.
UnixFds(u32),
}
impl DbusWrite for HeaderField {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
{
match self {
HeaderField::Invalid => return Err(io::Error::new(io::ErrorKind::InvalidInput, "HeaderField::Invalid can not be marshaled!")),
HeaderField::Path(object_path) => object_path.write::<_, T2>(writer)?,
HeaderField::Interface(interface_name) => interface_name.write::<_, T2>(writer)?,
HeaderField::Member(member_name) => member_name.write::<_, T2>(writer)?,
HeaderField::ErrorName(error_name) => error_name.write::<_, T2>(writer)?,
HeaderField::ReplySerial(serial) => serial.write::<_, T2>(writer)?,
HeaderField::Destination(destination) => writer.write_string::<T2>(destination)?,
HeaderField::Sender(sender) => writer.write_string::<T2>(sender)?,
HeaderField::Signature(signature) => signature.write::<_, T2>(writer)?,
HeaderField::UnixFds(fd) => writer.write_u32::<T2>(*fd)?,
};
Ok(())
}
}
/// The length of the header must be a multiple of 8, allowing the body to begin on
/// an 8-byte boundary when storing the entire message in a single buffer.
/// If the header does not naturally end on an 8-byte boundary up to 7 bytes of
/// nul-initialized alignment padding must be added.
/// https://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-header-fields
struct Header {
endianess_flag: EndianessFlag,
/// Message type. Unknown types must be ignored.
message_type: MessageType,
/// Bitwise OR of flags. Unknown flags must be ignored.
flags: HeaderFlags,
/// Major protocol version of the sending application.
/// If the major protocol version of the receiving application does not match,
/// the applications will not be able to communicate and the D-Bus connection must be disconnected.
major_protocol_version: MajorProtocolVersion,
/// Length in bytes of the message body, starting from the end of the header.
/// The header ends after its alignment padding to an 8-boundary.
length_message_body: u32,
/// The serial of this message, used as a cookie by the sender to identify
/// the reply corresponding to this request. This must not be zero.
serial: Serial,
/// An array of zero or more header fields where the byte is the field code,
/// and the variant is the field value. The message type determines which fields are required.
header_fields: Vec<(HeaderFieldCode, HeaderField)>,
}
impl DbusWrite for Header {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder
|
}
struct Body {
}
impl DbusWrite for Body {
fn write<T1, T2>(&self, writer: &mut DbusWriter<T1>) -> Result<(), io::Error>
where T1: io::Write,
T2: ByteOrder {
unimplemented!();
}
} | {
writer.write_u8(self.endianess_flag as u8)?;
writer.write_u8(self.message_type as u8)?;
writer.write_u8(self.flags.bits())?;
writer.write_u8(self.major_protocol_version.0)?;
writer.write_u32::<T2>(self.length_message_body)?;
writer.write_u32::<T2>(self.serial.0)?;
for (ref code, ref field) in self.header_fields.iter().by_ref() {
writer.write_u8(code.clone() as u8)?;
field.write::<T1, T2>(writer)?;
}
Ok(())
} | identifier_body |
lib.rs | mod business_logic_layer;
mod data_access_layer;
mod entities;
use crate::business_logic_layer as bll;
pub use crate::data_access_layer::MAX_DATAGRAM_SIZE;
use crate::data_access_layer::{TypedClientSocket, TypedServerSocket};
pub use crate::entities::Exception;
use std::collections::HashMap;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[derive(Debug)]
///Events from server.
pub enum ServerEvent {
///Error on read data from socket.
ExceptionOnRecv(Exception),
///Error on write data to socket.
ExceptionOnSend((SocketAddr, Exception)),
}
pub type ContinueRunning = bool;
///Game to use with server must implement this trait.
pub trait Game {
/// delta_time: time elapsed from last call.
/// command: ordered commands commands from server.
/// from: Address of command sender.
/// Returns bool value indicating
/// should server continue running if false stops server.
/// Called only when new commands come to server.
/// Commands ordered and with some guarantees.
fn handle_command(
&mut self,
delta_time: Duration,
commands: Vec<Vec<u8>>,
from: SocketAddr,
) -> ContinueRunning;
///Gets new state to send to client.
/// delta_time: time elapsed throw last call.
/// Returns bytes with new game state for client.
/// Called once in about 30 milliseconds.
/// Sends state only to clients connected to server.
///Ordered and without some guarantees.
/// If returns empty Vec<u8> then server skips sending it and go to next iteration
fn draw(&mut self, delta_time: Duration) -> Vec<u8>;
///Allow client with this IP Address work with server.
/// If false server don't send new state to this client.
/// Usually don't implement this method. Use default implementation.
fn allow_connect(&mut self, _from: &SocketAddr) -> bool {
true
}
///Handles events from server.
/// Returns bool value.
/// If returns false stops server.
/// Usually don't implement this method. Use default implementation.
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning {
true
}
///Client to add to recv state from server.
/// If returns not None then servers on draw sends new state to this client.
/// If client with this IP Address already connected then nothing happens.
/// Usually don't implement this method. Use default implementation.
fn add_client(&mut self) -> Option<SocketAddr> {
None
}
///Disconnect this client from server and don't send new state to them.
/// Usually don't implement this method. Use default implementation.
fn remove_client(&mut self) -> Option<SocketAddr> {
None
}
}
/// Client used to communicate with [`GameServer`]. Must be singleton in your app.
pub struct ClientSocket {
socket: TypedClientSocket,
client: bll::Client,
}
impl ClientSocket {
///Create new client and listen on port to recv packets from server_address and send its to them.
pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> {
Ok(ClientSocket {
socket: TypedClientSocket::new(port, server_address)?,
client: bll::Client::new(),
})
}
///Send data to server
/// Don't block current thread
/// may wait up to 30 milliseconds if you send commands too often
///Commands ordered and with some guarantees.
pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> {
let command = self.client.send(command);
self.socket.write(&command)
}
///Reads data from server.
/// Don't block current thread.
/// Return [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available.
///Data ordered and without some guarantees.
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> {
let state = self.socket.read()?;
let (state, lost) = self.client.recv(state)?;
for command in lost {
self.socket.write(&command)?;
}
Ok(state)
}
}
struct ServerSocket {
socket: TypedServerSocket,
servers: HashMap<SocketAddr, bll::Server>,
}
impl ServerSocket {
pub fn new(port: u16) -> Result<ServerSocket, Exception> {
Ok(ServerSocket {
socket: TypedServerSocket::new(port)?,
servers: HashMap::new(),
})
}
pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> {
let (command, from) = self.socket.read()?;
self.add(&from);
let command = self.servers.get_mut(&from).unwrap().recv(command)?;
Ok((command, from))
}
pub fn | (&mut self, client: &SocketAddr) {
self.servers.remove(&client);
}
pub fn add(&mut self, client: &SocketAddr) {
if!self.servers.contains_key(client) {
self.servers.insert(client.clone(), bll::Server::new());
}
}
pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> {
let mut exceptions = Vec::new();
for (a, s) in &mut self.servers {
let _ = self
.socket
.write(a, &s.send(state.clone()))
.map_err(|e| exceptions.push((*a, e)));
}
exceptions
}
}
const DRAW_PERIOD_IN_MILLIS: u64 = 30;
///Game server to run [`Game`]
pub struct GameServer<T: Game> {
game: T,
socket: ServerSocket,
is_running: bool,
draw_timer: bll::timer::WaitTimer,
update_timer: bll::timer::ElapsedTimer,
after_draw_elapsed_timer: bll::timer::ElapsedTimer,
}
impl<T: Game> GameServer<T> {
///Crates new server listening port
pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> {
Ok(GameServer {
game,
socket: ServerSocket::new(port)?,
is_running: true,
draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS),
update_timer: bll::timer::ElapsedTimer::new(),
after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(),
})
}
///Runs game update - draw circle.
/// Blocks current thread.
pub fn run(&mut self) {
while self.is_running {
self.update();
self.draw()
}
}
fn draw(&mut self) {
if self.draw_timer.continue_execution() {
let state = self.game.draw(self.after_draw_elapsed_timer.elapsed());
if state.is_empty() {
return;
}
self.game.add_client().map(|a| self.socket.add(&a));
self.game.remove_client().map(|a| self.socket.remove(&a));
self.is_running &= self
.socket
.send_to_all(state)
.into_iter()
.map(|ex| {
self.game
.handle_server_event(ServerEvent::ExceptionOnSend(ex))
})
.all(|b| b);
}
}
fn update(&mut self) {
let _ = self
.socket
.recv()
.map(|(commands, from)| {
if self.game.allow_connect(&from) {
self.is_running &=
self.game
.handle_command(self.update_timer.elapsed(), commands, from);
} else {
self.socket.remove(&from);
}
})
.map_err(|e| {
self.is_running &= self
.game
.handle_server_event(ServerEvent::ExceptionOnRecv(e))
});
}
}
//trait Game {
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>;
//}
//
//struct GameProxy {
// game: std::sync::Arc<std::sync::Mutex<Game>>
//}
//
//impl GameProxy {
// fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy {
//// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap();
//// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap();
// GameProxy { game }
// }
//
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> {
// let mut game = self.game.lock().unwrap();
// game.update(delta_time, commands, from_address)
// }
//}
///// Client used to communicate with server. Must be singleton in your app
//pub struct Client {
// commands: mpsc::Sender<Vec<u8>>,
// states: mpsc::Receiver<Vec<u8>>,
//}
//
//impl Client {
// ///Create new client and listen on port to recv packets from server_address and send its to them
// pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> {
// let mut client = crate::business_logic_layer::Client::new(port, server_address)?;
// crate::data_access_layer::logger::init(LevelFilter::Info)?;
// let (tx, rx) = Client::run_process(client);
// Ok(Client { commands: tx, states: rx })
// }
//
// fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) {
// let (tx1, rx1) = mpsc::channel();
// let (tx2, rx2) = mpsc::channel();
// thread::spawn(move || {
// const SEND_TIMEOUT: time::Duration = time::Duration::from_millis(30);
// let mut timer = time::Instant::now();
// loop {
// if timer.elapsed() > SEND_TIMEOUT {
// timer = time::Instant::now();
// match rx1.try_recv() {
// Ok(b) => client.send(b).map_err(|e| error!("{}", e)),
// Err(mpsc::TryRecvError::Disconnected) => break,
// Err(e) => Err(error!("{}", e)),
// };
// };
// client.recv()
// .map_err(|e|error!("{}",e))
// .and_then(|b| tx2.send(b)
// .map_err(|e|error!("{}",e)));
//
// }
// });
// (tx1, rx2)
// }
//
// ///Send data to server
// /// Don't block current thread
// pub fn send(&self, command: Vec<u8>) {
// self.commands.send(command).map_err(|e| error!("{}", e));
// }
//
// ///Reads data fro server
// /// Don't block current thread
// /// Return None if there is no data available
// pub fn recv(&self) -> Option<Vec<u8>> {
// self.states.try_recv().ok()
// }
//}
//#[cfg(test)]
//mod tests {
// #[test]
// fn it_works() {
// assert_eq!(1, 1);
// }
//}
| remove | identifier_name |
lib.rs | mod business_logic_layer;
mod data_access_layer;
mod entities;
use crate::business_logic_layer as bll;
pub use crate::data_access_layer::MAX_DATAGRAM_SIZE;
use crate::data_access_layer::{TypedClientSocket, TypedServerSocket};
pub use crate::entities::Exception;
use std::collections::HashMap;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[derive(Debug)]
///Events from server.
pub enum ServerEvent {
///Error on read data from socket.
ExceptionOnRecv(Exception),
///Error on write data to socket.
ExceptionOnSend((SocketAddr, Exception)),
}
pub type ContinueRunning = bool;
///Game to use with server must implement this trait.
pub trait Game {
/// delta_time: time elapsed from last call.
/// command: ordered commands commands from server.
/// from: Address of command sender.
/// Returns bool value indicating
/// should server continue running if false stops server.
/// Called only when new commands come to server.
/// Commands ordered and with some guarantees.
fn handle_command(
&mut self,
delta_time: Duration,
commands: Vec<Vec<u8>>,
from: SocketAddr,
) -> ContinueRunning;
///Gets new state to send to client.
/// delta_time: time elapsed throw last call.
/// Returns bytes with new game state for client.
/// Called once in about 30 milliseconds.
/// Sends state only to clients connected to server.
///Ordered and without some guarantees.
/// If returns empty Vec<u8> then server skips sending it and go to next iteration
fn draw(&mut self, delta_time: Duration) -> Vec<u8>;
///Allow client with this IP Address work with server.
/// If false server don't send new state to this client.
/// Usually don't implement this method. Use default implementation.
fn allow_connect(&mut self, _from: &SocketAddr) -> bool {
true
}
///Handles events from server.
/// Returns bool value.
/// If returns false stops server.
/// Usually don't implement this method. Use default implementation.
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning {
true
}
///Client to add to recv state from server.
/// If returns not None then servers on draw sends new state to this client.
/// If client with this IP Address already connected then nothing happens.
/// Usually don't implement this method. Use default implementation.
fn add_client(&mut self) -> Option<SocketAddr> {
None
}
///Disconnect this client from server and don't send new state to them.
/// Usually don't implement this method. Use default implementation.
fn remove_client(&mut self) -> Option<SocketAddr> {
None
}
}
/// Client used to communicate with [`GameServer`]. Must be singleton in your app.
pub struct ClientSocket {
socket: TypedClientSocket,
client: bll::Client,
}
impl ClientSocket {
///Create new client and listen on port to recv packets from server_address and send its to them.
pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> {
Ok(ClientSocket {
socket: TypedClientSocket::new(port, server_address)?,
client: bll::Client::new(),
})
}
///Send data to server
/// Don't block current thread
/// may wait up to 30 milliseconds if you send commands too often
///Commands ordered and with some guarantees.
pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> {
let command = self.client.send(command);
self.socket.write(&command)
}
///Reads data from server.
/// Don't block current thread.
/// Return [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available.
///Data ordered and without some guarantees.
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> |
}
struct ServerSocket {
socket: TypedServerSocket,
servers: HashMap<SocketAddr, bll::Server>,
}
impl ServerSocket {
pub fn new(port: u16) -> Result<ServerSocket, Exception> {
Ok(ServerSocket {
socket: TypedServerSocket::new(port)?,
servers: HashMap::new(),
})
}
pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> {
let (command, from) = self.socket.read()?;
self.add(&from);
let command = self.servers.get_mut(&from).unwrap().recv(command)?;
Ok((command, from))
}
pub fn remove(&mut self, client: &SocketAddr) {
self.servers.remove(&client);
}
pub fn add(&mut self, client: &SocketAddr) {
if!self.servers.contains_key(client) {
self.servers.insert(client.clone(), bll::Server::new());
}
}
pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> {
let mut exceptions = Vec::new();
for (a, s) in &mut self.servers {
let _ = self
.socket
.write(a, &s.send(state.clone()))
.map_err(|e| exceptions.push((*a, e)));
}
exceptions
}
}
const DRAW_PERIOD_IN_MILLIS: u64 = 30;
///Game server to run [`Game`]
pub struct GameServer<T: Game> {
game: T,
socket: ServerSocket,
is_running: bool,
draw_timer: bll::timer::WaitTimer,
update_timer: bll::timer::ElapsedTimer,
after_draw_elapsed_timer: bll::timer::ElapsedTimer,
}
impl<T: Game> GameServer<T> {
///Crates new server listening port
pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> {
Ok(GameServer {
game,
socket: ServerSocket::new(port)?,
is_running: true,
draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS),
update_timer: bll::timer::ElapsedTimer::new(),
after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(),
})
}
///Runs game update - draw circle.
/// Blocks current thread.
pub fn run(&mut self) {
while self.is_running {
self.update();
self.draw()
}
}
fn draw(&mut self) {
if self.draw_timer.continue_execution() {
let state = self.game.draw(self.after_draw_elapsed_timer.elapsed());
if state.is_empty() {
return;
}
self.game.add_client().map(|a| self.socket.add(&a));
self.game.remove_client().map(|a| self.socket.remove(&a));
self.is_running &= self
.socket
.send_to_all(state)
.into_iter()
.map(|ex| {
self.game
.handle_server_event(ServerEvent::ExceptionOnSend(ex))
})
.all(|b| b);
}
}
fn update(&mut self) {
let _ = self
.socket
.recv()
.map(|(commands, from)| {
if self.game.allow_connect(&from) {
self.is_running &=
self.game
.handle_command(self.update_timer.elapsed(), commands, from);
} else {
self.socket.remove(&from);
}
})
.map_err(|e| {
self.is_running &= self
.game
.handle_server_event(ServerEvent::ExceptionOnRecv(e))
});
}
}
//trait Game {
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>;
//}
//
//struct GameProxy {
// game: std::sync::Arc<std::sync::Mutex<Game>>
//}
//
//impl GameProxy {
// fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy {
//// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap();
//// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap();
// GameProxy { game }
// }
//
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> {
// let mut game = self.game.lock().unwrap();
// game.update(delta_time, commands, from_address)
// }
//}
///// Client used to communicate with server. Must be singleton in your app
//pub struct Client {
// commands: mpsc::Sender<Vec<u8>>,
// states: mpsc::Receiver<Vec<u8>>,
//}
//
//impl Client {
// ///Create new client and listen on port to recv packets from server_address and send its to them
// pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> {
// let mut client = crate::business_logic_layer::Client::new(port, server_address)?;
// crate::data_access_layer::logger::init(LevelFilter::Info)?;
// let (tx, rx) = Client::run_process(client);
// Ok(Client { commands: tx, states: rx })
// }
//
// fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) {
// let (tx1, rx1) = mpsc::channel();
// let (tx2, rx2) = mpsc::channel();
// thread::spawn(move || {
// const SEND_TIMEOUT: time::Duration = time::Duration::from_millis(30);
// let mut timer = time::Instant::now();
// loop {
// if timer.elapsed() > SEND_TIMEOUT {
// timer = time::Instant::now();
// match rx1.try_recv() {
// Ok(b) => client.send(b).map_err(|e| error!("{}", e)),
// Err(mpsc::TryRecvError::Disconnected) => break,
// Err(e) => Err(error!("{}", e)),
// };
// };
// client.recv()
// .map_err(|e|error!("{}",e))
// .and_then(|b| tx2.send(b)
// .map_err(|e|error!("{}",e)));
//
// }
// });
// (tx1, rx2)
// }
//
// ///Send data to server
// /// Don't block current thread
// pub fn send(&self, command: Vec<u8>) {
// self.commands.send(command).map_err(|e| error!("{}", e));
// }
//
// ///Reads data fro server
// /// Don't block current thread
// /// Return None if there is no data available
// pub fn recv(&self) -> Option<Vec<u8>> {
// self.states.try_recv().ok()
// }
//}
//#[cfg(test)]
//mod tests {
// #[test]
// fn it_works() {
// assert_eq!(1, 1);
// }
//}
| {
let state = self.socket.read()?;
let (state, lost) = self.client.recv(state)?;
for command in lost {
self.socket.write(&command)?;
}
Ok(state)
} | identifier_body |
lib.rs | mod business_logic_layer;
mod data_access_layer;
mod entities;
use crate::business_logic_layer as bll;
pub use crate::data_access_layer::MAX_DATAGRAM_SIZE;
use crate::data_access_layer::{TypedClientSocket, TypedServerSocket};
pub use crate::entities::Exception;
use std::collections::HashMap;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[derive(Debug)]
///Events from server.
pub enum ServerEvent {
///Error on read data from socket.
ExceptionOnRecv(Exception),
///Error on write data to socket.
ExceptionOnSend((SocketAddr, Exception)),
}
pub type ContinueRunning = bool;
///Game to use with server must implement this trait.
pub trait Game {
/// delta_time: time elapsed from last call.
/// command: ordered commands commands from server.
/// from: Address of command sender.
/// Returns bool value indicating
/// should server continue running if false stops server.
/// Called only when new commands come to server.
/// Commands ordered and with some guarantees.
fn handle_command(
&mut self,
delta_time: Duration,
commands: Vec<Vec<u8>>,
from: SocketAddr,
) -> ContinueRunning;
///Gets new state to send to client.
/// delta_time: time elapsed throw last call.
/// Returns bytes with new game state for client.
/// Called once in about 30 milliseconds.
/// Sends state only to clients connected to server.
///Ordered and without some guarantees.
/// If returns empty Vec<u8> then server skips sending it and go to next iteration
fn draw(&mut self, delta_time: Duration) -> Vec<u8>;
///Allow client with this IP Address work with server.
/// If false server don't send new state to this client.
/// Usually don't implement this method. Use default implementation.
fn allow_connect(&mut self, _from: &SocketAddr) -> bool {
true
}
///Handles events from server.
/// Returns bool value.
/// If returns false stops server.
/// Usually don't implement this method. Use default implementation.
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning {
true
}
///Client to add to recv state from server.
/// If returns not None then servers on draw sends new state to this client.
/// If client with this IP Address already connected then nothing happens.
/// Usually don't implement this method. Use default implementation.
fn add_client(&mut self) -> Option<SocketAddr> {
None
}
///Disconnect this client from server and don't send new state to them.
/// Usually don't implement this method. Use default implementation.
fn remove_client(&mut self) -> Option<SocketAddr> {
None
}
}
/// Client used to communicate with [`GameServer`]. Must be singleton in your app.
pub struct ClientSocket {
socket: TypedClientSocket,
client: bll::Client,
}
impl ClientSocket {
///Create new client and listen on port to recv packets from server_address and send its to them.
pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> {
Ok(ClientSocket {
socket: TypedClientSocket::new(port, server_address)?,
client: bll::Client::new(),
})
}
///Send data to server
/// Don't block current thread
/// may wait up to 30 milliseconds if you send commands too often
///Commands ordered and with some guarantees.
pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> {
let command = self.client.send(command);
self.socket.write(&command)
}
///Reads data from server.
/// Don't block current thread.
/// Return [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available.
///Data ordered and without some guarantees.
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> {
let state = self.socket.read()?;
let (state, lost) = self.client.recv(state)?;
for command in lost {
self.socket.write(&command)?;
}
Ok(state)
}
}
struct ServerSocket {
socket: TypedServerSocket,
servers: HashMap<SocketAddr, bll::Server>,
}
impl ServerSocket {
pub fn new(port: u16) -> Result<ServerSocket, Exception> {
Ok(ServerSocket {
socket: TypedServerSocket::new(port)?,
servers: HashMap::new(),
})
}
pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> {
let (command, from) = self.socket.read()?;
self.add(&from);
let command = self.servers.get_mut(&from).unwrap().recv(command)?;
Ok((command, from))
}
pub fn remove(&mut self, client: &SocketAddr) {
self.servers.remove(&client);
}
pub fn add(&mut self, client: &SocketAddr) {
if!self.servers.contains_key(client) |
}
pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> {
let mut exceptions = Vec::new();
for (a, s) in &mut self.servers {
let _ = self
.socket
.write(a, &s.send(state.clone()))
.map_err(|e| exceptions.push((*a, e)));
}
exceptions
}
}
const DRAW_PERIOD_IN_MILLIS: u64 = 30;
///Game server to run [`Game`]
pub struct GameServer<T: Game> {
game: T,
socket: ServerSocket,
is_running: bool,
draw_timer: bll::timer::WaitTimer,
update_timer: bll::timer::ElapsedTimer,
after_draw_elapsed_timer: bll::timer::ElapsedTimer,
}
impl<T: Game> GameServer<T> {
///Crates new server listening port
pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> {
Ok(GameServer {
game,
socket: ServerSocket::new(port)?,
is_running: true,
draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS),
update_timer: bll::timer::ElapsedTimer::new(),
after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(),
})
}
///Runs game update - draw circle.
/// Blocks current thread.
pub fn run(&mut self) {
while self.is_running {
self.update();
self.draw()
}
}
fn draw(&mut self) {
if self.draw_timer.continue_execution() {
let state = self.game.draw(self.after_draw_elapsed_timer.elapsed());
if state.is_empty() {
return;
}
self.game.add_client().map(|a| self.socket.add(&a));
self.game.remove_client().map(|a| self.socket.remove(&a));
self.is_running &= self
.socket
.send_to_all(state)
.into_iter()
.map(|ex| {
self.game
.handle_server_event(ServerEvent::ExceptionOnSend(ex))
})
.all(|b| b);
}
}
fn update(&mut self) {
let _ = self
.socket
.recv()
.map(|(commands, from)| {
if self.game.allow_connect(&from) {
self.is_running &=
self.game
.handle_command(self.update_timer.elapsed(), commands, from);
} else {
self.socket.remove(&from);
}
})
.map_err(|e| {
self.is_running &= self
.game
.handle_server_event(ServerEvent::ExceptionOnRecv(e))
});
}
}
//trait Game {
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>;
//}
//
//struct GameProxy {
// game: std::sync::Arc<std::sync::Mutex<Game>>
//}
//
//impl GameProxy {
// fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy {
//// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap();
//// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap();
// GameProxy { game }
// }
//
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> {
// let mut game = self.game.lock().unwrap();
// game.update(delta_time, commands, from_address)
// }
//}
///// Client used to communicate with server. Must be singleton in your app
//pub struct Client {
// commands: mpsc::Sender<Vec<u8>>,
// states: mpsc::Receiver<Vec<u8>>,
//}
//
//impl Client {
// ///Create new client and listen on port to recv packets from server_address and send its to them
// pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> {
// let mut client = crate::business_logic_layer::Client::new(port, server_address)?;
// crate::data_access_layer::logger::init(LevelFilter::Info)?;
// let (tx, rx) = Client::run_process(client);
// Ok(Client { commands: tx, states: rx })
// }
//
// fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) {
// let (tx1, rx1) = mpsc::channel();
// let (tx2, rx2) = mpsc::channel();
// thread::spawn(move || {
// const SEND_TIMEOUT: time::Duration = time::Duration::from_millis(30);
// let mut timer = time::Instant::now();
// loop {
// if timer.elapsed() > SEND_TIMEOUT {
// timer = time::Instant::now();
// match rx1.try_recv() {
// Ok(b) => client.send(b).map_err(|e| error!("{}", e)),
// Err(mpsc::TryRecvError::Disconnected) => break,
// Err(e) => Err(error!("{}", e)),
// };
// };
// client.recv()
// .map_err(|e|error!("{}",e))
// .and_then(|b| tx2.send(b)
// .map_err(|e|error!("{}",e)));
//
// }
// });
// (tx1, rx2)
// }
//
// ///Send data to server
// /// Don't block current thread
// pub fn send(&self, command: Vec<u8>) {
// self.commands.send(command).map_err(|e| error!("{}", e));
// }
//
// ///Reads data fro server
// /// Don't block current thread
// /// Return None if there is no data available
// pub fn recv(&self) -> Option<Vec<u8>> {
// self.states.try_recv().ok()
// }
//}
//#[cfg(test)]
//mod tests {
// #[test]
// fn it_works() {
// assert_eq!(1, 1);
// }
//}
| {
self.servers.insert(client.clone(), bll::Server::new());
} | conditional_block |
lib.rs | mod business_logic_layer;
mod data_access_layer;
mod entities;
use crate::business_logic_layer as bll;
pub use crate::data_access_layer::MAX_DATAGRAM_SIZE;
use crate::data_access_layer::{TypedClientSocket, TypedServerSocket};
pub use crate::entities::Exception;
use std::collections::HashMap;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::Duration;
#[derive(Debug)]
///Events from server.
pub enum ServerEvent {
///Error on read data from socket.
ExceptionOnRecv(Exception),
///Error on write data to socket.
ExceptionOnSend((SocketAddr, Exception)),
}
pub type ContinueRunning = bool;
///Game to use with server must implement this trait.
pub trait Game {
/// delta_time: time elapsed from last call.
/// command: ordered commands commands from server.
/// from: Address of command sender.
/// Returns bool value indicating
/// should server continue running if false stops server.
/// Called only when new commands come to server.
/// Commands ordered and with some guarantees.
fn handle_command(
&mut self,
delta_time: Duration,
commands: Vec<Vec<u8>>,
from: SocketAddr,
) -> ContinueRunning;
///Gets new state to send to client.
/// delta_time: time elapsed throw last call.
/// Returns bytes with new game state for client.
/// Called once in about 30 milliseconds.
/// Sends state only to clients connected to server.
///Ordered and without some guarantees.
/// If returns empty Vec<u8> then server skips sending it and go to next iteration
fn draw(&mut self, delta_time: Duration) -> Vec<u8>;
///Allow client with this IP Address work with server.
/// If false server don't send new state to this client.
/// Usually don't implement this method. Use default implementation.
fn allow_connect(&mut self, _from: &SocketAddr) -> bool {
true
}
///Handles events from server.
/// Returns bool value.
/// If returns false stops server.
/// Usually don't implement this method. Use default implementation.
fn handle_server_event(&mut self, _event: ServerEvent) -> ContinueRunning {
true
}
///Client to add to recv state from server.
/// If returns not None then servers on draw sends new state to this client.
/// If client with this IP Address already connected then nothing happens.
/// Usually don't implement this method. Use default implementation.
fn add_client(&mut self) -> Option<SocketAddr> {
None
}
///Disconnect this client from server and don't send new state to them.
/// Usually don't implement this method. Use default implementation.
fn remove_client(&mut self) -> Option<SocketAddr> {
None
}
}
/// Client used to communicate with [`GameServer`]. Must be singleton in your app.
pub struct ClientSocket {
socket: TypedClientSocket,
client: bll::Client,
}
impl ClientSocket {
///Create new client and listen on port to recv packets from server_address and send its to them.
pub fn new(port: u16, server_address: impl ToSocketAddrs) -> Result<ClientSocket, Exception> {
Ok(ClientSocket {
socket: TypedClientSocket::new(port, server_address)?,
client: bll::Client::new(),
})
}
///Send data to server
/// Don't block current thread
/// may wait up to 30 milliseconds if you send commands too often
///Commands ordered and with some guarantees.
pub fn send(&mut self, command: Vec<u8>) -> Result<usize, Exception> {
let command = self.client.send(command);
self.socket.write(&command)
}
///Reads data from server.
/// Don't block current thread.
/// Return [`Exception`] with [`std::io::ErrorKind::WouldBlock`] if there is no data available.
///Data ordered and without some guarantees.
pub fn recv(&mut self) -> Result<Vec<u8>, Exception> {
let state = self.socket.read()?;
let (state, lost) = self.client.recv(state)?;
for command in lost {
self.socket.write(&command)?;
}
Ok(state)
}
}
struct ServerSocket {
socket: TypedServerSocket,
servers: HashMap<SocketAddr, bll::Server>,
}
impl ServerSocket {
pub fn new(port: u16) -> Result<ServerSocket, Exception> {
Ok(ServerSocket {
socket: TypedServerSocket::new(port)?,
servers: HashMap::new(),
})
}
pub fn recv(&mut self) -> Result<(Vec<Vec<u8>>, SocketAddr), Exception> {
let (command, from) = self.socket.read()?;
self.add(&from);
let command = self.servers.get_mut(&from).unwrap().recv(command)?;
Ok((command, from))
}
pub fn remove(&mut self, client: &SocketAddr) {
self.servers.remove(&client);
}
pub fn add(&mut self, client: &SocketAddr) {
if!self.servers.contains_key(client) {
self.servers.insert(client.clone(), bll::Server::new());
}
}
pub fn send_to_all(&mut self, state: Vec<u8>) -> Vec<(SocketAddr, Exception)> {
let mut exceptions = Vec::new();
for (a, s) in &mut self.servers { | exceptions
}
}
const DRAW_PERIOD_IN_MILLIS: u64 = 30;
///Game server to run [`Game`]
pub struct GameServer<T: Game> {
game: T,
socket: ServerSocket,
is_running: bool,
draw_timer: bll::timer::WaitTimer,
update_timer: bll::timer::ElapsedTimer,
after_draw_elapsed_timer: bll::timer::ElapsedTimer,
}
impl<T: Game> GameServer<T> {
///Crates new server listening port
pub fn new(game: T, port: u16) -> Result<GameServer<T>, Exception> {
Ok(GameServer {
game,
socket: ServerSocket::new(port)?,
is_running: true,
draw_timer: bll::timer::WaitTimer::new(DRAW_PERIOD_IN_MILLIS),
update_timer: bll::timer::ElapsedTimer::new(),
after_draw_elapsed_timer: bll::timer::ElapsedTimer::new(),
})
}
///Runs game update - draw circle.
/// Blocks current thread.
pub fn run(&mut self) {
while self.is_running {
self.update();
self.draw()
}
}
fn draw(&mut self) {
if self.draw_timer.continue_execution() {
let state = self.game.draw(self.after_draw_elapsed_timer.elapsed());
if state.is_empty() {
return;
}
self.game.add_client().map(|a| self.socket.add(&a));
self.game.remove_client().map(|a| self.socket.remove(&a));
self.is_running &= self
.socket
.send_to_all(state)
.into_iter()
.map(|ex| {
self.game
.handle_server_event(ServerEvent::ExceptionOnSend(ex))
})
.all(|b| b);
}
}
fn update(&mut self) {
let _ = self
.socket
.recv()
.map(|(commands, from)| {
if self.game.allow_connect(&from) {
self.is_running &=
self.game
.handle_command(self.update_timer.elapsed(), commands, from);
} else {
self.socket.remove(&from);
}
})
.map_err(|e| {
self.is_running &= self
.game
.handle_server_event(ServerEvent::ExceptionOnRecv(e))
});
}
}
//trait Game {
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8>;
//}
//
//struct GameProxy {
// game: std::sync::Arc<std::sync::Mutex<Game>>
//}
//
//impl GameProxy {
// fn new(game: std::sync::Arc<std::sync::Mutex<Game>>) -> GameProxy {
//// let mut client = crate::data_access_layer::TypedClientSocket::new("sdsf", "sdfsf").unwrap();
//// let mut server = crate::data_access_layer::TypedServerSocket::new("asdfaf").unwrap();
// GameProxy { game }
// }
//
// fn update(&mut self, delta_time: std::time::Duration, commands: Vec<Vec<u8>>, from_address: &str) -> Vec<u8> {
// let mut game = self.game.lock().unwrap();
// game.update(delta_time, commands, from_address)
// }
//}
///// Client used to communicate with server. Must be singleton in your app
//pub struct Client {
// commands: mpsc::Sender<Vec<u8>>,
// states: mpsc::Receiver<Vec<u8>>,
//}
//
//impl Client {
// ///Create new client and listen on port to recv packets from server_address and send its to them
// pub fn new(port: &str, server_address: &str) -> Result<Client, Exception> {
// let mut client = crate::business_logic_layer::Client::new(port, server_address)?;
// crate::data_access_layer::logger::init(LevelFilter::Info)?;
// let (tx, rx) = Client::run_process(client);
// Ok(Client { commands: tx, states: rx })
// }
//
// fn run_process(mut client: crate::business_logic_layer::Client) -> (mpsc::Sender<Vec<u8>>, mpsc::Receiver<Vec<u8>>) {
// let (tx1, rx1) = mpsc::channel();
// let (tx2, rx2) = mpsc::channel();
// thread::spawn(move || {
// const SEND_TIMEOUT: time::Duration = time::Duration::from_millis(30);
// let mut timer = time::Instant::now();
// loop {
// if timer.elapsed() > SEND_TIMEOUT {
// timer = time::Instant::now();
// match rx1.try_recv() {
// Ok(b) => client.send(b).map_err(|e| error!("{}", e)),
// Err(mpsc::TryRecvError::Disconnected) => break,
// Err(e) => Err(error!("{}", e)),
// };
// };
// client.recv()
// .map_err(|e|error!("{}",e))
// .and_then(|b| tx2.send(b)
// .map_err(|e|error!("{}",e)));
//
// }
// });
// (tx1, rx2)
// }
//
// ///Send data to server
// /// Don't block current thread
// pub fn send(&self, command: Vec<u8>) {
// self.commands.send(command).map_err(|e| error!("{}", e));
// }
//
// ///Reads data fro server
// /// Don't block current thread
// /// Return None if there is no data available
// pub fn recv(&self) -> Option<Vec<u8>> {
// self.states.try_recv().ok()
// }
//}
//#[cfg(test)]
//mod tests {
// #[test]
// fn it_works() {
// assert_eq!(1, 1);
// }
//} | let _ = self
.socket
.write(a, &s.send(state.clone()))
.map_err(|e| exceptions.push((*a, e)));
} | random_line_split |
main.rs | #![allow(unused)]
#![allow(non_snake_case)]
use crate::db::MyDbContext;
use serenity::model::prelude::*;
use sqlx::Result;
use serenity::{
async_trait,
client::bridge::gateway::{GatewayIntents, ShardId, ShardManager},
framework::standard::{
buckets::{LimitedFor, RevertBucket},
help_commands,
macros::{command, group, help, hook},
Args, CommandGroup, CommandOptions, CommandResult, DispatchError, HelpOptions, Reason,
StandardFramework,
},
http::Http,
model::{
channel::{Channel, Message},
gateway::Ready,
guild::Guild,
id::UserId,
permissions::Permissions,
},
utils::{content_safe, ContentSafeOptions},
};
use std::{
collections::{HashMap, HashSet},
env,
fmt::Write,
sync::Arc,
};
use serenity::prelude::*;
use tokio::sync::Mutex;
mod admin_commands;
mod autopanic;
mod blob_blacklist_conversions;
mod commands;
mod db;
use crate::admin_commands::*;
use crate::autopanic::*;
use crate::commands::*;
use std::convert::TryInto;
use std::process::exit;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{sleep, Duration};
// A container type is created for inserting into the Client's `data`, which
// allows for data to be accessible across all events and framework commands, or
// anywhere else that has a copy of the `data` Arc.
struct ShardManagerContainer;
impl TypeMapKey for ShardManagerContainer {
type Value = Arc<Mutex<ShardManager>>;
}
impl TypeMapKey for db::MyDbContext {
type Value = db::MyDbContext;
}
impl TypeMapKey for autopanic::Gramma {
type Value = autopanic::Gramma;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn guild_create(&self, ctx: Context, guild: Guild, is_new: bool) {
let mut data = ctx.data.write().await;
let mut dbcontext = data
.get_mut::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.");
let id = &guild.id.0;
if let Some(s) = dbcontext.fetch_settings(id).await {
println!("Found guild {} settings", id);
dbcontext.cache.insert(*id, s);
} else {
println!("Creating a new settings row for guild {}", id);
dbcontext.add_guild(id).await; // also adds to cache
//greet_new_guild(&ctx, &guild).await;
};
set_status(&ctx).await;
}
async fn channel_pins_update(&self, ctx: Context, _pins: ChannelPinsUpdateEvent) {
println!("yeet doing a garbage run");
garbage_collect(&ctx);
println!("done");
}
async fn guild_member_addition(&self, ctx: Context, guild_id: GuildId, mut new_member: Member) {
println!("new member joined {}: {}", guild_id, new_member.user.name);
{
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild_id.0);
mom.recent_users.insert(
new_member
.joined_at
.unwrap()
.timestamp_millis()
.try_into()
.unwrap(),
new_member.user.id.0,
);
}
check_against_joins(&ctx, guild_id.0).await;
check_against_blacklist(&ctx, new_member, guild_id.0).await;
}
async fn message(&self, ctx: Context, new_message: Message) {
/*
if new_message.content.len() > 20_usize {
println!("Message! {}...", &new_message.content[..19]);
} else {
println!("Message! {}", &new_message.content);
}*/
// we use the message timestamp instead of time::now because of potential lag of events
let timestamp: u64 = new_message.timestamp.timestamp_millis().try_into().unwrap();
let guild = new_message.guild_id.unwrap().0;
let author = new_message.author.id.0;
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild);
if!new_message.mentions.is_empty() {
mom.userpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if!new_message.mention_roles.is_empty() {
mom.rollpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if!new_message.mention_roles.is_empty() ||!new_message.mentions.is_empty() {
autopanic::check_against_pings(&ctx, mom, guild).await;
}
}
async fn ready(&self, ctx: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
pub async fn better_default_channel(guild: &Guild, uid: UserId) -> Option<Vec<&GuildChannel>> {
let member = guild.members.get(&uid)?;
let mut out = vec![];
for channel in guild.channels.values() {
if channel.kind == ChannelType::Text
&& guild
.user_permissions_in(channel, member)
.ok()?
.send_messages()
&& guild
.user_permissions_in(channel, member)
.ok()?
.read_messages()
{
let x = guild.user_permissions_in(channel, member).expect("goo");
//return Some(channel);
dbg!(x);
println!("{:?}", x.bits);
println!("{}", channel.name);
out.push(channel);
}
}
if out.is_empty() {
None
} else {
Some(out)
}
}
async fn greet_new_guild(ctx: &Context, guild: &Guild) {
println!("h");
if let Some(channelvec) = better_default_channel(guild, UserId(802019556801511424_u64)).await {
println!("i");
for channel in channelvec {
println!("{}", channel.name);
let res = channel.say(&ctx, "
Thanks for adding me to the server! Here's some next steps:\n
Configure who can run most commands (like turning on or off panic mode): run `bb-settings set roll_that_can_panic Staff` for example (if you have a roll called Staff)\n
I recommend that you set up a log channel for me to talk in (and set it like `bb-settings set logs #mychannel` but replace mychannel with the actual one) \n
Also probs tell me a roll for me to ping when I automatically detect a raid and go into panic mode (`bb-settings set notify raidresponders` - replacing raidresponders with that roll)\n
Reviewing default settings is recommended - `bb-settings` and adjust them as you wish. `bb-help` shows all my commands.\n
If you find yourself needing support, there's a support server invite in `bb-about`\
").await;
if res.is_ok() {
return;
}
}
} else {
println!(
"hey i wanted to greet {} {} but they wont let everyone talk",
guild.name, guild.id.0
);
}
}
async fn set_status(ctx: &Context) {
ctx.shard.set_status(OnlineStatus::DoNotDisturb);
let s = format!("to {} guilds | bb-help", ctx.cache.guild_count().await);
ctx.shard.set_activity(Some(Activity::listening(&*s)));
}
#[group]
#[commands(panic, uinfo, forceban, help, delete)]
struct General;
#[group]
#[commands(about, ping, die, update, free, git_push, garbage, foo)] // status)]
struct Meta;
#[group]
// Sets multiple prefixes for a group.
// This requires us to call commands in this group
// via `~emoji` (or `~em`) instead of just `~`.
#[prefixes("settings", "s")]
// Set a description to appear if a user wants to display a single group
// e.g. via help using the group-name or one of its prefixes.
// Summary only appears when listing multiple groups.
// Sets a command that will be executed if only a group-prefix was passed.
#[default_command(show)]
#[commands(reset, set)]
struct Settings;
#[group]
#[prefixes("blacklist", "bl")]
#[default_command(blacklist_show)]
#[commands(remove, add)]
struct Blacklist;
#[hook] // this appears not to work
async fn before(ctx: &Context, msg: &Message, command_name: &str) -> bool {
println!(
"Got command '{}' by user '{}'",
command_name, msg.author.name
);
true // if `before` returns false, command processing doesn't happen.
}
#[hook]
async fn after(_ctx: &Context, _msg: &Message, command_name: &str, command_result: CommandResult) {
match command_result {
Ok(()) => println!("Processed command '{}'", command_name),
Err(why) => println!("Command '{}' returned error {:?}", command_name, why),
}
}
#[hook]
async fn unknown_command(_ctx: &Context, _msg: &Message, unknown_command_name: &str) {
println!("Could not find command named '{}'", unknown_command_name);
}
#[hook]
async fn | (ctx: &Context, msg: &Message, error: DispatchError) {
if let DispatchError::Ratelimited(info) = error {
// We notify them only once.
if info.is_first_try {
let _ = msg
.channel_id
.say(
&ctx.http,
&format!("Try this again in {} seconds.", info.as_secs()),
)
.await;
}
}
}
#[tokio::main]
async fn main() {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
println!("{:?}", since_the_epoch);
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
let http = Http::new_with_token(&token);
// We will fetch your bot's owners and id
let bot_id = match http.get_current_application_info().await {
Ok(_) => match http.get_current_user().await {
Ok(bot_id) => bot_id.id,
Err(why) => panic!("Could not access the bot id: {:?}", why),
},
Err(why) => panic!("Could not access application info: {:?}", why),
};
let framework = StandardFramework::new()
.configure(|c| {
c.with_whitespace(true)
.on_mention(Some(bot_id))
.prefix("bb-")
.case_insensitivity(true)
.allow_dm(false)
})
.unrecognised_command(unknown_command)
// Set a function that's called whenever a command's execution didn't complete for one
// reason or another. For example, when a user has exceeded a rate-limit or a command
// can only be performed by the bot owner.
.on_dispatch_error(dispatch_error)
// The `#[group]` macro generates `static` instances of the options set for the group.
// They're made in the pattern: `#name_GROUP` for the group instance and `#name_GROUP_OPTIONS`.
// #name is turned all uppercase
.group(&GENERAL_GROUP)
.group(&BLACKLIST_GROUP)
.group(&SETTINGS_GROUP)
.group(&META_GROUP);
let mut client = Client::builder(&token)
.event_handler(Handler)
.framework(framework)
.intents(
GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::privileged(),
)
.await
.expect("Err creating client");
{
let conn = sqlx::SqlitePool::connect("db.sqlite").await;
let mut data = client.data.write().await;
data.insert::<db::MyDbContext>(MyDbContext::new(conn.unwrap()));
data.insert::<autopanic::Gramma>(autopanic::Gramma::new());
data.insert::<ShardManagerContainer>(Arc::clone(&client.shard_manager));
}
if let Err(why) = client.start().await {
println!("Client error: {:?}", why);
}
}
async fn garbage_collect(ctx: &Context) {
let now = autopanic::time_now();
let mut data = ctx.data.write().await;
let settings_map = &data
.get::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.").cache.clone();
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
// iterate over gramma to get your mom for each guild
// each your mom will have a settings attached, as well as memory of joins and whatnot
// make and save a new list of just the joins that are currently relevant and discard the previous
for (k, v) in grammy.guild_mamas.iter_mut() {
if let Some(settings) = settings_map.get(&k) {
let max_age = settings.time; // duration we keep join records, in seconds
let mut new_recent_users: HashMap<u64, u64> = HashMap::new();
for (timestamp, user) in v.recent_users.iter_mut() { // timestamp joined, userid
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_recent_users.insert(*timestamp, *user);
}
}
v.recent_users = new_recent_users;
let max_age = settings.mentiontime;
let mut new_userpings: HashMap<u64, (usize, u64)> = HashMap::new();
for (timestamp, user) in v.userpings.iter() {
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_userpings.insert(*timestamp, *user);
}
}
v.userpings = new_userpings;
let mut new_rollpings: HashMap<u64, (usize, u64)> = HashMap::new();
for (timestamp, user) in v.rollpings.iter() {
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_rollpings.insert(*timestamp, *user);
}
}
v.rollpings = new_rollpings;
}
}
}
| dispatch_error | identifier_name |
main.rs | #![allow(unused)]
#![allow(non_snake_case)]
use crate::db::MyDbContext;
use serenity::model::prelude::*;
use sqlx::Result;
use serenity::{
async_trait,
client::bridge::gateway::{GatewayIntents, ShardId, ShardManager},
framework::standard::{
buckets::{LimitedFor, RevertBucket},
help_commands,
macros::{command, group, help, hook},
Args, CommandGroup, CommandOptions, CommandResult, DispatchError, HelpOptions, Reason,
StandardFramework,
},
http::Http,
model::{
channel::{Channel, Message},
gateway::Ready,
guild::Guild,
id::UserId,
permissions::Permissions,
},
utils::{content_safe, ContentSafeOptions},
};
use std::{
collections::{HashMap, HashSet},
env,
fmt::Write,
sync::Arc,
};
use serenity::prelude::*;
use tokio::sync::Mutex;
mod admin_commands;
mod autopanic;
mod blob_blacklist_conversions;
mod commands;
mod db;
use crate::admin_commands::*;
use crate::autopanic::*;
use crate::commands::*;
use std::convert::TryInto;
use std::process::exit;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{sleep, Duration};
// A container type is created for inserting into the Client's `data`, which
// allows for data to be accessible across all events and framework commands, or
// anywhere else that has a copy of the `data` Arc.
struct ShardManagerContainer;
impl TypeMapKey for ShardManagerContainer {
type Value = Arc<Mutex<ShardManager>>;
}
impl TypeMapKey for db::MyDbContext {
type Value = db::MyDbContext;
}
impl TypeMapKey for autopanic::Gramma {
type Value = autopanic::Gramma;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn guild_create(&self, ctx: Context, guild: Guild, is_new: bool) {
let mut data = ctx.data.write().await;
let mut dbcontext = data
.get_mut::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.");
let id = &guild.id.0;
if let Some(s) = dbcontext.fetch_settings(id).await {
println!("Found guild {} settings", id);
dbcontext.cache.insert(*id, s);
} else {
println!("Creating a new settings row for guild {}", id);
dbcontext.add_guild(id).await; // also adds to cache
//greet_new_guild(&ctx, &guild).await;
};
set_status(&ctx).await;
}
async fn channel_pins_update(&self, ctx: Context, _pins: ChannelPinsUpdateEvent) {
println!("yeet doing a garbage run");
garbage_collect(&ctx);
println!("done");
}
async fn guild_member_addition(&self, ctx: Context, guild_id: GuildId, mut new_member: Member) {
println!("new member joined {}: {}", guild_id, new_member.user.name);
{
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild_id.0);
mom.recent_users.insert(
new_member
.joined_at
.unwrap()
.timestamp_millis()
.try_into()
.unwrap(),
new_member.user.id.0,
);
}
check_against_joins(&ctx, guild_id.0).await;
check_against_blacklist(&ctx, new_member, guild_id.0).await;
}
async fn message(&self, ctx: Context, new_message: Message) {
/*
if new_message.content.len() > 20_usize {
println!("Message! {}...", &new_message.content[..19]);
} else {
println!("Message! {}", &new_message.content);
}*/
// we use the message timestamp instead of time::now because of potential lag of events
let timestamp: u64 = new_message.timestamp.timestamp_millis().try_into().unwrap();
let guild = new_message.guild_id.unwrap().0;
let author = new_message.author.id.0;
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild);
if!new_message.mentions.is_empty() {
mom.userpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if!new_message.mention_roles.is_empty() {
mom.rollpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if!new_message.mention_roles.is_empty() ||!new_message.mentions.is_empty() {
autopanic::check_against_pings(&ctx, mom, guild).await;
}
}
async fn ready(&self, ctx: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
pub async fn better_default_channel(guild: &Guild, uid: UserId) -> Option<Vec<&GuildChannel>> {
let member = guild.members.get(&uid)?;
let mut out = vec![];
for channel in guild.channels.values() {
if channel.kind == ChannelType::Text
&& guild
.user_permissions_in(channel, member)
.ok()?
.send_messages()
&& guild
.user_permissions_in(channel, member)
.ok()?
.read_messages()
{
let x = guild.user_permissions_in(channel, member).expect("goo");
//return Some(channel);
dbg!(x);
println!("{:?}", x.bits);
println!("{}", channel.name);
out.push(channel);
}
}
if out.is_empty() | else {
Some(out)
}
}
async fn greet_new_guild(ctx: &Context, guild: &Guild) {
println!("h");
if let Some(channelvec) = better_default_channel(guild, UserId(802019556801511424_u64)).await {
println!("i");
for channel in channelvec {
println!("{}", channel.name);
let res = channel.say(&ctx, "
Thanks for adding me to the server! Here's some next steps:\n
Configure who can run most commands (like turning on or off panic mode): run `bb-settings set roll_that_can_panic Staff` for example (if you have a roll called Staff)\n
I recommend that you set up a log channel for me to talk in (and set it like `bb-settings set logs #mychannel` but replace mychannel with the actual one) \n
Also probs tell me a roll for me to ping when I automatically detect a raid and go into panic mode (`bb-settings set notify raidresponders` - replacing raidresponders with that roll)\n
Reviewing default settings is recommended - `bb-settings` and adjust them as you wish. `bb-help` shows all my commands.\n
If you find yourself needing support, there's a support server invite in `bb-about`\
").await;
if res.is_ok() {
return;
}
}
} else {
println!(
"hey i wanted to greet {} {} but they wont let everyone talk",
guild.name, guild.id.0
);
}
}
async fn set_status(ctx: &Context) {
ctx.shard.set_status(OnlineStatus::DoNotDisturb);
let s = format!("to {} guilds | bb-help", ctx.cache.guild_count().await);
ctx.shard.set_activity(Some(Activity::listening(&*s)));
}
#[group]
#[commands(panic, uinfo, forceban, help, delete)]
struct General;
#[group]
#[commands(about, ping, die, update, free, git_push, garbage, foo)] // status)]
struct Meta;
#[group]
// Sets multiple prefixes for a group.
// This requires us to call commands in this group
// via `~emoji` (or `~em`) instead of just `~`.
#[prefixes("settings", "s")]
// Set a description to appear if a user wants to display a single group
// e.g. via help using the group-name or one of its prefixes.
// Summary only appears when listing multiple groups.
// Sets a command that will be executed if only a group-prefix was passed.
#[default_command(show)]
#[commands(reset, set)]
struct Settings;
#[group]
#[prefixes("blacklist", "bl")]
#[default_command(blacklist_show)]
#[commands(remove, add)]
struct Blacklist;
#[hook] // this appears not to work
async fn before(ctx: &Context, msg: &Message, command_name: &str) -> bool {
println!(
"Got command '{}' by user '{}'",
command_name, msg.author.name
);
true // if `before` returns false, command processing doesn't happen.
}
#[hook]
async fn after(_ctx: &Context, _msg: &Message, command_name: &str, command_result: CommandResult) {
match command_result {
Ok(()) => println!("Processed command '{}'", command_name),
Err(why) => println!("Command '{}' returned error {:?}", command_name, why),
}
}
#[hook]
async fn unknown_command(_ctx: &Context, _msg: &Message, unknown_command_name: &str) {
println!("Could not find command named '{}'", unknown_command_name);
}
#[hook]
async fn dispatch_error(ctx: &Context, msg: &Message, error: DispatchError) {
if let DispatchError::Ratelimited(info) = error {
// We notify them only once.
if info.is_first_try {
let _ = msg
.channel_id
.say(
&ctx.http,
&format!("Try this again in {} seconds.", info.as_secs()),
)
.await;
}
}
}
#[tokio::main]
async fn main() {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
println!("{:?}", since_the_epoch);
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
let http = Http::new_with_token(&token);
// We will fetch your bot's owners and id
let bot_id = match http.get_current_application_info().await {
Ok(_) => match http.get_current_user().await {
Ok(bot_id) => bot_id.id,
Err(why) => panic!("Could not access the bot id: {:?}", why),
},
Err(why) => panic!("Could not access application info: {:?}", why),
};
let framework = StandardFramework::new()
.configure(|c| {
c.with_whitespace(true)
.on_mention(Some(bot_id))
.prefix("bb-")
.case_insensitivity(true)
.allow_dm(false)
})
.unrecognised_command(unknown_command)
// Set a function that's called whenever a command's execution didn't complete for one
// reason or another. For example, when a user has exceeded a rate-limit or a command
// can only be performed by the bot owner.
.on_dispatch_error(dispatch_error)
// The `#[group]` macro generates `static` instances of the options set for the group.
// They're made in the pattern: `#name_GROUP` for the group instance and `#name_GROUP_OPTIONS`.
// #name is turned all uppercase
.group(&GENERAL_GROUP)
.group(&BLACKLIST_GROUP)
.group(&SETTINGS_GROUP)
.group(&META_GROUP);
let mut client = Client::builder(&token)
.event_handler(Handler)
.framework(framework)
.intents(
GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::privileged(),
)
.await
.expect("Err creating client");
{
let conn = sqlx::SqlitePool::connect("db.sqlite").await;
let mut data = client.data.write().await;
data.insert::<db::MyDbContext>(MyDbContext::new(conn.unwrap()));
data.insert::<autopanic::Gramma>(autopanic::Gramma::new());
data.insert::<ShardManagerContainer>(Arc::clone(&client.shard_manager));
}
if let Err(why) = client.start().await {
println!("Client error: {:?}", why);
}
}
async fn garbage_collect(ctx: &Context) {
let now = autopanic::time_now();
let mut data = ctx.data.write().await;
let settings_map = &data
.get::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.").cache.clone();
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
// iterate over gramma to get your mom for each guild
// each your mom will have a settings attached, as well as memory of joins and whatnot
// make and save a new list of just the joins that are currently relevant and discard the previous
for (k, v) in grammy.guild_mamas.iter_mut() {
if let Some(settings) = settings_map.get(&k) {
let max_age = settings.time; // duration we keep join records, in seconds
let mut new_recent_users: HashMap<u64, u64> = HashMap::new();
for (timestamp, user) in v.recent_users.iter_mut() { // timestamp joined, userid
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_recent_users.insert(*timestamp, *user);
}
}
v.recent_users = new_recent_users;
let max_age = settings.mentiontime;
let mut new_userpings: HashMap<u64, (usize, u64)> = HashMap::new();
for (timestamp, user) in v.userpings.iter() {
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_userpings.insert(*timestamp, *user);
}
}
v.userpings = new_userpings;
let mut new_rollpings: HashMap<u64, (usize, u64)> = HashMap::new();
for (timestamp, user) in v.rollpings.iter() {
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_rollpings.insert(*timestamp, *user);
}
}
v.rollpings = new_rollpings;
}
}
}
| {
None
} | conditional_block |
main.rs | #![allow(unused)]
#![allow(non_snake_case)]
use crate::db::MyDbContext;
use serenity::model::prelude::*;
use sqlx::Result;
use serenity::{
async_trait,
client::bridge::gateway::{GatewayIntents, ShardId, ShardManager},
framework::standard::{
buckets::{LimitedFor, RevertBucket},
help_commands,
macros::{command, group, help, hook},
Args, CommandGroup, CommandOptions, CommandResult, DispatchError, HelpOptions, Reason,
StandardFramework,
},
http::Http,
model::{
channel::{Channel, Message},
gateway::Ready,
guild::Guild,
id::UserId,
permissions::Permissions,
},
utils::{content_safe, ContentSafeOptions},
};
use std::{
collections::{HashMap, HashSet},
env,
fmt::Write,
sync::Arc,
};
use serenity::prelude::*;
use tokio::sync::Mutex;
mod admin_commands;
mod autopanic;
mod blob_blacklist_conversions;
mod commands;
mod db;
use crate::admin_commands::*;
use crate::autopanic::*;
use crate::commands::*;
use std::convert::TryInto;
use std::process::exit;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{sleep, Duration};
// A container type is created for inserting into the Client's `data`, which
// allows for data to be accessible across all events and framework commands, or
// anywhere else that has a copy of the `data` Arc.
struct ShardManagerContainer;
impl TypeMapKey for ShardManagerContainer {
type Value = Arc<Mutex<ShardManager>>;
}
impl TypeMapKey for db::MyDbContext {
type Value = db::MyDbContext;
}
impl TypeMapKey for autopanic::Gramma {
type Value = autopanic::Gramma;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn guild_create(&self, ctx: Context, guild: Guild, is_new: bool) {
let mut data = ctx.data.write().await;
let mut dbcontext = data
.get_mut::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.");
let id = &guild.id.0;
if let Some(s) = dbcontext.fetch_settings(id).await {
println!("Found guild {} settings", id);
dbcontext.cache.insert(*id, s);
} else {
println!("Creating a new settings row for guild {}", id);
dbcontext.add_guild(id).await; // also adds to cache
//greet_new_guild(&ctx, &guild).await;
};
set_status(&ctx).await;
}
async fn channel_pins_update(&self, ctx: Context, _pins: ChannelPinsUpdateEvent) {
println!("yeet doing a garbage run");
garbage_collect(&ctx);
println!("done");
}
async fn guild_member_addition(&self, ctx: Context, guild_id: GuildId, mut new_member: Member) {
println!("new member joined {}: {}", guild_id, new_member.user.name);
{
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild_id.0);
mom.recent_users.insert(
new_member
.joined_at
.unwrap()
.timestamp_millis()
.try_into()
.unwrap(),
new_member.user.id.0,
);
}
check_against_joins(&ctx, guild_id.0).await;
check_against_blacklist(&ctx, new_member, guild_id.0).await;
}
async fn message(&self, ctx: Context, new_message: Message) {
/*
if new_message.content.len() > 20_usize {
println!("Message! {}...", &new_message.content[..19]);
} else {
println!("Message! {}", &new_message.content);
}*/
// we use the message timestamp instead of time::now because of potential lag of events
let timestamp: u64 = new_message.timestamp.timestamp_millis().try_into().unwrap();
let guild = new_message.guild_id.unwrap().0;
let author = new_message.author.id.0;
let mut data = ctx.data.write().await;
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
let mut mom = grammy.get(&guild);
if!new_message.mentions.is_empty() {
mom.userpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if!new_message.mention_roles.is_empty() {
mom.rollpings
.insert(timestamp, (new_message.mentions.len(), author));
}
if!new_message.mention_roles.is_empty() ||!new_message.mentions.is_empty() {
autopanic::check_against_pings(&ctx, mom, guild).await;
}
}
async fn ready(&self, ctx: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
| pub async fn better_default_channel(guild: &Guild, uid: UserId) -> Option<Vec<&GuildChannel>> {
let member = guild.members.get(&uid)?;
let mut out = vec![];
for channel in guild.channels.values() {
if channel.kind == ChannelType::Text
&& guild
.user_permissions_in(channel, member)
.ok()?
.send_messages()
&& guild
.user_permissions_in(channel, member)
.ok()?
.read_messages()
{
let x = guild.user_permissions_in(channel, member).expect("goo");
//return Some(channel);
dbg!(x);
println!("{:?}", x.bits);
println!("{}", channel.name);
out.push(channel);
}
}
if out.is_empty() {
None
} else {
Some(out)
}
}
async fn greet_new_guild(ctx: &Context, guild: &Guild) {
println!("h");
if let Some(channelvec) = better_default_channel(guild, UserId(802019556801511424_u64)).await {
println!("i");
for channel in channelvec {
println!("{}", channel.name);
let res = channel.say(&ctx, "
Thanks for adding me to the server! Here's some next steps:\n
Configure who can run most commands (like turning on or off panic mode): run `bb-settings set roll_that_can_panic Staff` for example (if you have a roll called Staff)\n
I recommend that you set up a log channel for me to talk in (and set it like `bb-settings set logs #mychannel` but replace mychannel with the actual one) \n
Also probs tell me a roll for me to ping when I automatically detect a raid and go into panic mode (`bb-settings set notify raidresponders` - replacing raidresponders with that roll)\n
Reviewing default settings is recommended - `bb-settings` and adjust them as you wish. `bb-help` shows all my commands.\n
If you find yourself needing support, there's a support server invite in `bb-about`\
").await;
if res.is_ok() {
return;
}
}
} else {
println!(
"hey i wanted to greet {} {} but they wont let everyone talk",
guild.name, guild.id.0
);
}
}
async fn set_status(ctx: &Context) {
ctx.shard.set_status(OnlineStatus::DoNotDisturb);
let s = format!("to {} guilds | bb-help", ctx.cache.guild_count().await);
ctx.shard.set_activity(Some(Activity::listening(&*s)));
}
#[group]
#[commands(panic, uinfo, forceban, help, delete)]
struct General;
#[group]
#[commands(about, ping, die, update, free, git_push, garbage, foo)] // status)]
struct Meta;
#[group]
// Sets multiple prefixes for a group.
// This requires us to call commands in this group
// via `~emoji` (or `~em`) instead of just `~`.
#[prefixes("settings", "s")]
// Set a description to appear if a user wants to display a single group
// e.g. via help using the group-name or one of its prefixes.
// Summary only appears when listing multiple groups.
// Sets a command that will be executed if only a group-prefix was passed.
#[default_command(show)]
#[commands(reset, set)]
struct Settings;
#[group]
#[prefixes("blacklist", "bl")]
#[default_command(blacklist_show)]
#[commands(remove, add)]
struct Blacklist;
#[hook] // this appears not to work
async fn before(ctx: &Context, msg: &Message, command_name: &str) -> bool {
println!(
"Got command '{}' by user '{}'",
command_name, msg.author.name
);
true // if `before` returns false, command processing doesn't happen.
}
#[hook]
async fn after(_ctx: &Context, _msg: &Message, command_name: &str, command_result: CommandResult) {
match command_result {
Ok(()) => println!("Processed command '{}'", command_name),
Err(why) => println!("Command '{}' returned error {:?}", command_name, why),
}
}
#[hook]
async fn unknown_command(_ctx: &Context, _msg: &Message, unknown_command_name: &str) {
println!("Could not find command named '{}'", unknown_command_name);
}
#[hook]
async fn dispatch_error(ctx: &Context, msg: &Message, error: DispatchError) {
if let DispatchError::Ratelimited(info) = error {
// We notify them only once.
if info.is_first_try {
let _ = msg
.channel_id
.say(
&ctx.http,
&format!("Try this again in {} seconds.", info.as_secs()),
)
.await;
}
}
}
#[tokio::main]
async fn main() {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
println!("{:?}", since_the_epoch);
// Configure the client with your Discord bot token in the environment.
let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
let http = Http::new_with_token(&token);
// We will fetch your bot's owners and id
let bot_id = match http.get_current_application_info().await {
Ok(_) => match http.get_current_user().await {
Ok(bot_id) => bot_id.id,
Err(why) => panic!("Could not access the bot id: {:?}", why),
},
Err(why) => panic!("Could not access application info: {:?}", why),
};
let framework = StandardFramework::new()
.configure(|c| {
c.with_whitespace(true)
.on_mention(Some(bot_id))
.prefix("bb-")
.case_insensitivity(true)
.allow_dm(false)
})
.unrecognised_command(unknown_command)
// Set a function that's called whenever a command's execution didn't complete for one
// reason or another. For example, when a user has exceeded a rate-limit or a command
// can only be performed by the bot owner.
.on_dispatch_error(dispatch_error)
// The `#[group]` macro generates `static` instances of the options set for the group.
// They're made in the pattern: `#name_GROUP` for the group instance and `#name_GROUP_OPTIONS`.
// #name is turned all uppercase
.group(&GENERAL_GROUP)
.group(&BLACKLIST_GROUP)
.group(&SETTINGS_GROUP)
.group(&META_GROUP);
let mut client = Client::builder(&token)
.event_handler(Handler)
.framework(framework)
.intents(
GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::privileged(),
)
.await
.expect("Err creating client");
{
let conn = sqlx::SqlitePool::connect("db.sqlite").await;
let mut data = client.data.write().await;
data.insert::<db::MyDbContext>(MyDbContext::new(conn.unwrap()));
data.insert::<autopanic::Gramma>(autopanic::Gramma::new());
data.insert::<ShardManagerContainer>(Arc::clone(&client.shard_manager));
}
if let Err(why) = client.start().await {
println!("Client error: {:?}", why);
}
}
async fn garbage_collect(ctx: &Context) {
let now = autopanic::time_now();
let mut data = ctx.data.write().await;
let settings_map = &data
.get::<MyDbContext>()
.expect("Expected MyDbContext in TypeMap.").cache.clone();
let mut grammy = data
.get_mut::<autopanic::Gramma>()
.expect("Expected your momma in TypeMap.");
// iterate over gramma to get your mom for each guild
// each your mom will have a settings attached, as well as memory of joins and whatnot
// make and save a new list of just the joins that are currently relevant and discard the previous
for (k, v) in grammy.guild_mamas.iter_mut() {
if let Some(settings) = settings_map.get(&k) {
let max_age = settings.time; // duration we keep join records, in seconds
let mut new_recent_users: HashMap<u64, u64> = HashMap::new();
for (timestamp, user) in v.recent_users.iter_mut() { // timestamp joined, userid
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_recent_users.insert(*timestamp, *user);
}
}
v.recent_users = new_recent_users;
let max_age = settings.mentiontime;
let mut new_userpings: HashMap<u64, (usize, u64)> = HashMap::new();
for (timestamp, user) in v.userpings.iter() {
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_userpings.insert(*timestamp, *user);
}
}
v.userpings = new_userpings;
let mut new_rollpings: HashMap<u64, (usize, u64)> = HashMap::new();
for (timestamp, user) in v.rollpings.iter() {
if!autopanic::time_is_past(*timestamp, max_age as u64) {
new_rollpings.insert(*timestamp, *user);
}
}
v.rollpings = new_rollpings;
}
}
} | random_line_split |
|
irc_comm.rs | use super::bot_cmd;
use super::irc_msgs::is_msg_to_nick;
use super::irc_msgs::OwningMsgPrefix;
use super::irc_send::push_to_outbox;
use super::irc_send::OutboxPort;
use super::parse_msg_to_nick;
use super::pkg_info;
use super::reaction::LibReaction;
use super::trigger;
use super::BotCmdResult;
use super::ErrorKind;
use super::MsgDest;
use super::MsgMetadata;
use super::MsgPrefix;
use super::Reaction;
use super::Result;
use super::ServerId;
use super::State;
use irc::client::prelude as aatxe;
use irc::proto::Message;
use itertools::Itertools;
use smallvec::SmallVec;
use std::borrow::Borrow;
use std::borrow::Cow;
use std::cmp;
use std::fmt::Display;
use std::sync::Arc;
use std::thread;
const UPDATE_MSG_PREFIX_STR: &'static str = "!!! UPDATE MESSAGE PREFIX!!!";
impl State {
fn compose_msg<S1, S2>(
&self,
dest: MsgDest,
addressee: S1,
msg: S2,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
| ));
Ok(())
})?;
}
match wrapped_msg.len() {
0 => Ok(None),
1 => Ok(Some(wrapped_msg.remove(0))),
_ => Ok(Some(LibReaction::Multi(wrapped_msg.into_vec()))),
}
}
fn compose_msgs<S1, S2, M>(
&self,
dest: MsgDest,
addressee: S1,
msgs: M,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
M: IntoIterator<Item = S2>,
{
// Not `SmallVec`, because we're guessing that the caller expects multiple messages.
let mut output = Vec::new();
for msg in msgs {
match self.compose_msg(dest, addressee.borrow(), msg)? {
Some(m) => output.push(m),
None => {}
}
}
match output.len() {
0 => Ok(None),
1 => Ok(Some(output.remove(0))),
_ => Ok(Some(LibReaction::Multi(output))),
}
}
fn prefix_len(&self, server_id: ServerId) -> Result<usize> {
Ok(self.read_msg_prefix(server_id)?.len())
}
}
fn wrap_msg<F>(
state: &State,
MsgDest { server_id, target }: MsgDest,
msg: &str,
mut f: F,
) -> Result<()>
where
F: FnMut(&str) -> Result<()>,
{
// :nick!user@host PRIVMSG target :message
// :nick!user@host NOTICE target :message
let raw_len_limit = 512;
let punctuation_len = {
let line_terminator_len = 2;
let spaces = 3;
let colons = 2;
colons + spaces + line_terminator_len
};
let cmd_len = "PRIVMSG".len();
let metadata_len = state.prefix_len(server_id)? + cmd_len + target.len() + punctuation_len;
let msg_len_limit = raw_len_limit - metadata_len;
if msg.len() < msg_len_limit {
return f(msg);
}
let mut split_end_idx = 0;
let lines = msg.match_indices(char::is_whitespace)
.peekable()
.batching(|iter| {
debug_assert!(msg.len() >= msg_len_limit);
let split_start_idx = split_end_idx;
if split_start_idx >= msg.len() {
return None;
}
while let Some(&(next_space_idx, _)) = iter.peek() {
if msg[split_start_idx..next_space_idx].len() < msg_len_limit {
split_end_idx = next_space_idx;
iter.next();
} else {
break;
}
}
if iter.peek().is_none() {
split_end_idx = msg.len()
} else if split_end_idx <= split_start_idx {
split_end_idx = cmp::min(split_start_idx + msg_len_limit, msg.len())
}
Some(msg[split_start_idx..split_end_idx].trim())
});
for line in lines {
f(line)?
}
Ok(())
}
fn handle_reaction(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: &str,
reaction: Reaction,
bot_nick: String,
) -> Result<Option<LibReaction<Message>>> {
let (reply_target, reply_addressee) = if target == bot_nick {
(prefix.parse().nick.unwrap(), "")
} else {
(target, prefix.parse().nick.unwrap_or(""))
};
let reply_dest = MsgDest {
server_id,
target: reply_target,
};
match reaction {
Reaction::None => Ok(None),
Reaction::Msg(s) => state.compose_msg(reply_dest, "", &s),
Reaction::Msgs(a) => state.compose_msgs(reply_dest, "", a.iter()),
Reaction::Reply(s) => state.compose_msg(reply_dest, reply_addressee, &s),
Reaction::Replies(a) => state.compose_msgs(reply_dest, reply_addressee, a.iter()),
Reaction::RawMsg(s) => Ok(Some(LibReaction::RawMsg(s.parse()?))),
Reaction::Quit(msg) => Ok(Some(mk_quit(msg))),
}
}
fn handle_bot_command_or_trigger(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: String,
msg: String,
bot_nick: String,
) -> Option<LibReaction<Message>> {
let reaction = (|| {
let metadata = MsgMetadata {
prefix: prefix.parse(),
dest: MsgDest {
server_id,
target: &target,
},
};
let cmd_ln = parse_msg_to_nick(&msg, metadata.dest.target, &bot_nick).unwrap_or("");
let mut cmd_name_and_args = cmd_ln.splitn(2, char::is_whitespace);
let cmd_name = cmd_name_and_args.next().unwrap_or("");
let cmd_args = cmd_name_and_args.next().unwrap_or("").trim();
if let Some(r) = bot_cmd::run(state, cmd_name, cmd_args, &metadata)? {
Ok(bot_command_reaction(cmd_name, r))
} else if let Some(r) = trigger::run_any_matching(state, cmd_ln, &metadata)? {
Ok(bot_command_reaction("<trigger>", r))
} else {
Ok(Reaction::None)
}
})();
match reaction
.and_then(|reaction| handle_reaction(state, server_id, prefix, &target, reaction, bot_nick))
{
Ok(r) => r,
Err(e) => Some(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
target,
format!("Encountered error while trying to handle message: {}", e),
).into(),
)),
}
}
fn bot_command_reaction(cmd_name: &str, result: BotCmdResult) -> Reaction {
let cmd_result = match result {
BotCmdResult::Ok(r) => Ok(r),
BotCmdResult::Unauthorized => Err(format!(
"My apologies, but you do not appear to have sufficient \
authority to use my {:?} command.",
cmd_name
).into()),
BotCmdResult::SyntaxErr => Err("Syntax error. Try my `help` command.".into()),
BotCmdResult::ArgMissing(arg_name) => Err(format!(
"Syntax error: For command {:?}, the argument {:?} \
is required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::ArgMissing1To1(arg_name) => Err(format!(
"Syntax error: When command {:?} is used \
outside of a channel, the argument {:?} is \
required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::LibErr(e) => Err(format!("Error: {}", e).into()),
BotCmdResult::UserErrMsg(s) => Err(format!("User error: {}", s).into()),
BotCmdResult::BotErrMsg(s) => Err(format!("Internal error: {}", s).into()),
};
match cmd_result {
Ok(r) => r,
Err(s) => Reaction::Msg(s),
}
}
pub fn mk_quit<'a>(msg: Option<Cow<'a, str>>) -> LibReaction<Message> {
let quit = aatxe::Command::QUIT(
msg.map(Cow::into_owned)
.or_else(|| Some(pkg_info::BRIEF_CREDITS_STRING.clone())),
).into();
LibReaction::RawMsg(quit)
}
pub(super) fn handle_msg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
input_msg: Message,
) -> Result<()> {
trace!(
"[{}] Received {:?}",
state.server_socket_addr_dbg_string(server_id),
input_msg.to_string().trim_right_matches("\r\n")
);
match input_msg {
Message {
command: aatxe::Command::PRIVMSG(target, msg),
prefix,
..
} => handle_privmsg(
state,
server_id,
outbox,
OwningMsgPrefix::from_string(prefix.unwrap_or_default()),
target,
msg,
),
Message {
command: aatxe::Command::Response(aatxe::Response::RPL_MYINFO,..),
..
} => {
push_to_outbox(outbox, server_id, handle_004(state, server_id)?);
Ok(())
}
_ => Ok(()),
}
}
fn handle_privmsg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
prefix: OwningMsgPrefix,
target: String,
msg: String,
) -> Result<()> {
trace!(
"[{}] Handling PRIVMSG: {:?}",
state.server_socket_addr_dbg_string(server_id),
msg
);
let bot_nick = state.nick(server_id)?;
if!is_msg_to_nick(&target, &msg, &bot_nick) {
return Ok(());
}
if prefix.parse().nick == Some(&target) && msg.trim() == UPDATE_MSG_PREFIX_STR {
update_prefix_info(state, server_id, &prefix.parse())
} else {
// This could take a while or panic, so do it in a new thread.
// These are cheap to clone, supposedly.
let state = state.clone();
let outbox = outbox.clone();
let thread_spawn_result = thread::Builder::new().spawn(move || {
let lib_reaction =
handle_bot_command_or_trigger(&state, server_id, prefix, target, msg, bot_nick);
push_to_outbox(&outbox, server_id, lib_reaction);
});
match thread_spawn_result {
Ok(thread::JoinHandle {.. }) => Ok(()),
Err(e) => Err(ErrorKind::ThreadSpawnFailure(e).into()),
}
}
}
fn update_prefix_info(state: &State, _server_id: ServerId, prefix: &MsgPrefix) -> Result<()> {
debug!(
"Updating stored message prefix information from received {:?}",
prefix
);
match state.msg_prefix.write() {
Ok(guard) => guard,
Err(poisoned_guard) => {
// The lock was poisoned, you say? That's strange, unfortunate, and unlikely to be a
// problem here, because we're just going to overwrite the contents anyway.
warn!(
"Stored message prefix was poisoned by thread panic! Discarding it, replacing it, \
and moving on."
);
poisoned_guard.into_inner()
}
}.update_from(prefix);
Ok(())
}
fn handle_004(state: &State, server_id: ServerId) -> Result<LibReaction<Message>> {
// The server has finished sending the protocol-mandated welcome messages.
send_msg_prefix_update_request(state, server_id)
}
// TODO: Run `send_msg_prefix_update_request` periodically.
fn send_msg_prefix_update_request(
state: &State,
server_id: ServerId,
) -> Result<LibReaction<Message>> {
Ok(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
state.nick(server_id)?.to_owned(),
UPDATE_MSG_PREFIX_STR.to_owned(),
).into(),
))
}
| {
let final_msg = format!(
"{}{}{}",
addressee.borrow(),
if addressee.borrow().is_empty() {
""
} else {
&self.addressee_suffix
},
msg,
);
info!("Sending message to {:?}: {:?}", dest, final_msg);
let mut wrapped_msg = SmallVec::<[_; 1]>::new();
for input_line in final_msg.lines() {
wrap_msg(self, dest, input_line, |output_line| {
wrapped_msg.push(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(dest.target.to_owned(), output_line.to_owned()).into(), | identifier_body |
irc_comm.rs | use super::bot_cmd;
use super::irc_msgs::is_msg_to_nick;
use super::irc_msgs::OwningMsgPrefix;
use super::irc_send::push_to_outbox;
use super::irc_send::OutboxPort;
use super::parse_msg_to_nick;
use super::pkg_info;
use super::reaction::LibReaction;
use super::trigger;
use super::BotCmdResult;
use super::ErrorKind;
use super::MsgDest;
use super::MsgMetadata;
use super::MsgPrefix;
use super::Reaction;
use super::Result;
use super::ServerId;
use super::State;
use irc::client::prelude as aatxe;
use irc::proto::Message;
use itertools::Itertools;
use smallvec::SmallVec;
use std::borrow::Borrow;
use std::borrow::Cow;
use std::cmp;
use std::fmt::Display;
use std::sync::Arc;
use std::thread;
const UPDATE_MSG_PREFIX_STR: &'static str = "!!! UPDATE MESSAGE PREFIX!!!";
impl State {
fn compose_msg<S1, S2>(
&self,
dest: MsgDest,
addressee: S1,
msg: S2,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
{
let final_msg = format!(
"{}{}{}",
addressee.borrow(),
if addressee.borrow().is_empty() {
""
} else {
&self.addressee_suffix
},
msg,
);
info!("Sending message to {:?}: {:?}", dest, final_msg);
let mut wrapped_msg = SmallVec::<[_; 1]>::new();
for input_line in final_msg.lines() {
wrap_msg(self, dest, input_line, |output_line| {
wrapped_msg.push(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(dest.target.to_owned(), output_line.to_owned()).into(),
));
Ok(())
})?;
}
match wrapped_msg.len() {
0 => Ok(None),
1 => Ok(Some(wrapped_msg.remove(0))),
_ => Ok(Some(LibReaction::Multi(wrapped_msg.into_vec()))),
}
}
fn compose_msgs<S1, S2, M>(
&self,
dest: MsgDest,
addressee: S1,
msgs: M,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
M: IntoIterator<Item = S2>,
{
// Not `SmallVec`, because we're guessing that the caller expects multiple messages.
let mut output = Vec::new();
for msg in msgs {
match self.compose_msg(dest, addressee.borrow(), msg)? {
Some(m) => output.push(m),
None => {}
}
}
match output.len() {
0 => Ok(None),
1 => Ok(Some(output.remove(0))),
_ => Ok(Some(LibReaction::Multi(output))),
}
}
fn prefix_len(&self, server_id: ServerId) -> Result<usize> {
Ok(self.read_msg_prefix(server_id)?.len())
}
}
fn wrap_msg<F>(
state: &State,
MsgDest { server_id, target }: MsgDest,
msg: &str,
mut f: F,
) -> Result<()>
where
F: FnMut(&str) -> Result<()>,
{
// :nick!user@host PRIVMSG target :message
// :nick!user@host NOTICE target :message
let raw_len_limit = 512;
let punctuation_len = {
let line_terminator_len = 2;
let spaces = 3;
let colons = 2;
colons + spaces + line_terminator_len
};
let cmd_len = "PRIVMSG".len();
let metadata_len = state.prefix_len(server_id)? + cmd_len + target.len() + punctuation_len;
let msg_len_limit = raw_len_limit - metadata_len;
if msg.len() < msg_len_limit {
return f(msg);
}
let mut split_end_idx = 0;
let lines = msg.match_indices(char::is_whitespace)
.peekable()
.batching(|iter| {
debug_assert!(msg.len() >= msg_len_limit);
let split_start_idx = split_end_idx;
if split_start_idx >= msg.len() {
return None;
}
while let Some(&(next_space_idx, _)) = iter.peek() {
if msg[split_start_idx..next_space_idx].len() < msg_len_limit {
split_end_idx = next_space_idx;
iter.next();
} else {
break;
}
}
if iter.peek().is_none() {
split_end_idx = msg.len()
} else if split_end_idx <= split_start_idx {
split_end_idx = cmp::min(split_start_idx + msg_len_limit, msg.len())
}
Some(msg[split_start_idx..split_end_idx].trim())
});
for line in lines {
f(line)?
}
Ok(())
}
fn handle_reaction(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: &str,
reaction: Reaction,
bot_nick: String,
) -> Result<Option<LibReaction<Message>>> {
let (reply_target, reply_addressee) = if target == bot_nick {
(prefix.parse().nick.unwrap(), "")
} else {
(target, prefix.parse().nick.unwrap_or(""))
};
let reply_dest = MsgDest {
server_id,
target: reply_target,
};
match reaction {
Reaction::None => Ok(None),
Reaction::Msg(s) => state.compose_msg(reply_dest, "", &s),
Reaction::Msgs(a) => state.compose_msgs(reply_dest, "", a.iter()),
Reaction::Reply(s) => state.compose_msg(reply_dest, reply_addressee, &s),
Reaction::Replies(a) => state.compose_msgs(reply_dest, reply_addressee, a.iter()),
Reaction::RawMsg(s) => Ok(Some(LibReaction::RawMsg(s.parse()?))),
Reaction::Quit(msg) => Ok(Some(mk_quit(msg))),
}
}
fn handle_bot_command_or_trigger(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: String,
msg: String,
bot_nick: String,
) -> Option<LibReaction<Message>> {
let reaction = (|| {
let metadata = MsgMetadata {
prefix: prefix.parse(),
dest: MsgDest {
server_id,
target: &target,
},
};
let cmd_ln = parse_msg_to_nick(&msg, metadata.dest.target, &bot_nick).unwrap_or("");
let mut cmd_name_and_args = cmd_ln.splitn(2, char::is_whitespace);
let cmd_name = cmd_name_and_args.next().unwrap_or("");
let cmd_args = cmd_name_and_args.next().unwrap_or("").trim();
if let Some(r) = bot_cmd::run(state, cmd_name, cmd_args, &metadata)? {
Ok(bot_command_reaction(cmd_name, r))
} else if let Some(r) = trigger::run_any_matching(state, cmd_ln, &metadata)? {
Ok(bot_command_reaction("<trigger>", r))
} else {
Ok(Reaction::None)
}
})();
match reaction
.and_then(|reaction| handle_reaction(state, server_id, prefix, &target, reaction, bot_nick))
{
Ok(r) => r,
Err(e) => Some(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
target,
format!("Encountered error while trying to handle message: {}", e),
).into(),
)),
}
}
fn bot_command_reaction(cmd_name: &str, result: BotCmdResult) -> Reaction {
let cmd_result = match result {
BotCmdResult::Ok(r) => Ok(r),
BotCmdResult::Unauthorized => Err(format!(
"My apologies, but you do not appear to have sufficient \
authority to use my {:?} command.",
cmd_name
).into()),
BotCmdResult::SyntaxErr => Err("Syntax error. Try my `help` command.".into()),
BotCmdResult::ArgMissing(arg_name) => Err(format!(
"Syntax error: For command {:?}, the argument {:?} \
is required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::ArgMissing1To1(arg_name) => Err(format!(
"Syntax error: When command {:?} is used \
outside of a channel, the argument {:?} is \
required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::LibErr(e) => Err(format!("Error: {}", e).into()),
BotCmdResult::UserErrMsg(s) => Err(format!("User error: {}", s).into()),
BotCmdResult::BotErrMsg(s) => Err(format!("Internal error: {}", s).into()),
};
match cmd_result {
Ok(r) => r,
Err(s) => Reaction::Msg(s),
}
}
pub fn | <'a>(msg: Option<Cow<'a, str>>) -> LibReaction<Message> {
let quit = aatxe::Command::QUIT(
msg.map(Cow::into_owned)
.or_else(|| Some(pkg_info::BRIEF_CREDITS_STRING.clone())),
).into();
LibReaction::RawMsg(quit)
}
pub(super) fn handle_msg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
input_msg: Message,
) -> Result<()> {
trace!(
"[{}] Received {:?}",
state.server_socket_addr_dbg_string(server_id),
input_msg.to_string().trim_right_matches("\r\n")
);
match input_msg {
Message {
command: aatxe::Command::PRIVMSG(target, msg),
prefix,
..
} => handle_privmsg(
state,
server_id,
outbox,
OwningMsgPrefix::from_string(prefix.unwrap_or_default()),
target,
msg,
),
Message {
command: aatxe::Command::Response(aatxe::Response::RPL_MYINFO,..),
..
} => {
push_to_outbox(outbox, server_id, handle_004(state, server_id)?);
Ok(())
}
_ => Ok(()),
}
}
fn handle_privmsg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
prefix: OwningMsgPrefix,
target: String,
msg: String,
) -> Result<()> {
trace!(
"[{}] Handling PRIVMSG: {:?}",
state.server_socket_addr_dbg_string(server_id),
msg
);
let bot_nick = state.nick(server_id)?;
if!is_msg_to_nick(&target, &msg, &bot_nick) {
return Ok(());
}
if prefix.parse().nick == Some(&target) && msg.trim() == UPDATE_MSG_PREFIX_STR {
update_prefix_info(state, server_id, &prefix.parse())
} else {
// This could take a while or panic, so do it in a new thread.
// These are cheap to clone, supposedly.
let state = state.clone();
let outbox = outbox.clone();
let thread_spawn_result = thread::Builder::new().spawn(move || {
let lib_reaction =
handle_bot_command_or_trigger(&state, server_id, prefix, target, msg, bot_nick);
push_to_outbox(&outbox, server_id, lib_reaction);
});
match thread_spawn_result {
Ok(thread::JoinHandle {.. }) => Ok(()),
Err(e) => Err(ErrorKind::ThreadSpawnFailure(e).into()),
}
}
}
fn update_prefix_info(state: &State, _server_id: ServerId, prefix: &MsgPrefix) -> Result<()> {
debug!(
"Updating stored message prefix information from received {:?}",
prefix
);
match state.msg_prefix.write() {
Ok(guard) => guard,
Err(poisoned_guard) => {
// The lock was poisoned, you say? That's strange, unfortunate, and unlikely to be a
// problem here, because we're just going to overwrite the contents anyway.
warn!(
"Stored message prefix was poisoned by thread panic! Discarding it, replacing it, \
and moving on."
);
poisoned_guard.into_inner()
}
}.update_from(prefix);
Ok(())
}
fn handle_004(state: &State, server_id: ServerId) -> Result<LibReaction<Message>> {
// The server has finished sending the protocol-mandated welcome messages.
send_msg_prefix_update_request(state, server_id)
}
// TODO: Run `send_msg_prefix_update_request` periodically.
fn send_msg_prefix_update_request(
state: &State,
server_id: ServerId,
) -> Result<LibReaction<Message>> {
Ok(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
state.nick(server_id)?.to_owned(),
UPDATE_MSG_PREFIX_STR.to_owned(),
).into(),
))
}
| mk_quit | identifier_name |
irc_comm.rs | use super::bot_cmd;
use super::irc_msgs::is_msg_to_nick;
use super::irc_msgs::OwningMsgPrefix;
use super::irc_send::push_to_outbox;
use super::irc_send::OutboxPort;
use super::parse_msg_to_nick;
use super::pkg_info;
use super::reaction::LibReaction;
use super::trigger;
use super::BotCmdResult;
use super::ErrorKind;
use super::MsgDest;
use super::MsgMetadata;
use super::MsgPrefix;
use super::Reaction;
use super::Result;
use super::ServerId;
use super::State;
use irc::client::prelude as aatxe;
use irc::proto::Message;
use itertools::Itertools;
use smallvec::SmallVec;
use std::borrow::Borrow;
use std::borrow::Cow;
use std::cmp;
use std::fmt::Display;
use std::sync::Arc;
use std::thread;
const UPDATE_MSG_PREFIX_STR: &'static str = "!!! UPDATE MESSAGE PREFIX!!!";
impl State {
fn compose_msg<S1, S2>(
&self,
dest: MsgDest,
addressee: S1,
msg: S2,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
{
let final_msg = format!(
"{}{}{}",
addressee.borrow(),
if addressee.borrow().is_empty() {
""
} else {
&self.addressee_suffix
},
msg,
);
info!("Sending message to {:?}: {:?}", dest, final_msg);
let mut wrapped_msg = SmallVec::<[_; 1]>::new();
for input_line in final_msg.lines() {
wrap_msg(self, dest, input_line, |output_line| {
wrapped_msg.push(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(dest.target.to_owned(), output_line.to_owned()).into(),
));
Ok(())
})?;
}
match wrapped_msg.len() {
0 => Ok(None),
1 => Ok(Some(wrapped_msg.remove(0))),
_ => Ok(Some(LibReaction::Multi(wrapped_msg.into_vec()))),
}
}
fn compose_msgs<S1, S2, M>(
&self,
dest: MsgDest,
addressee: S1,
msgs: M,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
M: IntoIterator<Item = S2>,
{
// Not `SmallVec`, because we're guessing that the caller expects multiple messages.
let mut output = Vec::new();
for msg in msgs {
match self.compose_msg(dest, addressee.borrow(), msg)? {
Some(m) => output.push(m),
None => {}
}
}
match output.len() {
0 => Ok(None),
1 => Ok(Some(output.remove(0))),
_ => Ok(Some(LibReaction::Multi(output))),
}
}
fn prefix_len(&self, server_id: ServerId) -> Result<usize> {
Ok(self.read_msg_prefix(server_id)?.len())
}
}
fn wrap_msg<F>(
state: &State,
MsgDest { server_id, target }: MsgDest,
msg: &str,
mut f: F,
) -> Result<()>
where
F: FnMut(&str) -> Result<()>,
{
// :nick!user@host PRIVMSG target :message
// :nick!user@host NOTICE target :message
let raw_len_limit = 512;
let punctuation_len = {
let line_terminator_len = 2;
let spaces = 3;
let colons = 2;
colons + spaces + line_terminator_len
};
let cmd_len = "PRIVMSG".len();
let metadata_len = state.prefix_len(server_id)? + cmd_len + target.len() + punctuation_len;
let msg_len_limit = raw_len_limit - metadata_len;
if msg.len() < msg_len_limit {
return f(msg);
}
let mut split_end_idx = 0;
let lines = msg.match_indices(char::is_whitespace)
.peekable()
.batching(|iter| {
debug_assert!(msg.len() >= msg_len_limit);
let split_start_idx = split_end_idx;
if split_start_idx >= msg.len() {
return None;
}
while let Some(&(next_space_idx, _)) = iter.peek() {
if msg[split_start_idx..next_space_idx].len() < msg_len_limit {
split_end_idx = next_space_idx;
iter.next();
} else {
break;
}
}
if iter.peek().is_none() {
split_end_idx = msg.len()
} else if split_end_idx <= split_start_idx {
split_end_idx = cmp::min(split_start_idx + msg_len_limit, msg.len())
}
Some(msg[split_start_idx..split_end_idx].trim())
});
for line in lines {
f(line)?
}
Ok(())
}
fn handle_reaction(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: &str,
reaction: Reaction,
bot_nick: String,
) -> Result<Option<LibReaction<Message>>> {
let (reply_target, reply_addressee) = if target == bot_nick {
(prefix.parse().nick.unwrap(), "")
} else {
(target, prefix.parse().nick.unwrap_or(""))
};
let reply_dest = MsgDest {
server_id,
target: reply_target,
};
match reaction {
Reaction::None => Ok(None),
Reaction::Msg(s) => state.compose_msg(reply_dest, "", &s),
Reaction::Msgs(a) => state.compose_msgs(reply_dest, "", a.iter()),
Reaction::Reply(s) => state.compose_msg(reply_dest, reply_addressee, &s),
Reaction::Replies(a) => state.compose_msgs(reply_dest, reply_addressee, a.iter()),
Reaction::RawMsg(s) => Ok(Some(LibReaction::RawMsg(s.parse()?))),
Reaction::Quit(msg) => Ok(Some(mk_quit(msg))),
}
}
fn handle_bot_command_or_trigger(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: String,
msg: String,
bot_nick: String,
) -> Option<LibReaction<Message>> {
let reaction = (|| {
let metadata = MsgMetadata {
prefix: prefix.parse(),
dest: MsgDest {
server_id,
target: &target,
},
};
let cmd_ln = parse_msg_to_nick(&msg, metadata.dest.target, &bot_nick).unwrap_or("");
let mut cmd_name_and_args = cmd_ln.splitn(2, char::is_whitespace);
let cmd_name = cmd_name_and_args.next().unwrap_or("");
let cmd_args = cmd_name_and_args.next().unwrap_or("").trim();
if let Some(r) = bot_cmd::run(state, cmd_name, cmd_args, &metadata)? {
Ok(bot_command_reaction(cmd_name, r))
} else if let Some(r) = trigger::run_any_matching(state, cmd_ln, &metadata)? {
Ok(bot_command_reaction("<trigger>", r))
} else {
Ok(Reaction::None)
}
})();
match reaction
.and_then(|reaction| handle_reaction(state, server_id, prefix, &target, reaction, bot_nick))
{
Ok(r) => r,
Err(e) => Some(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
target,
format!("Encountered error while trying to handle message: {}", e),
).into(),
)),
}
}
fn bot_command_reaction(cmd_name: &str, result: BotCmdResult) -> Reaction {
let cmd_result = match result {
BotCmdResult::Ok(r) => Ok(r),
BotCmdResult::Unauthorized => Err(format!(
"My apologies, but you do not appear to have sufficient \
authority to use my {:?} command.",
cmd_name
).into()),
BotCmdResult::SyntaxErr => Err("Syntax error. Try my `help` command.".into()),
BotCmdResult::ArgMissing(arg_name) => Err(format!(
"Syntax error: For command {:?}, the argument {:?} \
is required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::ArgMissing1To1(arg_name) => Err(format!(
"Syntax error: When command {:?} is used \
outside of a channel, the argument {:?} is \
required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::LibErr(e) => Err(format!("Error: {}", e).into()),
BotCmdResult::UserErrMsg(s) => Err(format!("User error: {}", s).into()),
BotCmdResult::BotErrMsg(s) => Err(format!("Internal error: {}", s).into()),
};
match cmd_result {
Ok(r) => r,
Err(s) => Reaction::Msg(s),
}
}
pub fn mk_quit<'a>(msg: Option<Cow<'a, str>>) -> LibReaction<Message> {
let quit = aatxe::Command::QUIT(
msg.map(Cow::into_owned)
.or_else(|| Some(pkg_info::BRIEF_CREDITS_STRING.clone())),
).into();
LibReaction::RawMsg(quit)
}
pub(super) fn handle_msg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
input_msg: Message,
) -> Result<()> {
trace!(
"[{}] Received {:?}",
state.server_socket_addr_dbg_string(server_id),
input_msg.to_string().trim_right_matches("\r\n")
);
match input_msg {
Message {
command: aatxe::Command::PRIVMSG(target, msg),
prefix,
..
} => handle_privmsg(
state,
server_id,
outbox,
OwningMsgPrefix::from_string(prefix.unwrap_or_default()),
target,
msg,
),
Message {
command: aatxe::Command::Response(aatxe::Response::RPL_MYINFO,..),
..
} => {
push_to_outbox(outbox, server_id, handle_004(state, server_id)?);
Ok(())
}
_ => Ok(()),
}
}
fn handle_privmsg( | state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
prefix: OwningMsgPrefix,
target: String,
msg: String,
) -> Result<()> {
trace!(
"[{}] Handling PRIVMSG: {:?}",
state.server_socket_addr_dbg_string(server_id),
msg
);
let bot_nick = state.nick(server_id)?;
if!is_msg_to_nick(&target, &msg, &bot_nick) {
return Ok(());
}
if prefix.parse().nick == Some(&target) && msg.trim() == UPDATE_MSG_PREFIX_STR {
update_prefix_info(state, server_id, &prefix.parse())
} else {
// This could take a while or panic, so do it in a new thread.
// These are cheap to clone, supposedly.
let state = state.clone();
let outbox = outbox.clone();
let thread_spawn_result = thread::Builder::new().spawn(move || {
let lib_reaction =
handle_bot_command_or_trigger(&state, server_id, prefix, target, msg, bot_nick);
push_to_outbox(&outbox, server_id, lib_reaction);
});
match thread_spawn_result {
Ok(thread::JoinHandle {.. }) => Ok(()),
Err(e) => Err(ErrorKind::ThreadSpawnFailure(e).into()),
}
}
}
fn update_prefix_info(state: &State, _server_id: ServerId, prefix: &MsgPrefix) -> Result<()> {
debug!(
"Updating stored message prefix information from received {:?}",
prefix
);
match state.msg_prefix.write() {
Ok(guard) => guard,
Err(poisoned_guard) => {
// The lock was poisoned, you say? That's strange, unfortunate, and unlikely to be a
// problem here, because we're just going to overwrite the contents anyway.
warn!(
"Stored message prefix was poisoned by thread panic! Discarding it, replacing it, \
and moving on."
);
poisoned_guard.into_inner()
}
}.update_from(prefix);
Ok(())
}
fn handle_004(state: &State, server_id: ServerId) -> Result<LibReaction<Message>> {
// The server has finished sending the protocol-mandated welcome messages.
send_msg_prefix_update_request(state, server_id)
}
// TODO: Run `send_msg_prefix_update_request` periodically.
fn send_msg_prefix_update_request(
state: &State,
server_id: ServerId,
) -> Result<LibReaction<Message>> {
Ok(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
state.nick(server_id)?.to_owned(),
UPDATE_MSG_PREFIX_STR.to_owned(),
).into(),
))
} | random_line_split |
|
irc_comm.rs | use super::bot_cmd;
use super::irc_msgs::is_msg_to_nick;
use super::irc_msgs::OwningMsgPrefix;
use super::irc_send::push_to_outbox;
use super::irc_send::OutboxPort;
use super::parse_msg_to_nick;
use super::pkg_info;
use super::reaction::LibReaction;
use super::trigger;
use super::BotCmdResult;
use super::ErrorKind;
use super::MsgDest;
use super::MsgMetadata;
use super::MsgPrefix;
use super::Reaction;
use super::Result;
use super::ServerId;
use super::State;
use irc::client::prelude as aatxe;
use irc::proto::Message;
use itertools::Itertools;
use smallvec::SmallVec;
use std::borrow::Borrow;
use std::borrow::Cow;
use std::cmp;
use std::fmt::Display;
use std::sync::Arc;
use std::thread;
const UPDATE_MSG_PREFIX_STR: &'static str = "!!! UPDATE MESSAGE PREFIX!!!";
impl State {
fn compose_msg<S1, S2>(
&self,
dest: MsgDest,
addressee: S1,
msg: S2,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
{
let final_msg = format!(
"{}{}{}",
addressee.borrow(),
if addressee.borrow().is_empty() {
""
} else {
&self.addressee_suffix
},
msg,
);
info!("Sending message to {:?}: {:?}", dest, final_msg);
let mut wrapped_msg = SmallVec::<[_; 1]>::new();
for input_line in final_msg.lines() {
wrap_msg(self, dest, input_line, |output_line| {
wrapped_msg.push(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(dest.target.to_owned(), output_line.to_owned()).into(),
));
Ok(())
})?;
}
match wrapped_msg.len() {
0 => Ok(None),
1 => Ok(Some(wrapped_msg.remove(0))),
_ => Ok(Some(LibReaction::Multi(wrapped_msg.into_vec()))),
}
}
fn compose_msgs<S1, S2, M>(
&self,
dest: MsgDest,
addressee: S1,
msgs: M,
) -> Result<Option<LibReaction<Message>>>
where
S1: Borrow<str>,
S2: Display,
M: IntoIterator<Item = S2>,
{
// Not `SmallVec`, because we're guessing that the caller expects multiple messages.
let mut output = Vec::new();
for msg in msgs {
match self.compose_msg(dest, addressee.borrow(), msg)? {
Some(m) => output.push(m),
None => {}
}
}
match output.len() {
0 => Ok(None),
1 => Ok(Some(output.remove(0))),
_ => Ok(Some(LibReaction::Multi(output))),
}
}
fn prefix_len(&self, server_id: ServerId) -> Result<usize> {
Ok(self.read_msg_prefix(server_id)?.len())
}
}
fn wrap_msg<F>(
state: &State,
MsgDest { server_id, target }: MsgDest,
msg: &str,
mut f: F,
) -> Result<()>
where
F: FnMut(&str) -> Result<()>,
{
// :nick!user@host PRIVMSG target :message
// :nick!user@host NOTICE target :message
let raw_len_limit = 512;
let punctuation_len = {
let line_terminator_len = 2;
let spaces = 3;
let colons = 2;
colons + spaces + line_terminator_len
};
let cmd_len = "PRIVMSG".len();
let metadata_len = state.prefix_len(server_id)? + cmd_len + target.len() + punctuation_len;
let msg_len_limit = raw_len_limit - metadata_len;
if msg.len() < msg_len_limit {
return f(msg);
}
let mut split_end_idx = 0;
let lines = msg.match_indices(char::is_whitespace)
.peekable()
.batching(|iter| {
debug_assert!(msg.len() >= msg_len_limit);
let split_start_idx = split_end_idx;
if split_start_idx >= msg.len() {
return None;
}
while let Some(&(next_space_idx, _)) = iter.peek() {
if msg[split_start_idx..next_space_idx].len() < msg_len_limit {
split_end_idx = next_space_idx;
iter.next();
} else {
break;
}
}
if iter.peek().is_none() {
split_end_idx = msg.len()
} else if split_end_idx <= split_start_idx {
split_end_idx = cmp::min(split_start_idx + msg_len_limit, msg.len())
}
Some(msg[split_start_idx..split_end_idx].trim())
});
for line in lines {
f(line)?
}
Ok(())
}
fn handle_reaction(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: &str,
reaction: Reaction,
bot_nick: String,
) -> Result<Option<LibReaction<Message>>> {
let (reply_target, reply_addressee) = if target == bot_nick {
(prefix.parse().nick.unwrap(), "")
} else {
(target, prefix.parse().nick.unwrap_or(""))
};
let reply_dest = MsgDest {
server_id,
target: reply_target,
};
match reaction {
Reaction::None => Ok(None),
Reaction::Msg(s) => state.compose_msg(reply_dest, "", &s),
Reaction::Msgs(a) => state.compose_msgs(reply_dest, "", a.iter()),
Reaction::Reply(s) => state.compose_msg(reply_dest, reply_addressee, &s),
Reaction::Replies(a) => state.compose_msgs(reply_dest, reply_addressee, a.iter()),
Reaction::RawMsg(s) => Ok(Some(LibReaction::RawMsg(s.parse()?))),
Reaction::Quit(msg) => Ok(Some(mk_quit(msg))),
}
}
fn handle_bot_command_or_trigger(
state: &Arc<State>,
server_id: ServerId,
prefix: OwningMsgPrefix,
target: String,
msg: String,
bot_nick: String,
) -> Option<LibReaction<Message>> {
let reaction = (|| {
let metadata = MsgMetadata {
prefix: prefix.parse(),
dest: MsgDest {
server_id,
target: &target,
},
};
let cmd_ln = parse_msg_to_nick(&msg, metadata.dest.target, &bot_nick).unwrap_or("");
let mut cmd_name_and_args = cmd_ln.splitn(2, char::is_whitespace);
let cmd_name = cmd_name_and_args.next().unwrap_or("");
let cmd_args = cmd_name_and_args.next().unwrap_or("").trim();
if let Some(r) = bot_cmd::run(state, cmd_name, cmd_args, &metadata)? {
Ok(bot_command_reaction(cmd_name, r))
} else if let Some(r) = trigger::run_any_matching(state, cmd_ln, &metadata)? {
Ok(bot_command_reaction("<trigger>", r))
} else {
Ok(Reaction::None)
}
})();
match reaction
.and_then(|reaction| handle_reaction(state, server_id, prefix, &target, reaction, bot_nick))
{
Ok(r) => r,
Err(e) => Some(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
target,
format!("Encountered error while trying to handle message: {}", e),
).into(),
)),
}
}
fn bot_command_reaction(cmd_name: &str, result: BotCmdResult) -> Reaction {
let cmd_result = match result {
BotCmdResult::Ok(r) => Ok(r),
BotCmdResult::Unauthorized => Err(format!(
"My apologies, but you do not appear to have sufficient \
authority to use my {:?} command.",
cmd_name
).into()),
BotCmdResult::SyntaxErr => Err("Syntax error. Try my `help` command.".into()),
BotCmdResult::ArgMissing(arg_name) => Err(format!(
"Syntax error: For command {:?}, the argument {:?} \
is required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::ArgMissing1To1(arg_name) => Err(format!(
"Syntax error: When command {:?} is used \
outside of a channel, the argument {:?} is \
required, but it was not given.",
cmd_name, arg_name
).into()),
BotCmdResult::LibErr(e) => Err(format!("Error: {}", e).into()),
BotCmdResult::UserErrMsg(s) => Err(format!("User error: {}", s).into()),
BotCmdResult::BotErrMsg(s) => Err(format!("Internal error: {}", s).into()),
};
match cmd_result {
Ok(r) => r,
Err(s) => Reaction::Msg(s),
}
}
pub fn mk_quit<'a>(msg: Option<Cow<'a, str>>) -> LibReaction<Message> {
let quit = aatxe::Command::QUIT(
msg.map(Cow::into_owned)
.or_else(|| Some(pkg_info::BRIEF_CREDITS_STRING.clone())),
).into();
LibReaction::RawMsg(quit)
}
pub(super) fn handle_msg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
input_msg: Message,
) -> Result<()> {
trace!(
"[{}] Received {:?}",
state.server_socket_addr_dbg_string(server_id),
input_msg.to_string().trim_right_matches("\r\n")
);
match input_msg {
Message {
command: aatxe::Command::PRIVMSG(target, msg),
prefix,
..
} => handle_privmsg(
state,
server_id,
outbox,
OwningMsgPrefix::from_string(prefix.unwrap_or_default()),
target,
msg,
),
Message {
command: aatxe::Command::Response(aatxe::Response::RPL_MYINFO,..),
..
} => |
_ => Ok(()),
}
}
fn handle_privmsg(
state: &Arc<State>,
server_id: ServerId,
outbox: &OutboxPort,
prefix: OwningMsgPrefix,
target: String,
msg: String,
) -> Result<()> {
trace!(
"[{}] Handling PRIVMSG: {:?}",
state.server_socket_addr_dbg_string(server_id),
msg
);
let bot_nick = state.nick(server_id)?;
if!is_msg_to_nick(&target, &msg, &bot_nick) {
return Ok(());
}
if prefix.parse().nick == Some(&target) && msg.trim() == UPDATE_MSG_PREFIX_STR {
update_prefix_info(state, server_id, &prefix.parse())
} else {
// This could take a while or panic, so do it in a new thread.
// These are cheap to clone, supposedly.
let state = state.clone();
let outbox = outbox.clone();
let thread_spawn_result = thread::Builder::new().spawn(move || {
let lib_reaction =
handle_bot_command_or_trigger(&state, server_id, prefix, target, msg, bot_nick);
push_to_outbox(&outbox, server_id, lib_reaction);
});
match thread_spawn_result {
Ok(thread::JoinHandle {.. }) => Ok(()),
Err(e) => Err(ErrorKind::ThreadSpawnFailure(e).into()),
}
}
}
fn update_prefix_info(state: &State, _server_id: ServerId, prefix: &MsgPrefix) -> Result<()> {
debug!(
"Updating stored message prefix information from received {:?}",
prefix
);
match state.msg_prefix.write() {
Ok(guard) => guard,
Err(poisoned_guard) => {
// The lock was poisoned, you say? That's strange, unfortunate, and unlikely to be a
// problem here, because we're just going to overwrite the contents anyway.
warn!(
"Stored message prefix was poisoned by thread panic! Discarding it, replacing it, \
and moving on."
);
poisoned_guard.into_inner()
}
}.update_from(prefix);
Ok(())
}
fn handle_004(state: &State, server_id: ServerId) -> Result<LibReaction<Message>> {
// The server has finished sending the protocol-mandated welcome messages.
send_msg_prefix_update_request(state, server_id)
}
// TODO: Run `send_msg_prefix_update_request` periodically.
fn send_msg_prefix_update_request(
state: &State,
server_id: ServerId,
) -> Result<LibReaction<Message>> {
Ok(LibReaction::RawMsg(
aatxe::Command::PRIVMSG(
state.nick(server_id)?.to_owned(),
UPDATE_MSG_PREFIX_STR.to_owned(),
).into(),
))
}
| {
push_to_outbox(outbox, server_id, handle_004(state, server_id)?);
Ok(())
} | conditional_block |
settings.rs | use super::{
AddCertToStore, AddExtraChainCert, DerExportError, FileOpenFailed, FileReadFailed, MaybeTls,
NewStoreBuilder, ParsePkcs12, Pkcs12Error, PrivateKeyParseError, Result, SetCertificate,
SetPrivateKey, SetVerifyCert, TlsError, TlsIdentityError, X509ParseError,
};
use openssl::{
pkcs12::{ParsedPkcs12, Pkcs12},
pkey::{PKey, Private},
ssl::{ConnectConfiguration, SslContextBuilder, SslVerifyMode},
x509::{store::X509StoreBuilder, X509},
};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use std::fmt::{self, Debug, Formatter};
use std::fs::File;
use std::io::Read;
use std::path::{Path, PathBuf};
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct TlsConfig {
pub enabled: Option<bool>,
#[serde(flatten)]
pub options: TlsOptions,
}
/// Standard TLS options
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct TlsOptions {
pub verify_certificate: Option<bool>,
pub verify_hostname: Option<bool>,
pub ca_path: Option<PathBuf>,
pub crt_path: Option<PathBuf>,
pub key_path: Option<PathBuf>,
pub key_pass: Option<String>,
}
/// Directly usable settings for TLS connectors
#[derive(Clone, Default)]
pub struct TlsSettings {
verify_certificate: bool,
pub(super) verify_hostname: bool,
authority: Option<X509>,
pub(super) identity: Option<IdentityStore>, // openssl::pkcs12::ParsedPkcs12 doesn't impl Clone yet
}
#[derive(Clone)]
pub struct IdentityStore(Vec<u8>, String);
impl TlsSettings {
/// Generate a filled out settings struct from the given optional
/// option set, interpreted as client options. If `options` is
/// `None`, the result is set to defaults (ie empty).
pub fn from_options(options: &Option<TlsOptions>) -> Result<Self> {
Self::from_options_base(options, false)
}
pub(super) fn from_options_base(
options: &Option<TlsOptions>,
for_server: bool,
) -> Result<Self> {
let default = TlsOptions::default();
let options = options.as_ref().unwrap_or(&default);
if!for_server {
if options.verify_certificate == Some(false) {
warn!(
"`verify_certificate` is DISABLED, this may lead to security vulnerabilities"
);
}
if options.verify_hostname == Some(false) {
warn!("`verify_hostname` is DISABLED, this may lead to security vulnerabilities");
}
}
if options.key_path.is_some() && options.crt_path.is_none() {
return Err(TlsError::MissingCrtKeyFile.into());
}
let authority = match options.ca_path {
None => None,
Some(ref path) => Some(load_x509(path)?),
};
let identity = match options.crt_path {
None => None,
Some(ref crt_path) => {
let name = crt_path.to_string_lossy().to_string();
let cert_data = open_read(crt_path, "certificate")?;
let key_pass: &str = options.key_pass.as_ref().map(|s| s.as_str()).unwrap_or("");
match Pkcs12::from_der(&cert_data) {
// Certificate file is DER encoded PKCS#12 archive
Ok(pkcs12) => {
// Verify password
pkcs12.parse(&key_pass).context(ParsePkcs12)?;
Some(IdentityStore(cert_data, key_pass.to_string()))
}
Err(source) => {
if options.key_path.is_none() {
return Err(TlsError::ParsePkcs12 { source });
}
// Identity is a PEM encoded certficate+key pair
let crt = load_x509(crt_path)?;
let key_path = options.key_path.as_ref().unwrap();
let key = load_key(&key_path, &options.key_pass)?;
let pkcs12 = Pkcs12::builder()
.build("", &name, &key, &crt)
.context(Pkcs12Error)?;
let identity = pkcs12.to_der().context(DerExportError)?;
// Build the resulting parsed PKCS#12 archive,
// but don't store it, as it cannot be cloned.
// This is just for error checking.
pkcs12.parse("").context(TlsIdentityError)?;
Some(IdentityStore(identity, "".into()))
}
}
}
};
Ok(Self {
verify_certificate: options.verify_certificate.unwrap_or(!for_server),
verify_hostname: options.verify_hostname.unwrap_or(!for_server),
authority,
identity,
})
}
fn identity(&self) -> Option<ParsedPkcs12> {
// This data was test-built previously, so we can just use it
// here and expect the results will not fail. This can all be
// reworked when `openssl::pkcs12::ParsedPkcs12` gains the Clone
// impl.
self.identity.as_ref().map(|identity| {
Pkcs12::from_der(&identity.0)
.expect("Could not build PKCS#12 archive from parsed data")
.parse(&identity.1)
.expect("Could not parse stored PKCS#12 archive")
})
}
pub(super) fn apply_context(&self, context: &mut SslContextBuilder) -> Result<()> {
context.set_verify(if self.verify_certificate {
SslVerifyMode::PEER | SslVerifyMode::FAIL_IF_NO_PEER_CERT
} else {
SslVerifyMode::NONE
});
if let Some(identity) = self.identity() {
context
.set_certificate(&identity.cert)
.context(SetCertificate)?;
context
.set_private_key(&identity.pkey)
.context(SetPrivateKey)?;
if let Some(chain) = identity.chain {
for cert in chain {
context
.add_extra_chain_cert(cert)
.context(AddExtraChainCert)?;
}
}
}
if let Some(certificate) = &self.authority {
let mut store = X509StoreBuilder::new().context(NewStoreBuilder)?;
store
.add_cert(certificate.clone())
.context(AddCertToStore)?;
context
.set_verify_cert_store(store.build())
.context(SetVerifyCert)?;
}
Ok(())
}
pub fn apply_connect_configuration(&self, connection: &mut ConnectConfiguration) {
connection.set_verify_hostname(self.verify_hostname);
}
}
impl Debug for TlsSettings {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("TlsSettings")
.field("verify_certificate", &self.verify_certificate)
.field("verify_hostname", &self.verify_hostname)
.finish()
}
}
pub type MaybeTlsSettings = MaybeTls<(), TlsSettings>;
impl MaybeTlsSettings {
/// Generate an optional settings struct from the given optional
/// configuration reference. If `config` is `None`, TLS is
/// disabled. The `for_server` parameter indicates the options
/// should be interpreted as being for a TLS server, which requires
/// an identity certificate and changes the certificate verification
/// default to false.
pub fn from_config(config: &Option<TlsConfig>, for_server: bool) -> Result<Self> {
match config {
None => Ok(Self::Raw(())), // No config, no TLS settings
Some(config) => match config.enabled.unwrap_or(false) {
false => Ok(Self::Raw(())), // Explicitly disabled, still no TLS settings
true => {
let tls =
TlsSettings::from_options_base(&Some(config.options.clone()), for_server)?;
match (for_server, &tls.identity) {
// Servers require an identity certificate
(true, None) => Err(TlsError::MissingRequiredIdentity.into()),
_ => Ok(Self::Tls(tls)),
}
}
},
}
}
}
impl From<TlsSettings> for MaybeTlsSettings {
fn from(tls: TlsSettings) -> Self {
Self::Tls(tls)
}
}
/// Load a private key from a named file
fn load_key(filename: &Path, pass_phrase: &Option<String>) -> Result<PKey<Private>> {
let data = open_read(filename, "key")?;
match pass_phrase {
None => Ok(PKey::private_key_from_der(&data)
.or_else(|_| PKey::private_key_from_pem(&data))
.with_context(|| PrivateKeyParseError { filename })?),
Some(phrase) => Ok(
PKey::private_key_from_pkcs8_passphrase(&data, phrase.as_bytes())
.or_else(|_| PKey::private_key_from_pem_passphrase(&data, phrase.as_bytes()))
.with_context(|| PrivateKeyParseError { filename })?,
),
}
}
/// Load an X.509 certificate from a named file
fn load_x509(filename: &Path) -> Result<X509> {
let data = open_read(filename, "certificate")?;
Ok(X509::from_der(&data)
.or_else(|_| X509::from_pem(&data))
.with_context(|| X509ParseError { filename })?)
}
fn open_read(filename: &Path, note: &'static str) -> Result<Vec<u8>> {
let mut text = Vec::<u8>::new();
File::open(filename)
.with_context(|| FileOpenFailed { note, filename })?
.read_to_end(&mut text)
.with_context(|| FileReadFailed { note, filename })?;
Ok(text)
}
#[cfg(test)]
mod test {
use super::*;
const TEST_PKCS12: &str = "tests/data/localhost.p12";
const TEST_PEM_CRT: &str = "tests/data/localhost.crt";
const TEST_PEM_KEY: &str = "tests/data/localhost.key";
#[test]
fn from_options_pkcs12() {
let options = TlsOptions {
crt_path: Some(TEST_PKCS12.into()),
key_pass: Some("NOPASS".into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PKCS#12 certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_pem() |
#[test]
fn from_options_ca() {
let options = TlsOptions {
ca_path: Some("tests/data/Vector_CA.crt".into()),
..Default::default()
};
let settings = TlsSettings::from_options(&Some(options))
.expect("Failed to load authority certificate");
assert!(settings.identity.is_none());
assert!(settings.authority.is_some());
}
#[test]
fn from_options_none() {
let settings = TlsSettings::from_options(&None).expect("Failed to generate null settings");
assert!(settings.identity.is_none());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_bad_certificate() {
let options = TlsOptions {
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let error = TlsSettings::from_options(&Some(options))
.expect_err("from_options failed to check certificate");
assert!(matches!(error, TlsError::MissingCrtKeyFile));
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
..Default::default()
};
let _error = TlsSettings::from_options(&Some(options))
.expect_err("from_options failed to check certificate");
// Actual error is an ASN parse, doesn't really matter
}
#[test]
fn from_config_none() {
assert!(MaybeTlsSettings::from_config(&None, true).unwrap().is_raw());
assert!(MaybeTlsSettings::from_config(&None, false)
.unwrap()
.is_raw());
}
#[test]
fn from_config_not_enabled() {
assert!(settings_from_config(None, false, false, true).is_raw());
assert!(settings_from_config(None, false, false, false).is_raw());
assert!(settings_from_config(Some(false), false, false, true).is_raw());
assert!(settings_from_config(Some(false), false, false, false).is_raw());
}
#[test]
fn from_config_fails_without_certificate() {
let config = make_config(Some(true), false, false);
let error = MaybeTlsSettings::from_config(&Some(config), true)
.expect_err("from_config failed to check for a certificate");
assert!(matches!(error, TlsError::MissingRequiredIdentity));
}
#[test]
fn from_config_with_certificate() {
let config = settings_from_config(Some(true), true, true, true);
assert!(config.is_tls());
}
fn settings_from_config(
enabled: Option<bool>,
set_crt: bool,
set_key: bool,
for_server: bool,
) -> MaybeTlsSettings {
let config = make_config(enabled, set_crt, set_key);
MaybeTlsSettings::from_config(&Some(config), for_server)
.expect("Failed to generate settings from config")
}
fn make_config(enabled: Option<bool>, set_crt: bool, set_key: bool) -> TlsConfig {
TlsConfig {
enabled,
options: TlsOptions {
crt_path: and_some(set_crt, TEST_PEM_CRT.into()),
key_path: and_some(set_key, TEST_PEM_KEY.into()),
..Default::default()
},
}
}
// This can be eliminated once the `bool_to_option` feature migrates
// out of nightly.
fn and_some<T>(src: bool, value: T) -> Option<T> {
match src {
true => Some(value),
false => None,
}
}
}
| {
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PEM certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
} | identifier_body |
settings.rs | use super::{
AddCertToStore, AddExtraChainCert, DerExportError, FileOpenFailed, FileReadFailed, MaybeTls,
NewStoreBuilder, ParsePkcs12, Pkcs12Error, PrivateKeyParseError, Result, SetCertificate,
SetPrivateKey, SetVerifyCert, TlsError, TlsIdentityError, X509ParseError,
};
use openssl::{
pkcs12::{ParsedPkcs12, Pkcs12},
pkey::{PKey, Private},
ssl::{ConnectConfiguration, SslContextBuilder, SslVerifyMode},
x509::{store::X509StoreBuilder, X509},
};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use std::fmt::{self, Debug, Formatter};
use std::fs::File;
use std::io::Read;
use std::path::{Path, PathBuf};
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct TlsConfig {
pub enabled: Option<bool>,
#[serde(flatten)]
pub options: TlsOptions,
}
/// Standard TLS options
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct TlsOptions {
pub verify_certificate: Option<bool>,
pub verify_hostname: Option<bool>,
pub ca_path: Option<PathBuf>,
pub crt_path: Option<PathBuf>,
pub key_path: Option<PathBuf>,
pub key_pass: Option<String>,
}
/// Directly usable settings for TLS connectors
#[derive(Clone, Default)]
pub struct TlsSettings {
verify_certificate: bool,
pub(super) verify_hostname: bool,
authority: Option<X509>,
pub(super) identity: Option<IdentityStore>, // openssl::pkcs12::ParsedPkcs12 doesn't impl Clone yet
}
#[derive(Clone)]
pub struct IdentityStore(Vec<u8>, String);
impl TlsSettings {
/// Generate a filled out settings struct from the given optional
/// option set, interpreted as client options. If `options` is
/// `None`, the result is set to defaults (ie empty).
pub fn from_options(options: &Option<TlsOptions>) -> Result<Self> {
Self::from_options_base(options, false)
}
pub(super) fn from_options_base(
options: &Option<TlsOptions>,
for_server: bool,
) -> Result<Self> {
let default = TlsOptions::default();
let options = options.as_ref().unwrap_or(&default);
if!for_server {
if options.verify_certificate == Some(false) {
warn!(
"`verify_certificate` is DISABLED, this may lead to security vulnerabilities"
);
}
if options.verify_hostname == Some(false) {
warn!("`verify_hostname` is DISABLED, this may lead to security vulnerabilities");
}
}
if options.key_path.is_some() && options.crt_path.is_none() {
return Err(TlsError::MissingCrtKeyFile.into());
}
let authority = match options.ca_path {
None => None,
Some(ref path) => Some(load_x509(path)?),
};
let identity = match options.crt_path {
None => None,
Some(ref crt_path) => {
let name = crt_path.to_string_lossy().to_string();
let cert_data = open_read(crt_path, "certificate")?;
let key_pass: &str = options.key_pass.as_ref().map(|s| s.as_str()).unwrap_or("");
match Pkcs12::from_der(&cert_data) {
// Certificate file is DER encoded PKCS#12 archive
Ok(pkcs12) => {
// Verify password
pkcs12.parse(&key_pass).context(ParsePkcs12)?;
Some(IdentityStore(cert_data, key_pass.to_string()))
}
Err(source) => {
if options.key_path.is_none() {
return Err(TlsError::ParsePkcs12 { source });
}
// Identity is a PEM encoded certficate+key pair
let crt = load_x509(crt_path)?;
let key_path = options.key_path.as_ref().unwrap();
let key = load_key(&key_path, &options.key_pass)?;
let pkcs12 = Pkcs12::builder()
.build("", &name, &key, &crt)
.context(Pkcs12Error)?;
let identity = pkcs12.to_der().context(DerExportError)?;
// Build the resulting parsed PKCS#12 archive,
// but don't store it, as it cannot be cloned.
// This is just for error checking.
pkcs12.parse("").context(TlsIdentityError)?;
Some(IdentityStore(identity, "".into()))
}
}
}
};
Ok(Self {
verify_certificate: options.verify_certificate.unwrap_or(!for_server),
verify_hostname: options.verify_hostname.unwrap_or(!for_server),
authority,
identity,
})
}
fn identity(&self) -> Option<ParsedPkcs12> {
// This data was test-built previously, so we can just use it
// here and expect the results will not fail. This can all be
// reworked when `openssl::pkcs12::ParsedPkcs12` gains the Clone
// impl.
self.identity.as_ref().map(|identity| {
Pkcs12::from_der(&identity.0)
.expect("Could not build PKCS#12 archive from parsed data")
.parse(&identity.1)
.expect("Could not parse stored PKCS#12 archive")
})
}
pub(super) fn apply_context(&self, context: &mut SslContextBuilder) -> Result<()> {
context.set_verify(if self.verify_certificate {
SslVerifyMode::PEER | SslVerifyMode::FAIL_IF_NO_PEER_CERT
} else {
SslVerifyMode::NONE
});
if let Some(identity) = self.identity() {
context
.set_certificate(&identity.cert)
.context(SetCertificate)?;
context
.set_private_key(&identity.pkey)
.context(SetPrivateKey)?;
if let Some(chain) = identity.chain {
for cert in chain {
context
.add_extra_chain_cert(cert)
.context(AddExtraChainCert)?;
}
}
}
if let Some(certificate) = &self.authority {
let mut store = X509StoreBuilder::new().context(NewStoreBuilder)?;
store
.add_cert(certificate.clone())
.context(AddCertToStore)?;
context
.set_verify_cert_store(store.build())
.context(SetVerifyCert)?;
}
Ok(())
}
pub fn apply_connect_configuration(&self, connection: &mut ConnectConfiguration) {
connection.set_verify_hostname(self.verify_hostname);
}
}
impl Debug for TlsSettings {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("TlsSettings")
.field("verify_certificate", &self.verify_certificate)
.field("verify_hostname", &self.verify_hostname)
.finish()
}
}
pub type MaybeTlsSettings = MaybeTls<(), TlsSettings>;
impl MaybeTlsSettings {
/// Generate an optional settings struct from the given optional
/// configuration reference. If `config` is `None`, TLS is
/// disabled. The `for_server` parameter indicates the options
/// should be interpreted as being for a TLS server, which requires
/// an identity certificate and changes the certificate verification
/// default to false.
pub fn from_config(config: &Option<TlsConfig>, for_server: bool) -> Result<Self> {
match config {
None => Ok(Self::Raw(())), // No config, no TLS settings
Some(config) => match config.enabled.unwrap_or(false) {
false => Ok(Self::Raw(())), // Explicitly disabled, still no TLS settings
true => {
let tls =
TlsSettings::from_options_base(&Some(config.options.clone()), for_server)?;
match (for_server, &tls.identity) {
// Servers require an identity certificate
(true, None) => Err(TlsError::MissingRequiredIdentity.into()),
_ => Ok(Self::Tls(tls)),
}
}
},
}
}
}
impl From<TlsSettings> for MaybeTlsSettings {
fn from(tls: TlsSettings) -> Self {
Self::Tls(tls)
}
}
/// Load a private key from a named file
fn load_key(filename: &Path, pass_phrase: &Option<String>) -> Result<PKey<Private>> {
let data = open_read(filename, "key")?;
match pass_phrase {
None => Ok(PKey::private_key_from_der(&data)
.or_else(|_| PKey::private_key_from_pem(&data))
.with_context(|| PrivateKeyParseError { filename })?),
Some(phrase) => Ok(
PKey::private_key_from_pkcs8_passphrase(&data, phrase.as_bytes())
.or_else(|_| PKey::private_key_from_pem_passphrase(&data, phrase.as_bytes()))
.with_context(|| PrivateKeyParseError { filename })?,
),
}
}
/// Load an X.509 certificate from a named file
fn load_x509(filename: &Path) -> Result<X509> {
let data = open_read(filename, "certificate")?;
Ok(X509::from_der(&data)
.or_else(|_| X509::from_pem(&data))
.with_context(|| X509ParseError { filename })?)
}
fn open_read(filename: &Path, note: &'static str) -> Result<Vec<u8>> {
let mut text = Vec::<u8>::new();
File::open(filename)
.with_context(|| FileOpenFailed { note, filename })?
.read_to_end(&mut text)
.with_context(|| FileReadFailed { note, filename })?;
Ok(text)
}
#[cfg(test)]
mod test {
use super::*;
const TEST_PKCS12: &str = "tests/data/localhost.p12";
const TEST_PEM_CRT: &str = "tests/data/localhost.crt";
const TEST_PEM_KEY: &str = "tests/data/localhost.key";
#[test]
fn | () {
let options = TlsOptions {
crt_path: Some(TEST_PKCS12.into()),
key_pass: Some("NOPASS".into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PKCS#12 certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_pem() {
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PEM certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_ca() {
let options = TlsOptions {
ca_path: Some("tests/data/Vector_CA.crt".into()),
..Default::default()
};
let settings = TlsSettings::from_options(&Some(options))
.expect("Failed to load authority certificate");
assert!(settings.identity.is_none());
assert!(settings.authority.is_some());
}
#[test]
fn from_options_none() {
let settings = TlsSettings::from_options(&None).expect("Failed to generate null settings");
assert!(settings.identity.is_none());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_bad_certificate() {
let options = TlsOptions {
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let error = TlsSettings::from_options(&Some(options))
.expect_err("from_options failed to check certificate");
assert!(matches!(error, TlsError::MissingCrtKeyFile));
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
..Default::default()
};
let _error = TlsSettings::from_options(&Some(options))
.expect_err("from_options failed to check certificate");
// Actual error is an ASN parse, doesn't really matter
}
#[test]
fn from_config_none() {
assert!(MaybeTlsSettings::from_config(&None, true).unwrap().is_raw());
assert!(MaybeTlsSettings::from_config(&None, false)
.unwrap()
.is_raw());
}
#[test]
fn from_config_not_enabled() {
assert!(settings_from_config(None, false, false, true).is_raw());
assert!(settings_from_config(None, false, false, false).is_raw());
assert!(settings_from_config(Some(false), false, false, true).is_raw());
assert!(settings_from_config(Some(false), false, false, false).is_raw());
}
#[test]
fn from_config_fails_without_certificate() {
let config = make_config(Some(true), false, false);
let error = MaybeTlsSettings::from_config(&Some(config), true)
.expect_err("from_config failed to check for a certificate");
assert!(matches!(error, TlsError::MissingRequiredIdentity));
}
#[test]
fn from_config_with_certificate() {
let config = settings_from_config(Some(true), true, true, true);
assert!(config.is_tls());
}
fn settings_from_config(
enabled: Option<bool>,
set_crt: bool,
set_key: bool,
for_server: bool,
) -> MaybeTlsSettings {
let config = make_config(enabled, set_crt, set_key);
MaybeTlsSettings::from_config(&Some(config), for_server)
.expect("Failed to generate settings from config")
}
fn make_config(enabled: Option<bool>, set_crt: bool, set_key: bool) -> TlsConfig {
TlsConfig {
enabled,
options: TlsOptions {
crt_path: and_some(set_crt, TEST_PEM_CRT.into()),
key_path: and_some(set_key, TEST_PEM_KEY.into()),
..Default::default()
},
}
}
// This can be eliminated once the `bool_to_option` feature migrates
// out of nightly.
fn and_some<T>(src: bool, value: T) -> Option<T> {
match src {
true => Some(value),
false => None,
}
}
}
| from_options_pkcs12 | identifier_name |
settings.rs | use super::{
AddCertToStore, AddExtraChainCert, DerExportError, FileOpenFailed, FileReadFailed, MaybeTls,
NewStoreBuilder, ParsePkcs12, Pkcs12Error, PrivateKeyParseError, Result, SetCertificate,
SetPrivateKey, SetVerifyCert, TlsError, TlsIdentityError, X509ParseError,
};
use openssl::{
pkcs12::{ParsedPkcs12, Pkcs12},
pkey::{PKey, Private},
ssl::{ConnectConfiguration, SslContextBuilder, SslVerifyMode},
x509::{store::X509StoreBuilder, X509},
};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use std::fmt::{self, Debug, Formatter};
use std::fs::File;
use std::io::Read;
use std::path::{Path, PathBuf};
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct TlsConfig {
pub enabled: Option<bool>,
#[serde(flatten)]
pub options: TlsOptions,
}
/// Standard TLS options
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct TlsOptions {
pub verify_certificate: Option<bool>,
pub verify_hostname: Option<bool>,
pub ca_path: Option<PathBuf>,
pub crt_path: Option<PathBuf>,
pub key_path: Option<PathBuf>,
pub key_pass: Option<String>,
}
/// Directly usable settings for TLS connectors
#[derive(Clone, Default)]
pub struct TlsSettings {
verify_certificate: bool,
pub(super) verify_hostname: bool,
authority: Option<X509>,
pub(super) identity: Option<IdentityStore>, // openssl::pkcs12::ParsedPkcs12 doesn't impl Clone yet
}
#[derive(Clone)]
pub struct IdentityStore(Vec<u8>, String);
impl TlsSettings {
/// Generate a filled out settings struct from the given optional
/// option set, interpreted as client options. If `options` is
/// `None`, the result is set to defaults (ie empty).
pub fn from_options(options: &Option<TlsOptions>) -> Result<Self> {
Self::from_options_base(options, false)
}
pub(super) fn from_options_base(
options: &Option<TlsOptions>,
for_server: bool,
) -> Result<Self> {
let default = TlsOptions::default();
let options = options.as_ref().unwrap_or(&default);
if!for_server {
if options.verify_certificate == Some(false) {
warn!(
"`verify_certificate` is DISABLED, this may lead to security vulnerabilities"
);
}
if options.verify_hostname == Some(false) {
warn!("`verify_hostname` is DISABLED, this may lead to security vulnerabilities");
}
}
if options.key_path.is_some() && options.crt_path.is_none() {
return Err(TlsError::MissingCrtKeyFile.into());
}
let authority = match options.ca_path {
None => None,
Some(ref path) => Some(load_x509(path)?),
};
| let name = crt_path.to_string_lossy().to_string();
let cert_data = open_read(crt_path, "certificate")?;
let key_pass: &str = options.key_pass.as_ref().map(|s| s.as_str()).unwrap_or("");
match Pkcs12::from_der(&cert_data) {
// Certificate file is DER encoded PKCS#12 archive
Ok(pkcs12) => {
// Verify password
pkcs12.parse(&key_pass).context(ParsePkcs12)?;
Some(IdentityStore(cert_data, key_pass.to_string()))
}
Err(source) => {
if options.key_path.is_none() {
return Err(TlsError::ParsePkcs12 { source });
}
// Identity is a PEM encoded certficate+key pair
let crt = load_x509(crt_path)?;
let key_path = options.key_path.as_ref().unwrap();
let key = load_key(&key_path, &options.key_pass)?;
let pkcs12 = Pkcs12::builder()
.build("", &name, &key, &crt)
.context(Pkcs12Error)?;
let identity = pkcs12.to_der().context(DerExportError)?;
// Build the resulting parsed PKCS#12 archive,
// but don't store it, as it cannot be cloned.
// This is just for error checking.
pkcs12.parse("").context(TlsIdentityError)?;
Some(IdentityStore(identity, "".into()))
}
}
}
};
Ok(Self {
verify_certificate: options.verify_certificate.unwrap_or(!for_server),
verify_hostname: options.verify_hostname.unwrap_or(!for_server),
authority,
identity,
})
}
fn identity(&self) -> Option<ParsedPkcs12> {
// This data was test-built previously, so we can just use it
// here and expect the results will not fail. This can all be
// reworked when `openssl::pkcs12::ParsedPkcs12` gains the Clone
// impl.
self.identity.as_ref().map(|identity| {
Pkcs12::from_der(&identity.0)
.expect("Could not build PKCS#12 archive from parsed data")
.parse(&identity.1)
.expect("Could not parse stored PKCS#12 archive")
})
}
pub(super) fn apply_context(&self, context: &mut SslContextBuilder) -> Result<()> {
context.set_verify(if self.verify_certificate {
SslVerifyMode::PEER | SslVerifyMode::FAIL_IF_NO_PEER_CERT
} else {
SslVerifyMode::NONE
});
if let Some(identity) = self.identity() {
context
.set_certificate(&identity.cert)
.context(SetCertificate)?;
context
.set_private_key(&identity.pkey)
.context(SetPrivateKey)?;
if let Some(chain) = identity.chain {
for cert in chain {
context
.add_extra_chain_cert(cert)
.context(AddExtraChainCert)?;
}
}
}
if let Some(certificate) = &self.authority {
let mut store = X509StoreBuilder::new().context(NewStoreBuilder)?;
store
.add_cert(certificate.clone())
.context(AddCertToStore)?;
context
.set_verify_cert_store(store.build())
.context(SetVerifyCert)?;
}
Ok(())
}
pub fn apply_connect_configuration(&self, connection: &mut ConnectConfiguration) {
connection.set_verify_hostname(self.verify_hostname);
}
}
impl Debug for TlsSettings {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("TlsSettings")
.field("verify_certificate", &self.verify_certificate)
.field("verify_hostname", &self.verify_hostname)
.finish()
}
}
pub type MaybeTlsSettings = MaybeTls<(), TlsSettings>;
impl MaybeTlsSettings {
/// Generate an optional settings struct from the given optional
/// configuration reference. If `config` is `None`, TLS is
/// disabled. The `for_server` parameter indicates the options
/// should be interpreted as being for a TLS server, which requires
/// an identity certificate and changes the certificate verification
/// default to false.
pub fn from_config(config: &Option<TlsConfig>, for_server: bool) -> Result<Self> {
match config {
None => Ok(Self::Raw(())), // No config, no TLS settings
Some(config) => match config.enabled.unwrap_or(false) {
false => Ok(Self::Raw(())), // Explicitly disabled, still no TLS settings
true => {
let tls =
TlsSettings::from_options_base(&Some(config.options.clone()), for_server)?;
match (for_server, &tls.identity) {
// Servers require an identity certificate
(true, None) => Err(TlsError::MissingRequiredIdentity.into()),
_ => Ok(Self::Tls(tls)),
}
}
},
}
}
}
impl From<TlsSettings> for MaybeTlsSettings {
fn from(tls: TlsSettings) -> Self {
Self::Tls(tls)
}
}
/// Load a private key from a named file
fn load_key(filename: &Path, pass_phrase: &Option<String>) -> Result<PKey<Private>> {
let data = open_read(filename, "key")?;
match pass_phrase {
None => Ok(PKey::private_key_from_der(&data)
.or_else(|_| PKey::private_key_from_pem(&data))
.with_context(|| PrivateKeyParseError { filename })?),
Some(phrase) => Ok(
PKey::private_key_from_pkcs8_passphrase(&data, phrase.as_bytes())
.or_else(|_| PKey::private_key_from_pem_passphrase(&data, phrase.as_bytes()))
.with_context(|| PrivateKeyParseError { filename })?,
),
}
}
/// Load an X.509 certificate from a named file
fn load_x509(filename: &Path) -> Result<X509> {
let data = open_read(filename, "certificate")?;
Ok(X509::from_der(&data)
.or_else(|_| X509::from_pem(&data))
.with_context(|| X509ParseError { filename })?)
}
fn open_read(filename: &Path, note: &'static str) -> Result<Vec<u8>> {
let mut text = Vec::<u8>::new();
File::open(filename)
.with_context(|| FileOpenFailed { note, filename })?
.read_to_end(&mut text)
.with_context(|| FileReadFailed { note, filename })?;
Ok(text)
}
#[cfg(test)]
mod test {
use super::*;
const TEST_PKCS12: &str = "tests/data/localhost.p12";
const TEST_PEM_CRT: &str = "tests/data/localhost.crt";
const TEST_PEM_KEY: &str = "tests/data/localhost.key";
#[test]
fn from_options_pkcs12() {
let options = TlsOptions {
crt_path: Some(TEST_PKCS12.into()),
key_pass: Some("NOPASS".into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PKCS#12 certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_pem() {
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let settings =
TlsSettings::from_options(&Some(options)).expect("Failed to load PEM certificate");
assert!(settings.identity.is_some());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_ca() {
let options = TlsOptions {
ca_path: Some("tests/data/Vector_CA.crt".into()),
..Default::default()
};
let settings = TlsSettings::from_options(&Some(options))
.expect("Failed to load authority certificate");
assert!(settings.identity.is_none());
assert!(settings.authority.is_some());
}
#[test]
fn from_options_none() {
let settings = TlsSettings::from_options(&None).expect("Failed to generate null settings");
assert!(settings.identity.is_none());
assert!(settings.authority.is_none());
}
#[test]
fn from_options_bad_certificate() {
let options = TlsOptions {
key_path: Some(TEST_PEM_KEY.into()),
..Default::default()
};
let error = TlsSettings::from_options(&Some(options))
.expect_err("from_options failed to check certificate");
assert!(matches!(error, TlsError::MissingCrtKeyFile));
let options = TlsOptions {
crt_path: Some(TEST_PEM_CRT.into()),
..Default::default()
};
let _error = TlsSettings::from_options(&Some(options))
.expect_err("from_options failed to check certificate");
// Actual error is an ASN parse, doesn't really matter
}
#[test]
fn from_config_none() {
assert!(MaybeTlsSettings::from_config(&None, true).unwrap().is_raw());
assert!(MaybeTlsSettings::from_config(&None, false)
.unwrap()
.is_raw());
}
#[test]
fn from_config_not_enabled() {
assert!(settings_from_config(None, false, false, true).is_raw());
assert!(settings_from_config(None, false, false, false).is_raw());
assert!(settings_from_config(Some(false), false, false, true).is_raw());
assert!(settings_from_config(Some(false), false, false, false).is_raw());
}
#[test]
fn from_config_fails_without_certificate() {
let config = make_config(Some(true), false, false);
let error = MaybeTlsSettings::from_config(&Some(config), true)
.expect_err("from_config failed to check for a certificate");
assert!(matches!(error, TlsError::MissingRequiredIdentity));
}
#[test]
fn from_config_with_certificate() {
let config = settings_from_config(Some(true), true, true, true);
assert!(config.is_tls());
}
fn settings_from_config(
enabled: Option<bool>,
set_crt: bool,
set_key: bool,
for_server: bool,
) -> MaybeTlsSettings {
let config = make_config(enabled, set_crt, set_key);
MaybeTlsSettings::from_config(&Some(config), for_server)
.expect("Failed to generate settings from config")
}
fn make_config(enabled: Option<bool>, set_crt: bool, set_key: bool) -> TlsConfig {
TlsConfig {
enabled,
options: TlsOptions {
crt_path: and_some(set_crt, TEST_PEM_CRT.into()),
key_path: and_some(set_key, TEST_PEM_KEY.into()),
..Default::default()
},
}
}
// This can be eliminated once the `bool_to_option` feature migrates
// out of nightly.
fn and_some<T>(src: bool, value: T) -> Option<T> {
match src {
true => Some(value),
false => None,
}
}
} | let identity = match options.crt_path {
None => None,
Some(ref crt_path) => { | random_line_split |
layout_rope.rs | //! A rope-based vector of layouts.
use std::ops::Range;
use std::sync::Arc;
use druid::piet::{PietTextLayout, TextLayout};
use xi_rope::interval::{Interval, IntervalBounds};
use xi_rope::tree::{Cursor, DefaultMetric, Leaf, Metric, Node, NodeInfo, TreeBuilder};
/// A type representing a height measure.
///
/// Internally this is stored as `usize` using fixed point arithmetic,
/// for two reasons. First, it lets the rope reuse the `Metric` mechanism.
/// Second, it means that the monoid property is exact, which would not be
/// the case for `f64`.
///
/// Currently, there are 8 bits of fraction. On 32 bit platforms, that
/// means a maximum height of 16M, which should be good enough for most
/// practical use but could be a limitation. Of course, on 64 bit platforms,
/// the limit of 7.2e16 should never be a problem.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Height(usize);
/// An individual layout within the rope.
///
/// Right now, this is just a Piet TextLayout, but we might add more stuff.
pub struct Layout(PietTextLayout);
#[derive(Clone, Default)]
pub struct LayoutRope(Node<LayoutInfo>);
pub struct LayoutRopeBuilder(TreeBuilder<LayoutInfo>);
/// The height metric of the rope, which is in raw Height fractions.
struct HeightMetric;
/// The base metric of the rope, which just counts the number of layouts.
pub struct BaseMetric;
// This technically doesn't have to be newtyped, we could impl leaf on
// the Vec directly, but this feels cleaner.
#[derive(Clone, Default)]
struct LayoutLeaf {
data: Vec<(Height, Arc<Layout>)>,
}
#[derive(Clone)]
struct LayoutInfo {
/// The height of this section of rope.
height: Height,
}
impl std::ops::Add for Height {
type Output = Self;
fn add(self, other: Self) -> Self {
Height(self.0 + other.0)
}
}
impl std::ops::AddAssign for Height {
fn add_assign(&mut self, other: Self) {
self.0 += other.0
}
}
impl Height {
/// The number of fractional bits in the representation.
pub const HEIGHT_FRAC_BITS: usize = 8;
/// The scale factor for converting from `f64`.
pub const SCALE_FACTOR: f64 = (1 << Self::HEIGHT_FRAC_BITS) as f64;
pub const ZERO: Height = Height(0);
pub fn from_raw_frac(frac: usize) -> Height {
Height(frac)
}
pub fn as_raw_frac(self) -> usize {
self.0
}
pub fn from_f64(height: f64) -> Height {
Height((height * Self::SCALE_FACTOR).round() as usize)
}
pub fn to_f64(self) -> f64 {
self.0 as f64 / Self::SCALE_FACTOR
}
}
impl Layout {
pub fn new(inner: PietTextLayout) -> Layout {
Layout(inner)
}
pub fn piet_layout(&self) -> &PietTextLayout {
&self.0
}
pub fn height(&self) -> Height {
let size = self.0.size();
Height::from_f64(size.height)
}
}
impl NodeInfo for LayoutInfo {
type L = LayoutLeaf;
fn accumulate(&mut self, other: &Self) {
self.height += other.height;
}
fn compute_info(leaf: &Self::L) -> Self {
let mut height = Height::ZERO;
for (leaf_height, _) in &leaf.data {
height += *leaf_height;
}
LayoutInfo { height }
}
}
impl DefaultMetric for LayoutInfo {
type DefaultMetric = BaseMetric;
}
const MIN_LEAF: usize = 16;
const MAX_LEAF: usize = 32;
impl Leaf for LayoutLeaf {
fn len(&self) -> usize {
self.data.len()
}
fn is_ok_child(&self) -> bool {
self.data.len() >= MIN_LEAF
}
fn push_maybe_split(&mut self, other: &Self, iv: Interval) -> Option<Self> {
let (start, end) = iv.start_end();
self.data.extend_from_slice(&other.data[start..end]);
if self.len() <= MAX_LEAF {
None
} else {
let splitpoint = self.len() / 2;
let right_vec = self.data.split_off(splitpoint);
Some(LayoutLeaf { data: right_vec })
}
}
}
impl From<Vec<(Height, Arc<Layout>)>> for LayoutRope {
fn from(v: Vec<(Height, Arc<Layout>)>) -> Self {
LayoutRope(Node::from_leaf(LayoutLeaf { data: v }))
}
}
impl LayoutRope {
/// The number of layouts in the rope.
pub fn len(&self) -> usize {
self.0.len()
}
/// The total height of the rope.
pub fn height(&self) -> Height {
Height::from_raw_frac(self.0.measure::<HeightMetric>())
}
/// A rope consisting of a single layout.
pub fn singleton(item: Layout) -> LayoutRope {
LayoutRope(Node::from_leaf(Self::singleton_leaf(item)))
}
fn singleton_leaf(item: Layout) -> LayoutLeaf {
let height = item.height();
LayoutLeaf {
data: vec![(height, Arc::new(item))],
}
}
pub fn get(&self, index: usize) -> Option<(Height, &Layout)> {
let cursor = Cursor::new(&self.0, index);
cursor
.get_leaf()
.and_then(|(leaf, offset)| leaf.data.get(offset))
.map(|(height, layout)| (*height, &**layout))
}
// These mutation methods might go away in favor of using the builder.
pub fn push(&mut self, item: Layout) {
let el = Self::singleton(item);
// This could be optimized more.
self.0 = Node::concat(self.0.clone(), el.0)
}
pub fn | (&mut self, index: usize) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn set(&mut self, index: usize, item: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(item));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn insert(&mut self, index: usize, value: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(value));
self.push_subseq(&mut b, Interval::new(index, self.len()));
self.0 = b.build();
}
fn iter_chunks(&self, range: impl IntervalBounds) -> ChunkIter {
let Interval { start, end } = range.into_interval(self.len());
ChunkIter {
cursor: Cursor::new(&self.0, start),
end,
}
}
/// The height at the top of the layout at the given index.
///
/// This is simply the sum of the heights of the layouts that come before
/// it.
pub fn height_of_index(&self, index: usize) -> Height {
Height::from_raw_frac(self.0.count::<HeightMetric>(index))
}
/// The layout at the given height.
///
/// Edge cases get interesting (especially since zero-height layouts are
/// not forbidden), so here is a more precise spec: it is the first layout
/// that either contains (in the closed-open interval sense) the given
/// height, or is a zero-height layout at the given height.
///
/// If the total height is given and the rope does not end on a zero-height
/// layout, then it returns the number of layouts.
///
/// TODO: is there a simpler way to state that? It seems more complicated
/// than it should be.
pub fn index_of_height(&self, height: Height) -> usize {
self.0
.count_base_units::<HeightMetric>(height.as_raw_frac())
}
fn push_subseq(&self, b: &mut TreeBuilder<LayoutInfo>, iv: Interval) {
// TODO: if we make the push_subseq method in xi-rope public, we can save some
// allocations.
b.push(self.0.subseq(iv));
}
}
impl LayoutRopeBuilder {
pub fn new() -> LayoutRopeBuilder {
LayoutRopeBuilder(TreeBuilder::new())
}
#[allow(unused)]
pub fn push_rope_slice(&mut self, other: &LayoutRope, range: Range<usize>) {
// TODO: use push_subseq method on TreeBuilder when that lands.
self.0.push(other.0.subseq(Interval::from(range)))
}
pub fn push_layout(&mut self, layout: Layout) {
// Maybe move the body of singleton_leaf to here?
self.0.push_leaf(LayoutRope::singleton_leaf(layout))
}
pub fn build(self) -> LayoutRope {
LayoutRope(self.0.build())
}
}
impl<'a> IntoIterator for &'a LayoutRope {
// Maybe `(Height, &'a Layout)` would be better, not to expose the internal
// representation, but it's a bit more work.
type Item = &'a (Height, Arc<Layout>);
type IntoIter = std::iter::Flatten<ChunkIter<'a>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_chunks(..).flatten()
}
}
pub struct ChunkIter<'a> {
cursor: Cursor<'a, LayoutInfo>,
end: usize,
}
impl<'a> Iterator for ChunkIter<'a> {
type Item = &'a [(Height, Arc<Layout>)];
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.pos() >= self.end {
return None;
}
let (leaf, start_pos) = self.cursor.get_leaf().unwrap();
let len = (self.end - self.cursor.pos()).min(leaf.len() - start_pos);
self.cursor.next_leaf();
Some(&leaf.data[start_pos..start_pos + len])
}
}
impl Metric<LayoutInfo> for BaseMetric {
fn measure(_: &LayoutInfo, len: usize) -> usize {
len
}
fn to_base_units(_l: &LayoutLeaf, in_measured_units: usize) -> usize {
in_measured_units
}
fn from_base_units(_l: &LayoutLeaf, in_base_units: usize) -> usize {
in_base_units
}
fn is_boundary(_l: &LayoutLeaf, _offset: usize) -> bool {
true
}
fn prev(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset - 1)
}
fn next(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset + 1)
}
fn can_fragment() -> bool {
false
}
}
impl Metric<LayoutInfo> for HeightMetric {
fn measure(info: &LayoutInfo, _len: usize) -> usize {
info.height.as_raw_frac()
}
fn from_base_units(l: &LayoutLeaf, in_base_units: usize) -> usize {
let mut height = Height::ZERO;
for (h, _el) in &l.data[..in_base_units] {
height += *h;
}
height.as_raw_frac()
}
fn to_base_units(l: &LayoutLeaf, in_measured_units: usize) -> usize {
let mut m1 = in_measured_units;
let mut m2 = 0;
for (h, _el) in &l.data {
if m1 == 0 || m1 < h.as_raw_frac() {
break;
}
m1 -= h.as_raw_frac();
m2 += 1;
}
m2
}
fn is_boundary(_l: &LayoutLeaf, _offset: usize) -> bool {
true
}
fn prev(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset - 1)
}
fn next(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset + 1)
}
fn can_fragment() -> bool {
// The documentation in xi-rope is confusing (TODO: fix that),
// but basically this predicate asks whether a nonempty leaf
// may contain zero measure. Since we're not disallowing that,
// we say "yes" here. If we did disallow zero-height layouts,
// then this stuff would be (slightly) more efficient.
true
}
}
| remove | identifier_name |
layout_rope.rs | //! A rope-based vector of layouts.
use std::ops::Range;
use std::sync::Arc;
use druid::piet::{PietTextLayout, TextLayout};
use xi_rope::interval::{Interval, IntervalBounds};
use xi_rope::tree::{Cursor, DefaultMetric, Leaf, Metric, Node, NodeInfo, TreeBuilder};
/// A type representing a height measure.
///
/// Internally this is stored as `usize` using fixed point arithmetic,
/// for two reasons. First, it lets the rope reuse the `Metric` mechanism.
/// Second, it means that the monoid property is exact, which would not be
/// the case for `f64`.
///
/// Currently, there are 8 bits of fraction. On 32 bit platforms, that
/// means a maximum height of 16M, which should be good enough for most
/// practical use but could be a limitation. Of course, on 64 bit platforms,
/// the limit of 7.2e16 should never be a problem.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Height(usize);
/// An individual layout within the rope.
///
/// Right now, this is just a Piet TextLayout, but we might add more stuff.
pub struct Layout(PietTextLayout);
#[derive(Clone, Default)]
pub struct LayoutRope(Node<LayoutInfo>);
pub struct LayoutRopeBuilder(TreeBuilder<LayoutInfo>);
/// The height metric of the rope, which is in raw Height fractions.
struct HeightMetric;
/// The base metric of the rope, which just counts the number of layouts.
pub struct BaseMetric;
// This technically doesn't have to be newtyped, we could impl leaf on
// the Vec directly, but this feels cleaner.
#[derive(Clone, Default)]
struct LayoutLeaf {
data: Vec<(Height, Arc<Layout>)>,
}
#[derive(Clone)]
struct LayoutInfo {
/// The height of this section of rope.
height: Height,
}
impl std::ops::Add for Height {
type Output = Self;
fn add(self, other: Self) -> Self {
Height(self.0 + other.0)
}
}
impl std::ops::AddAssign for Height {
fn add_assign(&mut self, other: Self) {
self.0 += other.0
}
}
impl Height {
/// The number of fractional bits in the representation.
pub const HEIGHT_FRAC_BITS: usize = 8;
/// The scale factor for converting from `f64`.
pub const SCALE_FACTOR: f64 = (1 << Self::HEIGHT_FRAC_BITS) as f64;
pub const ZERO: Height = Height(0);
pub fn from_raw_frac(frac: usize) -> Height {
Height(frac)
}
pub fn as_raw_frac(self) -> usize {
self.0
}
pub fn from_f64(height: f64) -> Height {
Height((height * Self::SCALE_FACTOR).round() as usize)
}
pub fn to_f64(self) -> f64 {
self.0 as f64 / Self::SCALE_FACTOR
}
}
impl Layout {
pub fn new(inner: PietTextLayout) -> Layout {
Layout(inner)
}
pub fn piet_layout(&self) -> &PietTextLayout {
&self.0
}
pub fn height(&self) -> Height {
let size = self.0.size();
Height::from_f64(size.height)
}
}
impl NodeInfo for LayoutInfo {
type L = LayoutLeaf;
fn accumulate(&mut self, other: &Self) {
self.height += other.height;
}
fn compute_info(leaf: &Self::L) -> Self {
let mut height = Height::ZERO;
for (leaf_height, _) in &leaf.data {
height += *leaf_height;
}
LayoutInfo { height }
}
}
impl DefaultMetric for LayoutInfo {
type DefaultMetric = BaseMetric;
}
const MIN_LEAF: usize = 16;
const MAX_LEAF: usize = 32;
impl Leaf for LayoutLeaf {
fn len(&self) -> usize {
self.data.len()
}
fn is_ok_child(&self) -> bool {
self.data.len() >= MIN_LEAF
}
fn push_maybe_split(&mut self, other: &Self, iv: Interval) -> Option<Self> {
let (start, end) = iv.start_end();
self.data.extend_from_slice(&other.data[start..end]);
if self.len() <= MAX_LEAF {
None
} else {
let splitpoint = self.len() / 2;
let right_vec = self.data.split_off(splitpoint);
Some(LayoutLeaf { data: right_vec })
}
}
}
impl From<Vec<(Height, Arc<Layout>)>> for LayoutRope {
fn from(v: Vec<(Height, Arc<Layout>)>) -> Self {
LayoutRope(Node::from_leaf(LayoutLeaf { data: v }))
}
}
impl LayoutRope {
/// The number of layouts in the rope.
pub fn len(&self) -> usize {
self.0.len()
}
/// The total height of the rope.
pub fn height(&self) -> Height {
Height::from_raw_frac(self.0.measure::<HeightMetric>())
}
/// A rope consisting of a single layout.
pub fn singleton(item: Layout) -> LayoutRope {
LayoutRope(Node::from_leaf(Self::singleton_leaf(item)))
}
fn singleton_leaf(item: Layout) -> LayoutLeaf {
let height = item.height();
LayoutLeaf {
data: vec![(height, Arc::new(item))],
}
}
pub fn get(&self, index: usize) -> Option<(Height, &Layout)> {
let cursor = Cursor::new(&self.0, index);
cursor
.get_leaf()
.and_then(|(leaf, offset)| leaf.data.get(offset))
.map(|(height, layout)| (*height, &**layout))
}
// These mutation methods might go away in favor of using the builder.
pub fn push(&mut self, item: Layout) {
let el = Self::singleton(item);
// This could be optimized more.
self.0 = Node::concat(self.0.clone(), el.0)
}
pub fn remove(&mut self, index: usize) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn set(&mut self, index: usize, item: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(item));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn insert(&mut self, index: usize, value: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(value));
self.push_subseq(&mut b, Interval::new(index, self.len()));
self.0 = b.build();
}
fn iter_chunks(&self, range: impl IntervalBounds) -> ChunkIter {
let Interval { start, end } = range.into_interval(self.len());
ChunkIter {
cursor: Cursor::new(&self.0, start),
end,
}
}
/// The height at the top of the layout at the given index.
///
/// This is simply the sum of the heights of the layouts that come before
/// it.
pub fn height_of_index(&self, index: usize) -> Height {
Height::from_raw_frac(self.0.count::<HeightMetric>(index))
}
/// The layout at the given height.
///
/// Edge cases get interesting (especially since zero-height layouts are
/// not forbidden), so here is a more precise spec: it is the first layout
/// that either contains (in the closed-open interval sense) the given
/// height, or is a zero-height layout at the given height.
///
/// If the total height is given and the rope does not end on a zero-height
/// layout, then it returns the number of layouts.
///
/// TODO: is there a simpler way to state that? It seems more complicated
/// than it should be.
pub fn index_of_height(&self, height: Height) -> usize |
fn push_subseq(&self, b: &mut TreeBuilder<LayoutInfo>, iv: Interval) {
// TODO: if we make the push_subseq method in xi-rope public, we can save some
// allocations.
b.push(self.0.subseq(iv));
}
}
impl LayoutRopeBuilder {
pub fn new() -> LayoutRopeBuilder {
LayoutRopeBuilder(TreeBuilder::new())
}
#[allow(unused)]
pub fn push_rope_slice(&mut self, other: &LayoutRope, range: Range<usize>) {
// TODO: use push_subseq method on TreeBuilder when that lands.
self.0.push(other.0.subseq(Interval::from(range)))
}
pub fn push_layout(&mut self, layout: Layout) {
// Maybe move the body of singleton_leaf to here?
self.0.push_leaf(LayoutRope::singleton_leaf(layout))
}
pub fn build(self) -> LayoutRope {
LayoutRope(self.0.build())
}
}
impl<'a> IntoIterator for &'a LayoutRope {
// Maybe `(Height, &'a Layout)` would be better, not to expose the internal
// representation, but it's a bit more work.
type Item = &'a (Height, Arc<Layout>);
type IntoIter = std::iter::Flatten<ChunkIter<'a>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_chunks(..).flatten()
}
}
pub struct ChunkIter<'a> {
cursor: Cursor<'a, LayoutInfo>,
end: usize,
}
impl<'a> Iterator for ChunkIter<'a> {
type Item = &'a [(Height, Arc<Layout>)];
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.pos() >= self.end {
return None;
}
let (leaf, start_pos) = self.cursor.get_leaf().unwrap();
let len = (self.end - self.cursor.pos()).min(leaf.len() - start_pos);
self.cursor.next_leaf();
Some(&leaf.data[start_pos..start_pos + len])
}
}
impl Metric<LayoutInfo> for BaseMetric {
fn measure(_: &LayoutInfo, len: usize) -> usize {
len
}
fn to_base_units(_l: &LayoutLeaf, in_measured_units: usize) -> usize {
in_measured_units
}
fn from_base_units(_l: &LayoutLeaf, in_base_units: usize) -> usize {
in_base_units
}
fn is_boundary(_l: &LayoutLeaf, _offset: usize) -> bool {
true
}
fn prev(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset - 1)
}
fn next(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset + 1)
}
fn can_fragment() -> bool {
false
}
}
impl Metric<LayoutInfo> for HeightMetric {
fn measure(info: &LayoutInfo, _len: usize) -> usize {
info.height.as_raw_frac()
}
fn from_base_units(l: &LayoutLeaf, in_base_units: usize) -> usize {
let mut height = Height::ZERO;
for (h, _el) in &l.data[..in_base_units] {
height += *h;
}
height.as_raw_frac()
}
fn to_base_units(l: &LayoutLeaf, in_measured_units: usize) -> usize {
let mut m1 = in_measured_units;
let mut m2 = 0;
for (h, _el) in &l.data {
if m1 == 0 || m1 < h.as_raw_frac() {
break;
}
m1 -= h.as_raw_frac();
m2 += 1;
}
m2
}
fn is_boundary(_l: &LayoutLeaf, _offset: usize) -> bool {
true
}
fn prev(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset - 1)
}
fn next(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset + 1)
}
fn can_fragment() -> bool {
// The documentation in xi-rope is confusing (TODO: fix that),
// but basically this predicate asks whether a nonempty leaf
// may contain zero measure. Since we're not disallowing that,
// we say "yes" here. If we did disallow zero-height layouts,
// then this stuff would be (slightly) more efficient.
true
}
}
| {
self.0
.count_base_units::<HeightMetric>(height.as_raw_frac())
} | identifier_body |
layout_rope.rs | //! A rope-based vector of layouts.
use std::ops::Range;
use std::sync::Arc;
use druid::piet::{PietTextLayout, TextLayout};
use xi_rope::interval::{Interval, IntervalBounds};
use xi_rope::tree::{Cursor, DefaultMetric, Leaf, Metric, Node, NodeInfo, TreeBuilder};
/// A type representing a height measure.
///
/// Internally this is stored as `usize` using fixed point arithmetic,
/// for two reasons. First, it lets the rope reuse the `Metric` mechanism.
/// Second, it means that the monoid property is exact, which would not be
/// the case for `f64`.
///
/// Currently, there are 8 bits of fraction. On 32 bit platforms, that
/// means a maximum height of 16M, which should be good enough for most
/// practical use but could be a limitation. Of course, on 64 bit platforms,
/// the limit of 7.2e16 should never be a problem.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Height(usize);
/// An individual layout within the rope.
///
/// Right now, this is just a Piet TextLayout, but we might add more stuff.
pub struct Layout(PietTextLayout);
#[derive(Clone, Default)]
pub struct LayoutRope(Node<LayoutInfo>);
pub struct LayoutRopeBuilder(TreeBuilder<LayoutInfo>);
/// The height metric of the rope, which is in raw Height fractions.
struct HeightMetric;
/// The base metric of the rope, which just counts the number of layouts.
pub struct BaseMetric;
// This technically doesn't have to be newtyped, we could impl leaf on
// the Vec directly, but this feels cleaner.
#[derive(Clone, Default)]
struct LayoutLeaf {
data: Vec<(Height, Arc<Layout>)>,
}
#[derive(Clone)]
struct LayoutInfo {
/// The height of this section of rope.
height: Height,
}
impl std::ops::Add for Height {
type Output = Self;
fn add(self, other: Self) -> Self {
Height(self.0 + other.0)
}
}
impl std::ops::AddAssign for Height {
fn add_assign(&mut self, other: Self) {
self.0 += other.0
}
}
impl Height {
/// The number of fractional bits in the representation.
pub const HEIGHT_FRAC_BITS: usize = 8;
/// The scale factor for converting from `f64`.
pub const SCALE_FACTOR: f64 = (1 << Self::HEIGHT_FRAC_BITS) as f64;
pub const ZERO: Height = Height(0);
pub fn from_raw_frac(frac: usize) -> Height {
Height(frac)
}
pub fn as_raw_frac(self) -> usize {
self.0
}
pub fn from_f64(height: f64) -> Height {
Height((height * Self::SCALE_FACTOR).round() as usize)
}
pub fn to_f64(self) -> f64 {
self.0 as f64 / Self::SCALE_FACTOR
}
}
impl Layout {
pub fn new(inner: PietTextLayout) -> Layout {
Layout(inner)
}
pub fn piet_layout(&self) -> &PietTextLayout {
&self.0
}
pub fn height(&self) -> Height {
let size = self.0.size();
Height::from_f64(size.height)
}
}
impl NodeInfo for LayoutInfo {
type L = LayoutLeaf;
fn accumulate(&mut self, other: &Self) {
self.height += other.height;
}
fn compute_info(leaf: &Self::L) -> Self {
let mut height = Height::ZERO;
for (leaf_height, _) in &leaf.data {
height += *leaf_height;
}
LayoutInfo { height }
}
}
impl DefaultMetric for LayoutInfo {
type DefaultMetric = BaseMetric;
}
const MIN_LEAF: usize = 16;
const MAX_LEAF: usize = 32;
impl Leaf for LayoutLeaf {
fn len(&self) -> usize {
self.data.len()
}
fn is_ok_child(&self) -> bool {
self.data.len() >= MIN_LEAF
}
fn push_maybe_split(&mut self, other: &Self, iv: Interval) -> Option<Self> {
let (start, end) = iv.start_end();
self.data.extend_from_slice(&other.data[start..end]);
if self.len() <= MAX_LEAF {
None
} else |
}
}
impl From<Vec<(Height, Arc<Layout>)>> for LayoutRope {
fn from(v: Vec<(Height, Arc<Layout>)>) -> Self {
LayoutRope(Node::from_leaf(LayoutLeaf { data: v }))
}
}
impl LayoutRope {
/// The number of layouts in the rope.
pub fn len(&self) -> usize {
self.0.len()
}
/// The total height of the rope.
pub fn height(&self) -> Height {
Height::from_raw_frac(self.0.measure::<HeightMetric>())
}
/// A rope consisting of a single layout.
pub fn singleton(item: Layout) -> LayoutRope {
LayoutRope(Node::from_leaf(Self::singleton_leaf(item)))
}
fn singleton_leaf(item: Layout) -> LayoutLeaf {
let height = item.height();
LayoutLeaf {
data: vec![(height, Arc::new(item))],
}
}
pub fn get(&self, index: usize) -> Option<(Height, &Layout)> {
let cursor = Cursor::new(&self.0, index);
cursor
.get_leaf()
.and_then(|(leaf, offset)| leaf.data.get(offset))
.map(|(height, layout)| (*height, &**layout))
}
// These mutation methods might go away in favor of using the builder.
pub fn push(&mut self, item: Layout) {
let el = Self::singleton(item);
// This could be optimized more.
self.0 = Node::concat(self.0.clone(), el.0)
}
pub fn remove(&mut self, index: usize) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn set(&mut self, index: usize, item: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(item));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn insert(&mut self, index: usize, value: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(value));
self.push_subseq(&mut b, Interval::new(index, self.len()));
self.0 = b.build();
}
fn iter_chunks(&self, range: impl IntervalBounds) -> ChunkIter {
let Interval { start, end } = range.into_interval(self.len());
ChunkIter {
cursor: Cursor::new(&self.0, start),
end,
}
}
/// The height at the top of the layout at the given index.
///
/// This is simply the sum of the heights of the layouts that come before
/// it.
pub fn height_of_index(&self, index: usize) -> Height {
Height::from_raw_frac(self.0.count::<HeightMetric>(index))
}
/// The layout at the given height.
///
/// Edge cases get interesting (especially since zero-height layouts are
/// not forbidden), so here is a more precise spec: it is the first layout
/// that either contains (in the closed-open interval sense) the given
/// height, or is a zero-height layout at the given height.
///
/// If the total height is given and the rope does not end on a zero-height
/// layout, then it returns the number of layouts.
///
/// TODO: is there a simpler way to state that? It seems more complicated
/// than it should be.
pub fn index_of_height(&self, height: Height) -> usize {
self.0
.count_base_units::<HeightMetric>(height.as_raw_frac())
}
fn push_subseq(&self, b: &mut TreeBuilder<LayoutInfo>, iv: Interval) {
// TODO: if we make the push_subseq method in xi-rope public, we can save some
// allocations.
b.push(self.0.subseq(iv));
}
}
impl LayoutRopeBuilder {
pub fn new() -> LayoutRopeBuilder {
LayoutRopeBuilder(TreeBuilder::new())
}
#[allow(unused)]
pub fn push_rope_slice(&mut self, other: &LayoutRope, range: Range<usize>) {
// TODO: use push_subseq method on TreeBuilder when that lands.
self.0.push(other.0.subseq(Interval::from(range)))
}
pub fn push_layout(&mut self, layout: Layout) {
// Maybe move the body of singleton_leaf to here?
self.0.push_leaf(LayoutRope::singleton_leaf(layout))
}
pub fn build(self) -> LayoutRope {
LayoutRope(self.0.build())
}
}
impl<'a> IntoIterator for &'a LayoutRope {
// Maybe `(Height, &'a Layout)` would be better, not to expose the internal
// representation, but it's a bit more work.
type Item = &'a (Height, Arc<Layout>);
type IntoIter = std::iter::Flatten<ChunkIter<'a>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_chunks(..).flatten()
}
}
pub struct ChunkIter<'a> {
cursor: Cursor<'a, LayoutInfo>,
end: usize,
}
impl<'a> Iterator for ChunkIter<'a> {
type Item = &'a [(Height, Arc<Layout>)];
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.pos() >= self.end {
return None;
}
let (leaf, start_pos) = self.cursor.get_leaf().unwrap();
let len = (self.end - self.cursor.pos()).min(leaf.len() - start_pos);
self.cursor.next_leaf();
Some(&leaf.data[start_pos..start_pos + len])
}
}
impl Metric<LayoutInfo> for BaseMetric {
fn measure(_: &LayoutInfo, len: usize) -> usize {
len
}
fn to_base_units(_l: &LayoutLeaf, in_measured_units: usize) -> usize {
in_measured_units
}
fn from_base_units(_l: &LayoutLeaf, in_base_units: usize) -> usize {
in_base_units
}
fn is_boundary(_l: &LayoutLeaf, _offset: usize) -> bool {
true
}
fn prev(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset - 1)
}
fn next(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset + 1)
}
fn can_fragment() -> bool {
false
}
}
impl Metric<LayoutInfo> for HeightMetric {
fn measure(info: &LayoutInfo, _len: usize) -> usize {
info.height.as_raw_frac()
}
fn from_base_units(l: &LayoutLeaf, in_base_units: usize) -> usize {
let mut height = Height::ZERO;
for (h, _el) in &l.data[..in_base_units] {
height += *h;
}
height.as_raw_frac()
}
fn to_base_units(l: &LayoutLeaf, in_measured_units: usize) -> usize {
let mut m1 = in_measured_units;
let mut m2 = 0;
for (h, _el) in &l.data {
if m1 == 0 || m1 < h.as_raw_frac() {
break;
}
m1 -= h.as_raw_frac();
m2 += 1;
}
m2
}
fn is_boundary(_l: &LayoutLeaf, _offset: usize) -> bool {
true
}
fn prev(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset - 1)
}
fn next(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset + 1)
}
fn can_fragment() -> bool {
// The documentation in xi-rope is confusing (TODO: fix that),
// but basically this predicate asks whether a nonempty leaf
// may contain zero measure. Since we're not disallowing that,
// we say "yes" here. If we did disallow zero-height layouts,
// then this stuff would be (slightly) more efficient.
true
}
}
| {
let splitpoint = self.len() / 2;
let right_vec = self.data.split_off(splitpoint);
Some(LayoutLeaf { data: right_vec })
} | conditional_block |
layout_rope.rs | //! A rope-based vector of layouts.
use std::ops::Range;
use std::sync::Arc;
use druid::piet::{PietTextLayout, TextLayout};
use xi_rope::interval::{Interval, IntervalBounds};
use xi_rope::tree::{Cursor, DefaultMetric, Leaf, Metric, Node, NodeInfo, TreeBuilder};
/// A type representing a height measure.
///
/// Internally this is stored as `usize` using fixed point arithmetic,
/// for two reasons. First, it lets the rope reuse the `Metric` mechanism.
/// Second, it means that the monoid property is exact, which would not be
/// the case for `f64`.
///
/// Currently, there are 8 bits of fraction. On 32 bit platforms, that
/// means a maximum height of 16M, which should be good enough for most
/// practical use but could be a limitation. Of course, on 64 bit platforms,
/// the limit of 7.2e16 should never be a problem.
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
pub struct Height(usize);
/// An individual layout within the rope.
///
/// Right now, this is just a Piet TextLayout, but we might add more stuff.
pub struct Layout(PietTextLayout);
#[derive(Clone, Default)]
pub struct LayoutRope(Node<LayoutInfo>);
pub struct LayoutRopeBuilder(TreeBuilder<LayoutInfo>);
/// The height metric of the rope, which is in raw Height fractions.
struct HeightMetric;
/// The base metric of the rope, which just counts the number of layouts.
pub struct BaseMetric;
// This technically doesn't have to be newtyped, we could impl leaf on
// the Vec directly, but this feels cleaner.
#[derive(Clone, Default)]
struct LayoutLeaf {
data: Vec<(Height, Arc<Layout>)>,
}
#[derive(Clone)]
struct LayoutInfo {
/// The height of this section of rope.
height: Height,
}
impl std::ops::Add for Height { | type Output = Self;
fn add(self, other: Self) -> Self {
Height(self.0 + other.0)
}
}
impl std::ops::AddAssign for Height {
fn add_assign(&mut self, other: Self) {
self.0 += other.0
}
}
impl Height {
/// The number of fractional bits in the representation.
pub const HEIGHT_FRAC_BITS: usize = 8;
/// The scale factor for converting from `f64`.
pub const SCALE_FACTOR: f64 = (1 << Self::HEIGHT_FRAC_BITS) as f64;
pub const ZERO: Height = Height(0);
pub fn from_raw_frac(frac: usize) -> Height {
Height(frac)
}
pub fn as_raw_frac(self) -> usize {
self.0
}
pub fn from_f64(height: f64) -> Height {
Height((height * Self::SCALE_FACTOR).round() as usize)
}
pub fn to_f64(self) -> f64 {
self.0 as f64 / Self::SCALE_FACTOR
}
}
impl Layout {
pub fn new(inner: PietTextLayout) -> Layout {
Layout(inner)
}
pub fn piet_layout(&self) -> &PietTextLayout {
&self.0
}
pub fn height(&self) -> Height {
let size = self.0.size();
Height::from_f64(size.height)
}
}
impl NodeInfo for LayoutInfo {
type L = LayoutLeaf;
fn accumulate(&mut self, other: &Self) {
self.height += other.height;
}
fn compute_info(leaf: &Self::L) -> Self {
let mut height = Height::ZERO;
for (leaf_height, _) in &leaf.data {
height += *leaf_height;
}
LayoutInfo { height }
}
}
impl DefaultMetric for LayoutInfo {
type DefaultMetric = BaseMetric;
}
const MIN_LEAF: usize = 16;
const MAX_LEAF: usize = 32;
impl Leaf for LayoutLeaf {
fn len(&self) -> usize {
self.data.len()
}
fn is_ok_child(&self) -> bool {
self.data.len() >= MIN_LEAF
}
fn push_maybe_split(&mut self, other: &Self, iv: Interval) -> Option<Self> {
let (start, end) = iv.start_end();
self.data.extend_from_slice(&other.data[start..end]);
if self.len() <= MAX_LEAF {
None
} else {
let splitpoint = self.len() / 2;
let right_vec = self.data.split_off(splitpoint);
Some(LayoutLeaf { data: right_vec })
}
}
}
impl From<Vec<(Height, Arc<Layout>)>> for LayoutRope {
fn from(v: Vec<(Height, Arc<Layout>)>) -> Self {
LayoutRope(Node::from_leaf(LayoutLeaf { data: v }))
}
}
impl LayoutRope {
/// The number of layouts in the rope.
pub fn len(&self) -> usize {
self.0.len()
}
/// The total height of the rope.
pub fn height(&self) -> Height {
Height::from_raw_frac(self.0.measure::<HeightMetric>())
}
/// A rope consisting of a single layout.
pub fn singleton(item: Layout) -> LayoutRope {
LayoutRope(Node::from_leaf(Self::singleton_leaf(item)))
}
fn singleton_leaf(item: Layout) -> LayoutLeaf {
let height = item.height();
LayoutLeaf {
data: vec![(height, Arc::new(item))],
}
}
pub fn get(&self, index: usize) -> Option<(Height, &Layout)> {
let cursor = Cursor::new(&self.0, index);
cursor
.get_leaf()
.and_then(|(leaf, offset)| leaf.data.get(offset))
.map(|(height, layout)| (*height, &**layout))
}
// These mutation methods might go away in favor of using the builder.
pub fn push(&mut self, item: Layout) {
let el = Self::singleton(item);
// This could be optimized more.
self.0 = Node::concat(self.0.clone(), el.0)
}
pub fn remove(&mut self, index: usize) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn set(&mut self, index: usize, item: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(item));
self.push_subseq(&mut b, Interval::new(index + 1, self.len()));
self.0 = b.build();
}
pub fn insert(&mut self, index: usize, value: Layout) {
let mut b = TreeBuilder::new();
self.push_subseq(&mut b, Interval::new(0, index));
b.push_leaf(Self::singleton_leaf(value));
self.push_subseq(&mut b, Interval::new(index, self.len()));
self.0 = b.build();
}
fn iter_chunks(&self, range: impl IntervalBounds) -> ChunkIter {
let Interval { start, end } = range.into_interval(self.len());
ChunkIter {
cursor: Cursor::new(&self.0, start),
end,
}
}
/// The height at the top of the layout at the given index.
///
/// This is simply the sum of the heights of the layouts that come before
/// it.
pub fn height_of_index(&self, index: usize) -> Height {
Height::from_raw_frac(self.0.count::<HeightMetric>(index))
}
/// The layout at the given height.
///
/// Edge cases get interesting (especially since zero-height layouts are
/// not forbidden), so here is a more precise spec: it is the first layout
/// that either contains (in the closed-open interval sense) the given
/// height, or is a zero-height layout at the given height.
///
/// If the total height is given and the rope does not end on a zero-height
/// layout, then it returns the number of layouts.
///
/// TODO: is there a simpler way to state that? It seems more complicated
/// than it should be.
pub fn index_of_height(&self, height: Height) -> usize {
self.0
.count_base_units::<HeightMetric>(height.as_raw_frac())
}
fn push_subseq(&self, b: &mut TreeBuilder<LayoutInfo>, iv: Interval) {
// TODO: if we make the push_subseq method in xi-rope public, we can save some
// allocations.
b.push(self.0.subseq(iv));
}
}
impl LayoutRopeBuilder {
pub fn new() -> LayoutRopeBuilder {
LayoutRopeBuilder(TreeBuilder::new())
}
#[allow(unused)]
pub fn push_rope_slice(&mut self, other: &LayoutRope, range: Range<usize>) {
// TODO: use push_subseq method on TreeBuilder when that lands.
self.0.push(other.0.subseq(Interval::from(range)))
}
pub fn push_layout(&mut self, layout: Layout) {
// Maybe move the body of singleton_leaf to here?
self.0.push_leaf(LayoutRope::singleton_leaf(layout))
}
pub fn build(self) -> LayoutRope {
LayoutRope(self.0.build())
}
}
impl<'a> IntoIterator for &'a LayoutRope {
// Maybe `(Height, &'a Layout)` would be better, not to expose the internal
// representation, but it's a bit more work.
type Item = &'a (Height, Arc<Layout>);
type IntoIter = std::iter::Flatten<ChunkIter<'a>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_chunks(..).flatten()
}
}
pub struct ChunkIter<'a> {
cursor: Cursor<'a, LayoutInfo>,
end: usize,
}
impl<'a> Iterator for ChunkIter<'a> {
type Item = &'a [(Height, Arc<Layout>)];
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.pos() >= self.end {
return None;
}
let (leaf, start_pos) = self.cursor.get_leaf().unwrap();
let len = (self.end - self.cursor.pos()).min(leaf.len() - start_pos);
self.cursor.next_leaf();
Some(&leaf.data[start_pos..start_pos + len])
}
}
impl Metric<LayoutInfo> for BaseMetric {
fn measure(_: &LayoutInfo, len: usize) -> usize {
len
}
fn to_base_units(_l: &LayoutLeaf, in_measured_units: usize) -> usize {
in_measured_units
}
fn from_base_units(_l: &LayoutLeaf, in_base_units: usize) -> usize {
in_base_units
}
fn is_boundary(_l: &LayoutLeaf, _offset: usize) -> bool {
true
}
fn prev(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset - 1)
}
fn next(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset + 1)
}
fn can_fragment() -> bool {
false
}
}
impl Metric<LayoutInfo> for HeightMetric {
fn measure(info: &LayoutInfo, _len: usize) -> usize {
info.height.as_raw_frac()
}
fn from_base_units(l: &LayoutLeaf, in_base_units: usize) -> usize {
let mut height = Height::ZERO;
for (h, _el) in &l.data[..in_base_units] {
height += *h;
}
height.as_raw_frac()
}
fn to_base_units(l: &LayoutLeaf, in_measured_units: usize) -> usize {
let mut m1 = in_measured_units;
let mut m2 = 0;
for (h, _el) in &l.data {
if m1 == 0 || m1 < h.as_raw_frac() {
break;
}
m1 -= h.as_raw_frac();
m2 += 1;
}
m2
}
fn is_boundary(_l: &LayoutLeaf, _offset: usize) -> bool {
true
}
fn prev(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset - 1)
}
fn next(_l: &LayoutLeaf, offset: usize) -> Option<usize> {
Some(offset + 1)
}
fn can_fragment() -> bool {
// The documentation in xi-rope is confusing (TODO: fix that),
// but basically this predicate asks whether a nonempty leaf
// may contain zero measure. Since we're not disallowing that,
// we say "yes" here. If we did disallow zero-height layouts,
// then this stuff would be (slightly) more efficient.
true
}
} | random_line_split |
|
embeddings.rs | use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::iter::Enumerate;
use std::slice;
use failure::Error;
use ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
/// A word similarity.
///
/// This data structure is used to store a pair consisting of a word and
/// its similarity to a query word.
#[derive(Debug)]
pub struct WordSimilarity<'a> {
pub word: &'a str,
pub similarity: f32,
}
impl<'a> Ord for WordSimilarity<'a> {
fn cmp(&self, other: &Self) -> Ordering {
if self.similarity > other.similarity {
Ordering::Less
} else if self.similarity < other.similarity {
Ordering::Greater
} else {
self.word.cmp(other.word)
}
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embeddin matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) { | self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm!= 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: BinaryHeap<WordSimilarity> = BinaryHeap::new();
for (idx, sim) in sims.iter().enumerate() {
let word = self.words[idx].as_ref();
// Don't add words that we are explicitly asked to skip.
if skip.contains(word) {
continue;
}
let word_distance = WordSimilarity {
word: word,
similarity: *sim,
};
if results.len() == limit {
if let Some(mut min_distance) = results.peek_mut() {
if word_distance.similarity > min_distance.similarity {
*min_distance = word_distance
}
}
} else {
results.push(word_distance);
}
}
results.into_sorted_vec()
}
/// Get the number of words for which embeddings are stored.
pub fn len(&self) -> usize {
self.words.len()
}
/// Get the words for which embeddings are stored. The words line up with
/// the rows in the matrix returned by `data`.
pub fn words(&self) -> &[String] {
&self.words
}
}
impl<'a> IntoIterator for &'a Embeddings {
type Item = (&'a str, ArrayView1<'a, f32>);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator over embeddings.
pub struct Iter<'a> {
embeddings: &'a Embeddings,
inner: Enumerate<slice::Iter<'a, String>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, ArrayView1<'a, f32>);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(idx, word)| {
(
word.as_str(),
self.embeddings.matrix.index_axis(Axis(0), idx),
)
})
}
}
pub fn new_embeddings(
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
) -> Embeddings {
Embeddings {
matrix: matrix,
embed_len: embed_len,
indices: indices,
words: words,
}
} | Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
| random_line_split |
embeddings.rs | use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::iter::Enumerate;
use std::slice;
use failure::Error;
use ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
/// A word similarity.
///
/// This data structure is used to store a pair consisting of a word and
/// its similarity to a query word.
#[derive(Debug)]
pub struct WordSimilarity<'a> {
pub word: &'a str,
pub similarity: f32,
}
impl<'a> Ord for WordSimilarity<'a> {
fn cmp(&self, other: &Self) -> Ordering {
if self.similarity > other.similarity {
Ordering::Less
} else if self.similarity < other.similarity {
Ordering::Greater
} else {
self.word.cmp(other.word)
}
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embeddin matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> |
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm!= 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: BinaryHeap<WordSimilarity> = BinaryHeap::new();
for (idx, sim) in sims.iter().enumerate() {
let word = self.words[idx].as_ref();
// Don't add words that we are explicitly asked to skip.
if skip.contains(word) {
continue;
}
let word_distance = WordSimilarity {
word: word,
similarity: *sim,
};
if results.len() == limit {
if let Some(mut min_distance) = results.peek_mut() {
if word_distance.similarity > min_distance.similarity {
*min_distance = word_distance
}
}
} else {
results.push(word_distance);
}
}
results.into_sorted_vec()
}
/// Get the number of words for which embeddings are stored.
pub fn len(&self) -> usize {
self.words.len()
}
/// Get the words for which embeddings are stored. The words line up with
/// the rows in the matrix returned by `data`.
pub fn words(&self) -> &[String] {
&self.words
}
}
impl<'a> IntoIterator for &'a Embeddings {
type Item = (&'a str, ArrayView1<'a, f32>);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator over embeddings.
pub struct Iter<'a> {
embeddings: &'a Embeddings,
inner: Enumerate<slice::Iter<'a, String>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, ArrayView1<'a, f32>);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(idx, word)| {
(
word.as_str(),
self.embeddings.matrix.index_axis(Axis(0), idx),
)
})
}
}
pub fn new_embeddings(
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
) -> Embeddings {
Embeddings {
matrix: matrix,
embed_len: embed_len,
indices: indices,
words: words,
}
}
| {
&self.indices
} | identifier_body |
embeddings.rs | use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::iter::Enumerate;
use std::slice;
use failure::Error;
use ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
/// A word similarity.
///
/// This data structure is used to store a pair consisting of a word and
/// its similarity to a query word.
#[derive(Debug)]
pub struct WordSimilarity<'a> {
pub word: &'a str,
pub similarity: f32,
}
impl<'a> Ord for WordSimilarity<'a> {
fn cmp(&self, other: &Self) -> Ordering {
if self.similarity > other.similarity {
Ordering::Less
} else if self.similarity < other.similarity {
Ordering::Greater
} else |
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embeddin matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm!= 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: BinaryHeap<WordSimilarity> = BinaryHeap::new();
for (idx, sim) in sims.iter().enumerate() {
let word = self.words[idx].as_ref();
// Don't add words that we are explicitly asked to skip.
if skip.contains(word) {
continue;
}
let word_distance = WordSimilarity {
word: word,
similarity: *sim,
};
if results.len() == limit {
if let Some(mut min_distance) = results.peek_mut() {
if word_distance.similarity > min_distance.similarity {
*min_distance = word_distance
}
}
} else {
results.push(word_distance);
}
}
results.into_sorted_vec()
}
/// Get the number of words for which embeddings are stored.
pub fn len(&self) -> usize {
self.words.len()
}
/// Get the words for which embeddings are stored. The words line up with
/// the rows in the matrix returned by `data`.
pub fn words(&self) -> &[String] {
&self.words
}
}
impl<'a> IntoIterator for &'a Embeddings {
type Item = (&'a str, ArrayView1<'a, f32>);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator over embeddings.
pub struct Iter<'a> {
embeddings: &'a Embeddings,
inner: Enumerate<slice::Iter<'a, String>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, ArrayView1<'a, f32>);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(idx, word)| {
(
word.as_str(),
self.embeddings.matrix.index_axis(Axis(0), idx),
)
})
}
}
pub fn new_embeddings(
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
) -> Embeddings {
Embeddings {
matrix: matrix,
embed_len: embed_len,
indices: indices,
words: words,
}
}
| {
self.word.cmp(other.word)
} | conditional_block |
embeddings.rs | use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::iter::Enumerate;
use std::slice;
use failure::Error;
use ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
/// A word similarity.
///
/// This data structure is used to store a pair consisting of a word and
/// its similarity to a query word.
#[derive(Debug)]
pub struct WordSimilarity<'a> {
pub word: &'a str,
pub similarity: f32,
}
impl<'a> Ord for WordSimilarity<'a> {
fn cmp(&self, other: &Self) -> Ordering {
if self.similarity > other.similarity {
Ordering::Less
} else if self.similarity < other.similarity {
Ordering::Greater
} else {
self.word.cmp(other.word)
}
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum | {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embeddin matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm!= 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: BinaryHeap<WordSimilarity> = BinaryHeap::new();
for (idx, sim) in sims.iter().enumerate() {
let word = self.words[idx].as_ref();
// Don't add words that we are explicitly asked to skip.
if skip.contains(word) {
continue;
}
let word_distance = WordSimilarity {
word: word,
similarity: *sim,
};
if results.len() == limit {
if let Some(mut min_distance) = results.peek_mut() {
if word_distance.similarity > min_distance.similarity {
*min_distance = word_distance
}
}
} else {
results.push(word_distance);
}
}
results.into_sorted_vec()
}
/// Get the number of words for which embeddings are stored.
pub fn len(&self) -> usize {
self.words.len()
}
/// Get the words for which embeddings are stored. The words line up with
/// the rows in the matrix returned by `data`.
pub fn words(&self) -> &[String] {
&self.words
}
}
impl<'a> IntoIterator for &'a Embeddings {
type Item = (&'a str, ArrayView1<'a, f32>);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator over embeddings.
pub struct Iter<'a> {
embeddings: &'a Embeddings,
inner: Enumerate<slice::Iter<'a, String>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, ArrayView1<'a, f32>);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|(idx, word)| {
(
word.as_str(),
self.embeddings.matrix.index_axis(Axis(0), idx),
)
})
}
}
pub fn new_embeddings(
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
) -> Embeddings {
Embeddings {
matrix: matrix,
embed_len: embed_len,
indices: indices,
words: words,
}
}
| BuilderError | identifier_name |
utils.rs | use std::cmp::Ordering;
use std::collections::HashSet;
use crate::id;
use serde::{Deserialize, Serialize};
use swc_core::common::{Mark, Span, SyntaxContext, DUMMY_SP};
use swc_core::ecma::ast::{self, Id};
use swc_core::ecma::atoms::{js_word, JsWord};
pub fn match_member_expr(expr: &ast::MemberExpr, idents: Vec<&str>, decls: &HashSet<Id>) -> bool {
use ast::{Expr, Ident, Lit, MemberProp, Str};
let mut member = expr;
let mut idents = idents;
while idents.len() > 1 {
let expected = idents.pop().unwrap();
let prop = match &member.prop {
MemberProp::Computed(comp) => {
if let Expr::Lit(Lit::Str(Str { value: ref sym,.. })) = *comp.expr {
sym
} else {
return false;
}
}
MemberProp::Ident(Ident { ref sym,.. }) => sym,
_ => return false,
};
if prop!= expected {
return false;
}
match &*member.obj {
Expr::Member(m) => member = m,
Expr::Ident(id) => {
return idents.len() == 1 && &id.sym == idents.pop().unwrap() &&!decls.contains(&id!(id));
}
_ => return false,
}
}
false
}
pub fn create_require(specifier: swc_core::ecma::atoms::JsWord) -> ast::CallExpr {
let mut normalized_specifier = specifier;
if normalized_specifier.starts_with("node:") {
normalized_specifier = normalized_specifier.replace("node:", "").into();
}
ast::CallExpr {
callee: ast::Callee::Expr(Box::new(ast::Expr::Ident(ast::Ident::new(
"require".into(),
DUMMY_SP,
)))),
args: vec![ast::ExprOrSpread {
expr: Box::new(ast::Expr::Lit(ast::Lit::Str(normalized_specifier.into()))),
spread: None,
}],
span: DUMMY_SP,
type_args: None,
}
}
fn is_marked(span: Span, mark: Mark) -> bool {
let mut ctxt = span.ctxt();
loop {
let m = ctxt.remove_mark();
if m == Mark::root() {
return false;
}
if m == mark {
return true;
}
}
}
pub fn match_str(node: &ast::Expr) -> Option<(JsWord, Span)> {
use ast::*;
match node {
// "string" or'string'
Expr::Lit(Lit::Str(s)) => Some((s.value.clone(), s.span)),
// `string`
Expr::Tpl(tpl) if tpl.quasis.len() == 1 && tpl.exprs.is_empty() => {
Some(((*tpl.quasis[0].raw).into(), tpl.span))
}
_ => None,
}
}
pub fn match_property_name(node: &ast::MemberExpr) -> Option<(JsWord, Span)> {
match &node.prop {
ast::MemberProp::Computed(s) => match_str(&s.expr),
ast::MemberProp::Ident(id) => Some((id.sym.clone(), id.span)),
ast::MemberProp::PrivateName(_) => None,
}
}
pub fn match_export_name(name: &ast::ModuleExportName) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&&!decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&&!is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if!is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level `require()` call. This causes the resolved module and all dependencies to be wrapped.",
"https://parceljs.org/features/scope-hoisting/#avoid-conditional-require()"
),
BailoutReason::NonStaticDestructuring => (
"Non-static destructuring of `require` or dynamic `import()`. This causes all exports of the resolved module to be included.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::TopLevelReturn => (
"Module contains a top-level `return` statement. This causes the module to be wrapped in a function and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-top-level-return"
),
BailoutReason::Eval => (
"Module contains usage of `eval`. This causes the module to be wrapped in a function and minification to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-eval"
),
BailoutReason::NonStaticExports => (
"Non-static access of CommonJS `exports` object. This causes tree shaking to be disabled for the module.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeModule => (
"Unknown usage of CommonJS `module` object. This causes the module to be wrapped, and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeExports => (
"Unknown usage of CommonJS `exports` object. This causes tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::ExportsReassignment => (
"Module contains a reassignment of the CommonJS `exports` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::ModuleReassignment => (
"Module contains a reassignment of the CommonJS `module` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::NonStaticDynamicImport => (
"Unknown dynamic import usage. This causes tree shaking to be disabled for the resolved module.", | BailoutReason::NonStaticAccess => (
"Non-static access of an `import` or `require`. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-member-accesses"
),
}
}
}
#[macro_export]
macro_rules! fold_member_expr_skip_prop {
() => {
fn fold_member_expr(
&mut self,
mut node: swc_core::ecma::ast::MemberExpr,
) -> swc_core::ecma::ast::MemberExpr {
node.obj = node.obj.fold_with(self);
if let swc_core::ecma::ast::MemberProp::Computed(_) = node.prop {
node.prop = node.prop.fold_with(self);
}
node
}
};
}
#[macro_export]
macro_rules! id {
($ident: expr) => {
$ident.to_id()
};
} | "https://parceljs.org/features/scope-hoisting/#dynamic-imports"
), | random_line_split |
utils.rs | use std::cmp::Ordering;
use std::collections::HashSet;
use crate::id;
use serde::{Deserialize, Serialize};
use swc_core::common::{Mark, Span, SyntaxContext, DUMMY_SP};
use swc_core::ecma::ast::{self, Id};
use swc_core::ecma::atoms::{js_word, JsWord};
pub fn match_member_expr(expr: &ast::MemberExpr, idents: Vec<&str>, decls: &HashSet<Id>) -> bool {
use ast::{Expr, Ident, Lit, MemberProp, Str};
let mut member = expr;
let mut idents = idents;
while idents.len() > 1 {
let expected = idents.pop().unwrap();
let prop = match &member.prop {
MemberProp::Computed(comp) => {
if let Expr::Lit(Lit::Str(Str { value: ref sym,.. })) = *comp.expr {
sym
} else {
return false;
}
}
MemberProp::Ident(Ident { ref sym,.. }) => sym,
_ => return false,
};
if prop!= expected {
return false;
}
match &*member.obj {
Expr::Member(m) => member = m,
Expr::Ident(id) => {
return idents.len() == 1 && &id.sym == idents.pop().unwrap() &&!decls.contains(&id!(id));
}
_ => return false,
}
}
false
}
pub fn | (specifier: swc_core::ecma::atoms::JsWord) -> ast::CallExpr {
let mut normalized_specifier = specifier;
if normalized_specifier.starts_with("node:") {
normalized_specifier = normalized_specifier.replace("node:", "").into();
}
ast::CallExpr {
callee: ast::Callee::Expr(Box::new(ast::Expr::Ident(ast::Ident::new(
"require".into(),
DUMMY_SP,
)))),
args: vec![ast::ExprOrSpread {
expr: Box::new(ast::Expr::Lit(ast::Lit::Str(normalized_specifier.into()))),
spread: None,
}],
span: DUMMY_SP,
type_args: None,
}
}
fn is_marked(span: Span, mark: Mark) -> bool {
let mut ctxt = span.ctxt();
loop {
let m = ctxt.remove_mark();
if m == Mark::root() {
return false;
}
if m == mark {
return true;
}
}
}
pub fn match_str(node: &ast::Expr) -> Option<(JsWord, Span)> {
use ast::*;
match node {
// "string" or'string'
Expr::Lit(Lit::Str(s)) => Some((s.value.clone(), s.span)),
// `string`
Expr::Tpl(tpl) if tpl.quasis.len() == 1 && tpl.exprs.is_empty() => {
Some(((*tpl.quasis[0].raw).into(), tpl.span))
}
_ => None,
}
}
pub fn match_property_name(node: &ast::MemberExpr) -> Option<(JsWord, Span)> {
match &node.prop {
ast::MemberProp::Computed(s) => match_str(&s.expr),
ast::MemberProp::Ident(id) => Some((id.sym.clone(), id.span)),
ast::MemberProp::PrivateName(_) => None,
}
}
pub fn match_export_name(name: &ast::ModuleExportName) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&&!decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&&!is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if!is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level `require()` call. This causes the resolved module and all dependencies to be wrapped.",
"https://parceljs.org/features/scope-hoisting/#avoid-conditional-require()"
),
BailoutReason::NonStaticDestructuring => (
"Non-static destructuring of `require` or dynamic `import()`. This causes all exports of the resolved module to be included.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::TopLevelReturn => (
"Module contains a top-level `return` statement. This causes the module to be wrapped in a function and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-top-level-return"
),
BailoutReason::Eval => (
"Module contains usage of `eval`. This causes the module to be wrapped in a function and minification to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-eval"
),
BailoutReason::NonStaticExports => (
"Non-static access of CommonJS `exports` object. This causes tree shaking to be disabled for the module.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeModule => (
"Unknown usage of CommonJS `module` object. This causes the module to be wrapped, and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeExports => (
"Unknown usage of CommonJS `exports` object. This causes tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::ExportsReassignment => (
"Module contains a reassignment of the CommonJS `exports` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::ModuleReassignment => (
"Module contains a reassignment of the CommonJS `module` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::NonStaticDynamicImport => (
"Unknown dynamic import usage. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-imports"
),
BailoutReason::NonStaticAccess => (
"Non-static access of an `import` or `require`. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-member-accesses"
),
}
}
}
#[macro_export]
macro_rules! fold_member_expr_skip_prop {
() => {
fn fold_member_expr(
&mut self,
mut node: swc_core::ecma::ast::MemberExpr,
) -> swc_core::ecma::ast::MemberExpr {
node.obj = node.obj.fold_with(self);
if let swc_core::ecma::ast::MemberProp::Computed(_) = node.prop {
node.prop = node.prop.fold_with(self);
}
node
}
};
}
#[macro_export]
macro_rules! id {
($ident: expr) => {
$ident.to_id()
};
}
| create_require | identifier_name |
utils.rs | use std::cmp::Ordering;
use std::collections::HashSet;
use crate::id;
use serde::{Deserialize, Serialize};
use swc_core::common::{Mark, Span, SyntaxContext, DUMMY_SP};
use swc_core::ecma::ast::{self, Id};
use swc_core::ecma::atoms::{js_word, JsWord};
pub fn match_member_expr(expr: &ast::MemberExpr, idents: Vec<&str>, decls: &HashSet<Id>) -> bool {
use ast::{Expr, Ident, Lit, MemberProp, Str};
let mut member = expr;
let mut idents = idents;
while idents.len() > 1 {
let expected = idents.pop().unwrap();
let prop = match &member.prop {
MemberProp::Computed(comp) => {
if let Expr::Lit(Lit::Str(Str { value: ref sym,.. })) = *comp.expr {
sym
} else |
}
MemberProp::Ident(Ident { ref sym,.. }) => sym,
_ => return false,
};
if prop!= expected {
return false;
}
match &*member.obj {
Expr::Member(m) => member = m,
Expr::Ident(id) => {
return idents.len() == 1 && &id.sym == idents.pop().unwrap() &&!decls.contains(&id!(id));
}
_ => return false,
}
}
false
}
pub fn create_require(specifier: swc_core::ecma::atoms::JsWord) -> ast::CallExpr {
let mut normalized_specifier = specifier;
if normalized_specifier.starts_with("node:") {
normalized_specifier = normalized_specifier.replace("node:", "").into();
}
ast::CallExpr {
callee: ast::Callee::Expr(Box::new(ast::Expr::Ident(ast::Ident::new(
"require".into(),
DUMMY_SP,
)))),
args: vec![ast::ExprOrSpread {
expr: Box::new(ast::Expr::Lit(ast::Lit::Str(normalized_specifier.into()))),
spread: None,
}],
span: DUMMY_SP,
type_args: None,
}
}
fn is_marked(span: Span, mark: Mark) -> bool {
let mut ctxt = span.ctxt();
loop {
let m = ctxt.remove_mark();
if m == Mark::root() {
return false;
}
if m == mark {
return true;
}
}
}
pub fn match_str(node: &ast::Expr) -> Option<(JsWord, Span)> {
use ast::*;
match node {
// "string" or'string'
Expr::Lit(Lit::Str(s)) => Some((s.value.clone(), s.span)),
// `string`
Expr::Tpl(tpl) if tpl.quasis.len() == 1 && tpl.exprs.is_empty() => {
Some(((*tpl.quasis[0].raw).into(), tpl.span))
}
_ => None,
}
}
pub fn match_property_name(node: &ast::MemberExpr) -> Option<(JsWord, Span)> {
match &node.prop {
ast::MemberProp::Computed(s) => match_str(&s.expr),
ast::MemberProp::Ident(id) => Some((id.sym.clone(), id.span)),
ast::MemberProp::PrivateName(_) => None,
}
}
pub fn match_export_name(name: &ast::ModuleExportName) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&&!decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&&!is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if!is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level `require()` call. This causes the resolved module and all dependencies to be wrapped.",
"https://parceljs.org/features/scope-hoisting/#avoid-conditional-require()"
),
BailoutReason::NonStaticDestructuring => (
"Non-static destructuring of `require` or dynamic `import()`. This causes all exports of the resolved module to be included.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::TopLevelReturn => (
"Module contains a top-level `return` statement. This causes the module to be wrapped in a function and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-top-level-return"
),
BailoutReason::Eval => (
"Module contains usage of `eval`. This causes the module to be wrapped in a function and minification to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-eval"
),
BailoutReason::NonStaticExports => (
"Non-static access of CommonJS `exports` object. This causes tree shaking to be disabled for the module.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeModule => (
"Unknown usage of CommonJS `module` object. This causes the module to be wrapped, and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeExports => (
"Unknown usage of CommonJS `exports` object. This causes tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::ExportsReassignment => (
"Module contains a reassignment of the CommonJS `exports` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::ModuleReassignment => (
"Module contains a reassignment of the CommonJS `module` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::NonStaticDynamicImport => (
"Unknown dynamic import usage. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-imports"
),
BailoutReason::NonStaticAccess => (
"Non-static access of an `import` or `require`. This causes tree shaking to be disabled for the resolved module.",
"https://parceljs.org/features/scope-hoisting/#dynamic-member-accesses"
),
}
}
}
#[macro_export]
macro_rules! fold_member_expr_skip_prop {
() => {
fn fold_member_expr(
&mut self,
mut node: swc_core::ecma::ast::MemberExpr,
) -> swc_core::ecma::ast::MemberExpr {
node.obj = node.obj.fold_with(self);
if let swc_core::ecma::ast::MemberProp::Computed(_) = node.prop {
node.prop = node.prop.fold_with(self);
}
node
}
};
}
#[macro_export]
macro_rules! id {
($ident: expr) => {
$ident.to_id()
};
}
| {
return false;
} | conditional_block |
projectstate.rs | use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde::de::DeserializeOwned;
use url::Url;
use uuid::Uuid;
use config::AortaConfig;
use event::StoreChangeset;
use query::{AortaQuery, GetProjectConfigQuery, QueryError, RequestManager};
use semaphore_common::ProjectId;
use upstream::UpstreamDescriptor;
/// These are config values that the user can modify in the UI.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct ProjectConfig {
/// URLs that are permitted for cross original JavaScript requests.
pub allowed_domains: Vec<String>,
}
/// The project state snapshot represents a known server state of
/// a project.
///
/// This is generally used by an indirection of `ProjectState` which
/// manages a view over it which supports concurrent updates in the
/// background.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectStateSnapshot {
/// The timestamp of when the snapshot was received.
pub last_fetch: DateTime<Utc>,
/// The timestamp of when the last snapshot was changed.
///
/// This might be `None` in some rare cases like where snapshots
/// are faked locally.
pub last_change: Option<DateTime<Utc>>,
/// Indicates that the project is disabled.
pub disabled: bool,
/// A container of known public keys in the project.
pub public_keys: HashMap<String, bool>,
/// The project's slug if available.
pub slug: Option<String>,
/// The project's current config
pub config: ProjectConfig,
/// The project state's revision id.
pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
/// The state of the public key is not known.
///
/// This can indicate that the key is not yet known or that the
/// key just does not exist. We can not tell these two cases
/// apart as there is always a lag since the last update from the
/// upstream server. As such the project state uses a heuristic
/// to decide if it should treat a key as not existing or just
/// not yet known.
Unknown,
/// This key is known but was disabled.
Disabled,
/// This key is known and is enabled.
Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
/// Indicates that this event should be queued.
Queue,
/// Indicates that this event should be discarded.
Discard,
/// Indicates that this event should be sent now.
Send,
}
/// An event that was not sent yet.
#[derive(Debug)]
struct PendingStore {
added_at: Instant,
changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. As the type is
/// sync all of the methods can be used on a shared instance. The type
/// internally locks automatically.
#[derive(Debug)]
pub struct ProjectState {
config: Arc<AortaConfig>,
project_id: ProjectId,
current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
pending_stores: RwLock<Vec<PendingStore>>,
requested_new_snapshot: AtomicBool,
request_manager: Arc<RequestManager>,
last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
/// Returns the current status of a key.
pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
match self.public_keys.get(public_key) {
Some(&true) => PublicKeyStatus::Enabled,
Some(&false) => PublicKeyStatus::Disabled,
None => PublicKeyStatus::Unknown,
}
}
/// Checks if a public key is enabled.
pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
self.get_public_key_status(public_key) == PublicKeyStatus::Enabled
}
/// Returns `true` if the entire project should be considered
/// disabled (blackholed, deleted etc.).
pub fn disabled(&self) -> bool {
self.disabled
}
/// Returns true if the snapshot is outdated.
pub fn outdated(&self, config: &AortaConfig) -> bool {
// TODO(armin): change this to a value from the config
self.last_fetch < Utc::now() - config.snapshot_expiry
}
/// Returns the project config.
pub fn config(&self) -> &ProjectConfig {
&self.config
}
}
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned +'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send +'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
/// Currently an relay only ever has one trove and that trove can only have
/// one upstream descriptor. As a result of this, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate that the project has activity
/// for the trove. As the trove needs to expire items it uses this
/// timestamp to expire.
pub fn last_event(&self) -> Option<DateTime<Utc>> |
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.and_then(|x| x.last_change.clone())
}
/// Requests an update to the project config to be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
/// Events should be buffered until a key becomes available nor we
/// absolutely know that the key does not exist. There is some
/// internal logic here that based on the age of the snapshot and
/// some global disabled settings will indicate different behaviors.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if!snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
// if the last config fetch was more than a minute ago we just
// accept the event because at this point the dsn might have
// become available upstream.
if snapshot.outdated(&self.config) {
PublicKeyEventAction::Queue
// we just assume the config did not change in the last 60
// seconds and the dsn is indeed not seen yet.
} else {
PublicKeyEventAction::Discard
}
}
}
}
// in the absence of a snapshot we generally queue
None => {
self.request_updated_project_config();
PublicKeyEventAction::Queue
}
}
}
/// Validates the origin.
pub fn is_valid_origin(&self, origin: &Url) -> bool {
self.snapshot_opt().map_or(true, |snapshot| {
let allowed = &snapshot.config().allowed_domains;
!allowed.is_empty()
&& allowed
.iter()
.any(|x| x.as_str() == "*" || Some(x.as_str()) == origin.host_str())
})
}
/// Given a public key and an event this handles an event.
///
/// It either puts it into an internal queue, sends it or discards it. If the item
/// was discarded `false` is returned.
pub fn store_changeset<'a>(&self, changeset: StoreChangeset) -> bool {
if let Some(ref origin) = changeset.meta.origin {
if!self.is_valid_origin(origin) {
debug!(
"{}#{} -> access denied (bad origin {})",
self.project_id, changeset.event, origin
);
return false;
}
}
match self.get_public_key_event_action(&changeset.public_key) {
PublicKeyEventAction::Queue => {
debug!("{}#{} -> pending", self.project_id, changeset.event);
self.pending_stores.write().push(PendingStore {
added_at: Instant::now(),
changeset,
});
true
}
PublicKeyEventAction::Send => {
debug!("{}#{} -> changeset", self.project_id, changeset.event);
self.request_manager
.add_changeset(self.project_id, changeset);
true
}
PublicKeyEventAction::Discard => {
debug!("{}#{} -> discarded", self.project_id, changeset.event);
false
}
}
}
/// Returns `true` if the project state is available.
pub fn snapshot_available(&self) -> bool {
self.current_snapshot.read().is_some()
}
/// Returns the current project snapshot.
pub fn snapshot(&self) -> Arc<ProjectStateSnapshot> {
self.snapshot_opt().expect("Snapshot not yet available")
}
/// Returns the current project snapshot as option.
pub fn snapshot_opt(&self) -> Option<Arc<ProjectStateSnapshot>> {
match *self.current_snapshot.read() {
Some(ref arc) => Some(arc.clone()),
None => None,
}
}
/// Sets a new snapshot.
pub fn set_snapshot(&self, new_snapshot: ProjectStateSnapshot) {
*self.current_snapshot.write() = Some(Arc::new(new_snapshot));
self.retry_pending_events();
}
/// Attempts to send all pending requests now.
fn retry_pending_events(&self) {
let snapshot = self.snapshot();
let mut to_send = vec![];
let timeout = self.config.pending_events_timeout.to_std().unwrap();
// acquire the lock locally so we can then acquire the lock later on send
// without deadlocking
{
let mut lock = self.pending_stores.write();
let pending = mem::replace(&mut *lock, Vec::new());
lock.extend(pending.into_iter().filter_map(|pending_store| {
if pending_store.added_at.elapsed() > timeout {
return None;
}
match snapshot.get_public_key_status(&pending_store.changeset.public_key) {
PublicKeyStatus::Enabled => {
to_send.push(pending_store);
None
}
PublicKeyStatus::Disabled => None,
PublicKeyStatus::Unknown => Some(pending_store),
}
}));
}
for pending_store in to_send {
debug!(
"unpend {}#{}",
self.project_id, pending_store.changeset.event
);
self.store_changeset(pending_store.changeset);
}
}
/// Sets a "project does not exist" snapshot.
///
/// This is used when the server indicates that this project does not actually
/// exist or the relay has no permissions to work with it (these are both
/// reported as the same thing to the relay).
pub fn set_missing_snapshot(&self) {
self.set_snapshot(ProjectStateSnapshot {
last_fetch: Utc::now(),
last_change: None,
disabled: true,
public_keys: HashMap::new(),
slug: None,
config: Default::default(),
rev: None,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_assert_sync() {
struct Assert<T: Sync> {
x: Option<T>,
}
let val: Assert<ProjectState> = Assert { x: None };
assert_eq!(val.x.is_none(), true);
}
}
| {
*self.last_event.read()
} | identifier_body |
projectstate.rs | use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde::de::DeserializeOwned;
use url::Url;
use uuid::Uuid;
use config::AortaConfig;
use event::StoreChangeset;
use query::{AortaQuery, GetProjectConfigQuery, QueryError, RequestManager};
use semaphore_common::ProjectId;
use upstream::UpstreamDescriptor;
/// These are config values that the user can modify in the UI.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct | {
/// URLs that are permitted for cross original JavaScript requests.
pub allowed_domains: Vec<String>,
}
/// The project state snapshot represents a known server state of
/// a project.
///
/// This is generally used by an indirection of `ProjectState` which
/// manages a view over it which supports concurrent updates in the
/// background.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectStateSnapshot {
/// The timestamp of when the snapshot was received.
pub last_fetch: DateTime<Utc>,
/// The timestamp of when the last snapshot was changed.
///
/// This might be `None` in some rare cases like where snapshots
/// are faked locally.
pub last_change: Option<DateTime<Utc>>,
/// Indicates that the project is disabled.
pub disabled: bool,
/// A container of known public keys in the project.
pub public_keys: HashMap<String, bool>,
/// The project's slug if available.
pub slug: Option<String>,
/// The project's current config
pub config: ProjectConfig,
/// The project state's revision id.
pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
/// The state of the public key is not known.
///
/// This can indicate that the key is not yet known or that the
/// key just does not exist. We can not tell these two cases
/// apart as there is always a lag since the last update from the
/// upstream server. As such the project state uses a heuristic
/// to decide if it should treat a key as not existing or just
/// not yet known.
Unknown,
/// This key is known but was disabled.
Disabled,
/// This key is known and is enabled.
Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
/// Indicates that this event should be queued.
Queue,
/// Indicates that this event should be discarded.
Discard,
/// Indicates that this event should be sent now.
Send,
}
/// An event that was not sent yet.
#[derive(Debug)]
struct PendingStore {
added_at: Instant,
changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. As the type is
/// sync all of the methods can be used on a shared instance. The type
/// internally locks automatically.
#[derive(Debug)]
pub struct ProjectState {
config: Arc<AortaConfig>,
project_id: ProjectId,
current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
pending_stores: RwLock<Vec<PendingStore>>,
requested_new_snapshot: AtomicBool,
request_manager: Arc<RequestManager>,
last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
/// Returns the current status of a key.
pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
match self.public_keys.get(public_key) {
Some(&true) => PublicKeyStatus::Enabled,
Some(&false) => PublicKeyStatus::Disabled,
None => PublicKeyStatus::Unknown,
}
}
/// Checks if a public key is enabled.
pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
self.get_public_key_status(public_key) == PublicKeyStatus::Enabled
}
/// Returns `true` if the entire project should be considered
/// disabled (blackholed, deleted etc.).
pub fn disabled(&self) -> bool {
self.disabled
}
/// Returns true if the snapshot is outdated.
pub fn outdated(&self, config: &AortaConfig) -> bool {
// TODO(armin): change this to a value from the config
self.last_fetch < Utc::now() - config.snapshot_expiry
}
/// Returns the project config.
pub fn config(&self) -> &ProjectConfig {
&self.config
}
}
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned +'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send +'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
/// Currently an relay only ever has one trove and that trove can only have
/// one upstream descriptor. As a result of this, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate that the project has activity
/// for the trove. As the trove needs to expire items it uses this
/// timestamp to expire.
pub fn last_event(&self) -> Option<DateTime<Utc>> {
*self.last_event.read()
}
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.and_then(|x| x.last_change.clone())
}
/// Requests an update to the project config to be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
/// Events should be buffered until a key becomes available nor we
/// absolutely know that the key does not exist. There is some
/// internal logic here that based on the age of the snapshot and
/// some global disabled settings will indicate different behaviors.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if!snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
// if the last config fetch was more than a minute ago we just
// accept the event because at this point the dsn might have
// become available upstream.
if snapshot.outdated(&self.config) {
PublicKeyEventAction::Queue
// we just assume the config did not change in the last 60
// seconds and the dsn is indeed not seen yet.
} else {
PublicKeyEventAction::Discard
}
}
}
}
// in the absence of a snapshot we generally queue
None => {
self.request_updated_project_config();
PublicKeyEventAction::Queue
}
}
}
/// Validates the origin.
pub fn is_valid_origin(&self, origin: &Url) -> bool {
self.snapshot_opt().map_or(true, |snapshot| {
let allowed = &snapshot.config().allowed_domains;
!allowed.is_empty()
&& allowed
.iter()
.any(|x| x.as_str() == "*" || Some(x.as_str()) == origin.host_str())
})
}
/// Given a public key and an event this handles an event.
///
/// It either puts it into an internal queue, sends it or discards it. If the item
/// was discarded `false` is returned.
pub fn store_changeset<'a>(&self, changeset: StoreChangeset) -> bool {
if let Some(ref origin) = changeset.meta.origin {
if!self.is_valid_origin(origin) {
debug!(
"{}#{} -> access denied (bad origin {})",
self.project_id, changeset.event, origin
);
return false;
}
}
match self.get_public_key_event_action(&changeset.public_key) {
PublicKeyEventAction::Queue => {
debug!("{}#{} -> pending", self.project_id, changeset.event);
self.pending_stores.write().push(PendingStore {
added_at: Instant::now(),
changeset,
});
true
}
PublicKeyEventAction::Send => {
debug!("{}#{} -> changeset", self.project_id, changeset.event);
self.request_manager
.add_changeset(self.project_id, changeset);
true
}
PublicKeyEventAction::Discard => {
debug!("{}#{} -> discarded", self.project_id, changeset.event);
false
}
}
}
/// Returns `true` if the project state is available.
pub fn snapshot_available(&self) -> bool {
self.current_snapshot.read().is_some()
}
/// Returns the current project snapshot.
pub fn snapshot(&self) -> Arc<ProjectStateSnapshot> {
self.snapshot_opt().expect("Snapshot not yet available")
}
/// Returns the current project snapshot as option.
pub fn snapshot_opt(&self) -> Option<Arc<ProjectStateSnapshot>> {
match *self.current_snapshot.read() {
Some(ref arc) => Some(arc.clone()),
None => None,
}
}
/// Sets a new snapshot.
pub fn set_snapshot(&self, new_snapshot: ProjectStateSnapshot) {
*self.current_snapshot.write() = Some(Arc::new(new_snapshot));
self.retry_pending_events();
}
/// Attempts to send all pending requests now.
fn retry_pending_events(&self) {
let snapshot = self.snapshot();
let mut to_send = vec![];
let timeout = self.config.pending_events_timeout.to_std().unwrap();
// acquire the lock locally so we can then acquire the lock later on send
// without deadlocking
{
let mut lock = self.pending_stores.write();
let pending = mem::replace(&mut *lock, Vec::new());
lock.extend(pending.into_iter().filter_map(|pending_store| {
if pending_store.added_at.elapsed() > timeout {
return None;
}
match snapshot.get_public_key_status(&pending_store.changeset.public_key) {
PublicKeyStatus::Enabled => {
to_send.push(pending_store);
None
}
PublicKeyStatus::Disabled => None,
PublicKeyStatus::Unknown => Some(pending_store),
}
}));
}
for pending_store in to_send {
debug!(
"unpend {}#{}",
self.project_id, pending_store.changeset.event
);
self.store_changeset(pending_store.changeset);
}
}
/// Sets a "project does not exist" snapshot.
///
/// This is used when the server indicates that this project does not actually
/// exist or the relay has no permissions to work with it (these are both
/// reported as the same thing to the relay).
pub fn set_missing_snapshot(&self) {
self.set_snapshot(ProjectStateSnapshot {
last_fetch: Utc::now(),
last_change: None,
disabled: true,
public_keys: HashMap::new(),
slug: None,
config: Default::default(),
rev: None,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_assert_sync() {
struct Assert<T: Sync> {
x: Option<T>,
}
let val: Assert<ProjectState> = Assert { x: None };
assert_eq!(val.x.is_none(), true);
}
}
| ProjectConfig | identifier_name |
projectstate.rs | use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde::de::DeserializeOwned;
use url::Url;
use uuid::Uuid;
use config::AortaConfig;
use event::StoreChangeset;
use query::{AortaQuery, GetProjectConfigQuery, QueryError, RequestManager};
use semaphore_common::ProjectId;
use upstream::UpstreamDescriptor;
/// These are config values that the user can modify in the UI.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct ProjectConfig {
/// URLs that are permitted for cross original JavaScript requests.
pub allowed_domains: Vec<String>,
}
/// The project state snapshot represents a known server state of
/// a project.
///
/// This is generally used by an indirection of `ProjectState` which
/// manages a view over it which supports concurrent updates in the
/// background.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectStateSnapshot {
/// The timestamp of when the snapshot was received.
pub last_fetch: DateTime<Utc>,
/// The timestamp of when the last snapshot was changed.
///
/// This might be `None` in some rare cases like where snapshots
/// are faked locally.
pub last_change: Option<DateTime<Utc>>,
/// Indicates that the project is disabled.
pub disabled: bool,
/// A container of known public keys in the project.
pub public_keys: HashMap<String, bool>,
/// The project's slug if available.
pub slug: Option<String>,
/// The project's current config
pub config: ProjectConfig,
/// The project state's revision id.
pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
/// The state of the public key is not known.
///
/// This can indicate that the key is not yet known or that the
/// key just does not exist. We can not tell these two cases
/// apart as there is always a lag since the last update from the
/// upstream server. As such the project state uses a heuristic
/// to decide if it should treat a key as not existing or just
/// not yet known.
Unknown,
/// This key is known but was disabled.
Disabled,
/// This key is known and is enabled.
Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
/// Indicates that this event should be queued.
Queue,
/// Indicates that this event should be discarded.
Discard,
/// Indicates that this event should be sent now.
Send,
}
/// An event that was not sent yet.
#[derive(Debug)]
struct PendingStore {
added_at: Instant,
changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. As the type is
/// sync all of the methods can be used on a shared instance. The type
/// internally locks automatically.
#[derive(Debug)]
pub struct ProjectState {
config: Arc<AortaConfig>,
project_id: ProjectId,
current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
pending_stores: RwLock<Vec<PendingStore>>,
requested_new_snapshot: AtomicBool,
request_manager: Arc<RequestManager>,
last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
/// Returns the current status of a key.
pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
match self.public_keys.get(public_key) {
Some(&true) => PublicKeyStatus::Enabled,
Some(&false) => PublicKeyStatus::Disabled,
None => PublicKeyStatus::Unknown,
}
}
/// Checks if a public key is enabled.
pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
self.get_public_key_status(public_key) == PublicKeyStatus::Enabled
}
/// Returns `true` if the entire project should be considered
/// disabled (blackholed, deleted etc.).
pub fn disabled(&self) -> bool {
self.disabled
}
/// Returns true if the snapshot is outdated.
pub fn outdated(&self, config: &AortaConfig) -> bool {
// TODO(armin): change this to a value from the config
self.last_fetch < Utc::now() - config.snapshot_expiry
}
/// Returns the project config.
pub fn config(&self) -> &ProjectConfig {
&self.config
}
}
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned +'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send +'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
/// Currently an relay only ever has one trove and that trove can only have
/// one upstream descriptor. As a result of this, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate that the project has activity
/// for the trove. As the trove needs to expire items it uses this
/// timestamp to expire.
pub fn last_event(&self) -> Option<DateTime<Utc>> {
*self.last_event.read()
}
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref() | .and_then(|x| x.last_change.clone())
}
/// Requests an update to the project config to be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
/// Events should be buffered until a key becomes available nor we
/// absolutely know that the key does not exist. There is some
/// internal logic here that based on the age of the snapshot and
/// some global disabled settings will indicate different behaviors.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if!snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
// if the last config fetch was more than a minute ago we just
// accept the event because at this point the dsn might have
// become available upstream.
if snapshot.outdated(&self.config) {
PublicKeyEventAction::Queue
// we just assume the config did not change in the last 60
// seconds and the dsn is indeed not seen yet.
} else {
PublicKeyEventAction::Discard
}
}
}
}
// in the absence of a snapshot we generally queue
None => {
self.request_updated_project_config();
PublicKeyEventAction::Queue
}
}
}
/// Validates the origin.
pub fn is_valid_origin(&self, origin: &Url) -> bool {
self.snapshot_opt().map_or(true, |snapshot| {
let allowed = &snapshot.config().allowed_domains;
!allowed.is_empty()
&& allowed
.iter()
.any(|x| x.as_str() == "*" || Some(x.as_str()) == origin.host_str())
})
}
/// Given a public key and an event this handles an event.
///
/// It either puts it into an internal queue, sends it or discards it. If the item
/// was discarded `false` is returned.
pub fn store_changeset<'a>(&self, changeset: StoreChangeset) -> bool {
if let Some(ref origin) = changeset.meta.origin {
if!self.is_valid_origin(origin) {
debug!(
"{}#{} -> access denied (bad origin {})",
self.project_id, changeset.event, origin
);
return false;
}
}
match self.get_public_key_event_action(&changeset.public_key) {
PublicKeyEventAction::Queue => {
debug!("{}#{} -> pending", self.project_id, changeset.event);
self.pending_stores.write().push(PendingStore {
added_at: Instant::now(),
changeset,
});
true
}
PublicKeyEventAction::Send => {
debug!("{}#{} -> changeset", self.project_id, changeset.event);
self.request_manager
.add_changeset(self.project_id, changeset);
true
}
PublicKeyEventAction::Discard => {
debug!("{}#{} -> discarded", self.project_id, changeset.event);
false
}
}
}
/// Returns `true` if the project state is available.
pub fn snapshot_available(&self) -> bool {
self.current_snapshot.read().is_some()
}
/// Returns the current project snapshot.
pub fn snapshot(&self) -> Arc<ProjectStateSnapshot> {
self.snapshot_opt().expect("Snapshot not yet available")
}
/// Returns the current project snapshot as option.
pub fn snapshot_opt(&self) -> Option<Arc<ProjectStateSnapshot>> {
match *self.current_snapshot.read() {
Some(ref arc) => Some(arc.clone()),
None => None,
}
}
/// Sets a new snapshot.
pub fn set_snapshot(&self, new_snapshot: ProjectStateSnapshot) {
*self.current_snapshot.write() = Some(Arc::new(new_snapshot));
self.retry_pending_events();
}
/// Attempts to send all pending requests now.
fn retry_pending_events(&self) {
let snapshot = self.snapshot();
let mut to_send = vec![];
let timeout = self.config.pending_events_timeout.to_std().unwrap();
// acquire the lock locally so we can then acquire the lock later on send
// without deadlocking
{
let mut lock = self.pending_stores.write();
let pending = mem::replace(&mut *lock, Vec::new());
lock.extend(pending.into_iter().filter_map(|pending_store| {
if pending_store.added_at.elapsed() > timeout {
return None;
}
match snapshot.get_public_key_status(&pending_store.changeset.public_key) {
PublicKeyStatus::Enabled => {
to_send.push(pending_store);
None
}
PublicKeyStatus::Disabled => None,
PublicKeyStatus::Unknown => Some(pending_store),
}
}));
}
for pending_store in to_send {
debug!(
"unpend {}#{}",
self.project_id, pending_store.changeset.event
);
self.store_changeset(pending_store.changeset);
}
}
/// Sets a "project does not exist" snapshot.
///
/// This is used when the server indicates that this project does not actually
/// exist or the relay has no permissions to work with it (these are both
/// reported as the same thing to the relay).
pub fn set_missing_snapshot(&self) {
self.set_snapshot(ProjectStateSnapshot {
last_fetch: Utc::now(),
last_change: None,
disabled: true,
public_keys: HashMap::new(),
slug: None,
config: Default::default(),
rev: None,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_assert_sync() {
struct Assert<T: Sync> {
x: Option<T>,
}
let val: Assert<ProjectState> = Assert { x: None };
assert_eq!(val.x.is_none(), true);
}
} | random_line_split |
|
network_context.rs | },
time::{Duration, SystemTime},
};
use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for response from an RPC request
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increase, should create a
// countermeasure for this.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain exchange request being sent to the
/// network.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
/// Channel to send network messages through P2P service
network_send: flume::Sender<NetworkMessage>,
/// Manages peers to send requests to and updates request stats for the
/// respective peers.
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
}
impl<DB> Clone for SyncNetworkContext<DB> {
fn clone(&self) -> Self {
Self {
network_send: self.network_send.clone(),
peer_manager: self.peer_manager.clone(),
db: self.db.clone(),
}
}
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
tasks: JoinSet<Result<T, String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send +'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send +'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
/// Return first finishing `Ok` future else return `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
// So far every task have failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn new(
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len()!= 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> | .await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync +'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
}
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking(move || rx.recv_timeout(CHAIN_EXCHANGE_TIMEOUT)).await;
let res_duration = SystemTime::now()
.duration_since(req_pre_time)
.unwrap_or_default();
match res {
Ok(Ok(Ok(bs_res))) => {
// Successful response
peer_manager.log_success(peer_id, res_duration).await;
debug!("Succeeded: ChainExchange Request to {peer_id}");
Ok(bs_res)
}
Ok(Ok(Err(e))) => {
// Internal libp2p error, score failure for peer and potentially disconnect
match e {
RequestResponseError::ConnectionClosed
| RequestResponseError::DialFailure
| RequestResponseError::UnsupportedProtocols => {
peer_manager.mark_peer_bad(peer_id).await;
}
// Ignore dropping peer on timeout for now. Can't be confident yet that the
// specified timeout is adequate time.
RequestResponseError::Timeout => {
peer_manager.log_failure(peer_id, res_duration).await;
}
}
debug!("Failed: ChainExchange Request to {peer_id}");
Err(format!("Internal libp2p error: {e:?}"))
}
Ok(Err(_)) | Err(_) => {
// Sender channel internally dropped or timeout, both should log failure which
// will negatively score the peer, but not drop yet.
peer_manager.log_failure(peer_id, res_duration).await;
debug!("Timeout: ChainExchange Request to {peer_id}");
Err(format!("Chain exchange request to {peer_id} timed out"))
}
}
}
/// Send a hello request to the network (does not immediately await
/// response).
pub async fn hello_request(
&self,
peer_id: PeerId,
request: HelloRequest,
) -> anyhow::Result<(PeerId, SystemTime, Option<HelloResponse>)> {
trace!("Sending Hello Message to {}", peer_id);
// Create oneshot channel for receiving response from sent hello.
let (tx, rx) = flume::bounded(1);
// Send request into libp2p service
self.network_send
.send_async(NetworkMessage::HelloRequest {
peer_id,
request,
response_channel: tx,
})
.await
.context("Failed to send hello request: receiver dropped")?;
const HELLO_TIMEOUT: Duration = Duration::from_secs(5);
let sent = SystemTime::now();
let res = tokio::task::spawn_blocking(move || rx.recv_timeout(HELLO_TIMEOUT))
.await?
.ok();
Ok((peer_id, sent, res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicBool, AtomicUsize};
#[tokio::test]
async fn race_batch_ok() {
let mut batch = RaceBatch::new(3);
batch.add(async move { Ok(1) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(1));
}
#[tokio::test]
async fn race_batch_ok_faster() {
let mut batch = RaceBatch::new(3);
batch.add(async move {
tokio::time::sleep(Duration::from_secs(100)).await;
Ok(1)
});
batch.add(async move { Ok(2) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(2));
}
#[tokio::test]
async fn race_batch_none() {
let mut batch: RaceBatch<i32> = RaceBatch::new(3);
batch.add(async move { Err("kaboom".into()) });
batch.add(async move { Err("banana".into()) });
assert_eq!(batch.get_ok().await, None);
}
#[tokio::test]
async fn race_batch_semaphore() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(!exceeded.load(Ordering::Relaxed));
}
#[tokio::test]
async fn race_batch_semaphore_exceeded() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
// We add one more job to exceed the limit
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS + 1);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering:: | {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
}) | identifier_body |
network_context.rs | },
time::{Duration, SystemTime},
};
use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for response from an RPC request
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increase, should create a
// countermeasure for this.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain exchange request being sent to the
/// network.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
/// Channel to send network messages through P2P service
network_send: flume::Sender<NetworkMessage>,
/// Manages peers to send requests to and updates request stats for the
/// respective peers.
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
}
impl<DB> Clone for SyncNetworkContext<DB> {
fn clone(&self) -> Self {
Self {
network_send: self.network_send.clone(),
peer_manager: self.peer_manager.clone(),
db: self.db.clone(),
}
}
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
tasks: JoinSet<Result<T, String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send +'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send +'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
/// Return first finishing `Ok` future else return `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
// So far every task have failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn | (
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len()!= 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync +'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
}
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking(move || rx.recv_timeout(CHAIN_EXCHANGE_TIMEOUT)).await;
let res_duration = SystemTime::now()
.duration_since(req_pre_time)
.unwrap_or_default();
match res {
Ok(Ok(Ok(bs_res))) => {
// Successful response
peer_manager.log_success(peer_id, res_duration).await;
debug!("Succeeded: ChainExchange Request to {peer_id}");
Ok(bs_res)
}
Ok(Ok(Err(e))) => {
// Internal libp2p error, score failure for peer and potentially disconnect
match e {
RequestResponseError::ConnectionClosed
| RequestResponseError::DialFailure
| RequestResponseError::UnsupportedProtocols => {
peer_manager.mark_peer_bad(peer_id).await;
}
// Ignore dropping peer on timeout for now. Can't be confident yet that the
// specified timeout is adequate time.
RequestResponseError::Timeout => {
peer_manager.log_failure(peer_id, res_duration).await;
}
}
debug!("Failed: ChainExchange Request to {peer_id}");
Err(format!("Internal libp2p error: {e:?}"))
}
Ok(Err(_)) | Err(_) => {
// Sender channel internally dropped or timeout, both should log failure which
// will negatively score the peer, but not drop yet.
peer_manager.log_failure(peer_id, res_duration).await;
debug!("Timeout: ChainExchange Request to {peer_id}");
Err(format!("Chain exchange request to {peer_id} timed out"))
}
}
}
/// Send a hello request to the network (does not immediately await
/// response).
pub async fn hello_request(
&self,
peer_id: PeerId,
request: HelloRequest,
) -> anyhow::Result<(PeerId, SystemTime, Option<HelloResponse>)> {
trace!("Sending Hello Message to {}", peer_id);
// Create oneshot channel for receiving response from sent hello.
let (tx, rx) = flume::bounded(1);
// Send request into libp2p service
self.network_send
.send_async(NetworkMessage::HelloRequest {
peer_id,
request,
response_channel: tx,
})
.await
.context("Failed to send hello request: receiver dropped")?;
const HELLO_TIMEOUT: Duration = Duration::from_secs(5);
let sent = SystemTime::now();
let res = tokio::task::spawn_blocking(move || rx.recv_timeout(HELLO_TIMEOUT))
.await?
.ok();
Ok((peer_id, sent, res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicBool, AtomicUsize};
#[tokio::test]
async fn race_batch_ok() {
let mut batch = RaceBatch::new(3);
batch.add(async move { Ok(1) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(1));
}
#[tokio::test]
async fn race_batch_ok_faster() {
let mut batch = RaceBatch::new(3);
batch.add(async move {
tokio::time::sleep(Duration::from_secs(100)).await;
Ok(1)
});
batch.add(async move { Ok(2) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(2));
}
#[tokio::test]
async fn race_batch_none() {
let mut batch: RaceBatch<i32> = RaceBatch::new(3);
batch.add(async move { Err("kaboom".into()) });
batch.add(async move { Err("banana".into()) });
assert_eq!(batch.get_ok().await, None);
}
#[tokio::test]
async fn race_batch_semaphore() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(!exceeded.load(Ordering::Relaxed));
}
#[tokio::test]
async fn race_batch_semaphore_exceeded() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
// We add one more job to exceed the limit
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS + 1);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering:: | new | identifier_name |
network_context.rs | },
time::{Duration, SystemTime},
};
use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for response from an RPC request
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increase, should create a
// countermeasure for this.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain exchange request being sent to the
/// network.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
/// Channel to send network messages through P2P service
network_send: flume::Sender<NetworkMessage>,
/// Manages peers to send requests to and updates request stats for the
/// respective peers.
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
}
impl<DB> Clone for SyncNetworkContext<DB> {
fn clone(&self) -> Self {
Self {
network_send: self.network_send.clone(),
peer_manager: self.peer_manager.clone(),
db: self.db.clone(),
}
}
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
tasks: JoinSet<Result<T, String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send +'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send +'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
/// Return first finishing `Ok` future else return `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
// So far every task have failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn new(
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len()!= 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync +'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => |
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking(move || rx.recv_timeout(CHAIN_EXCHANGE_TIMEOUT)).await;
let res_duration = SystemTime::now()
.duration_since(req_pre_time)
.unwrap_or_default();
match res {
Ok(Ok(Ok(bs_res))) => {
// Successful response
peer_manager.log_success(peer_id, res_duration).await;
debug!("Succeeded: ChainExchange Request to {peer_id}");
Ok(bs_res)
}
Ok(Ok(Err(e))) => {
// Internal libp2p error, score failure for peer and potentially disconnect
match e {
RequestResponseError::ConnectionClosed
| RequestResponseError::DialFailure
| RequestResponseError::UnsupportedProtocols => {
peer_manager.mark_peer_bad(peer_id).await;
}
// Ignore dropping peer on timeout for now. Can't be confident yet that the
// specified timeout is adequate time.
RequestResponseError::Timeout => {
peer_manager.log_failure(peer_id, res_duration).await;
}
}
debug!("Failed: ChainExchange Request to {peer_id}");
Err(format!("Internal libp2p error: {e:?}"))
}
Ok(Err(_)) | Err(_) => {
// Sender channel internally dropped or timeout, both should log failure which
// will negatively score the peer, but not drop yet.
peer_manager.log_failure(peer_id, res_duration).await;
debug!("Timeout: ChainExchange Request to {peer_id}");
Err(format!("Chain exchange request to {peer_id} timed out"))
}
}
}
/// Send a hello request to the network (does not immediately await
/// response).
pub async fn hello_request(
&self,
peer_id: PeerId,
request: HelloRequest,
) -> anyhow::Result<(PeerId, SystemTime, Option<HelloResponse>)> {
trace!("Sending Hello Message to {}", peer_id);
// Create oneshot channel for receiving response from sent hello.
let (tx, rx) = flume::bounded(1);
// Send request into libp2p service
self.network_send
.send_async(NetworkMessage::HelloRequest {
peer_id,
request,
response_channel: tx,
})
.await
.context("Failed to send hello request: receiver dropped")?;
const HELLO_TIMEOUT: Duration = Duration::from_secs(5);
let sent = SystemTime::now();
let res = tokio::task::spawn_blocking(move || rx.recv_timeout(HELLO_TIMEOUT))
.await?
.ok();
Ok((peer_id, sent, res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicBool, AtomicUsize};
#[tokio::test]
async fn race_batch_ok() {
let mut batch = RaceBatch::new(3);
batch.add(async move { Ok(1) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(1));
}
#[tokio::test]
async fn race_batch_ok_faster() {
let mut batch = RaceBatch::new(3);
batch.add(async move {
tokio::time::sleep(Duration::from_secs(100)).await;
Ok(1)
});
batch.add(async move { Ok(2) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(2));
}
#[tokio::test]
async fn race_batch_none() {
let mut batch: RaceBatch<i32> = RaceBatch::new(3);
batch.add(async move { Err("kaboom".into()) });
batch.add(async move { Err("banana".into()) });
assert_eq!(batch.get_ok().await, None);
}
#[tokio::test]
async fn race_batch_semaphore() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(!exceeded.load(Ordering::Relaxed));
}
#[tokio::test]
async fn race_batch_semaphore_exceeded() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
// We add one more job to exceed the limit
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS + 1);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering:: | {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
} | conditional_block |
network_context.rs | },
time::{Duration, SystemTime}, | use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for response from an RPC request
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increase, should create a
// countermeasure for this.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain exchange request being sent to the
/// network.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
/// Channel to send network messages through P2P service
network_send: flume::Sender<NetworkMessage>,
/// Manages peers to send requests to and updates request stats for the
/// respective peers.
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
}
impl<DB> Clone for SyncNetworkContext<DB> {
fn clone(&self) -> Self {
Self {
network_send: self.network_send.clone(),
peer_manager: self.peer_manager.clone(),
db: self.db.clone(),
}
}
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
tasks: JoinSet<Result<T, String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send +'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send +'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
/// Return first finishing `Ok` future else return `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
// So far every task have failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn new(
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len()!= 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync +'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
}
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking(move || rx.recv_timeout(CHAIN_EXCHANGE_TIMEOUT)).await;
let res_duration = SystemTime::now()
.duration_since(req_pre_time)
.unwrap_or_default();
match res {
Ok(Ok(Ok(bs_res))) => {
// Successful response
peer_manager.log_success(peer_id, res_duration).await;
debug!("Succeeded: ChainExchange Request to {peer_id}");
Ok(bs_res)
}
Ok(Ok(Err(e))) => {
// Internal libp2p error, score failure for peer and potentially disconnect
match e {
RequestResponseError::ConnectionClosed
| RequestResponseError::DialFailure
| RequestResponseError::UnsupportedProtocols => {
peer_manager.mark_peer_bad(peer_id).await;
}
// Ignore dropping peer on timeout for now. Can't be confident yet that the
// specified timeout is adequate time.
RequestResponseError::Timeout => {
peer_manager.log_failure(peer_id, res_duration).await;
}
}
debug!("Failed: ChainExchange Request to {peer_id}");
Err(format!("Internal libp2p error: {e:?}"))
}
Ok(Err(_)) | Err(_) => {
// Sender channel internally dropped or timeout, both should log failure which
// will negatively score the peer, but not drop yet.
peer_manager.log_failure(peer_id, res_duration).await;
debug!("Timeout: ChainExchange Request to {peer_id}");
Err(format!("Chain exchange request to {peer_id} timed out"))
}
}
}
/// Send a hello request to the network (does not immediately await
/// response).
pub async fn hello_request(
&self,
peer_id: PeerId,
request: HelloRequest,
) -> anyhow::Result<(PeerId, SystemTime, Option<HelloResponse>)> {
trace!("Sending Hello Message to {}", peer_id);
// Create oneshot channel for receiving response from sent hello.
let (tx, rx) = flume::bounded(1);
// Send request into libp2p service
self.network_send
.send_async(NetworkMessage::HelloRequest {
peer_id,
request,
response_channel: tx,
})
.await
.context("Failed to send hello request: receiver dropped")?;
const HELLO_TIMEOUT: Duration = Duration::from_secs(5);
let sent = SystemTime::now();
let res = tokio::task::spawn_blocking(move || rx.recv_timeout(HELLO_TIMEOUT))
.await?
.ok();
Ok((peer_id, sent, res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicBool, AtomicUsize};
#[tokio::test]
async fn race_batch_ok() {
let mut batch = RaceBatch::new(3);
batch.add(async move { Ok(1) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(1));
}
#[tokio::test]
async fn race_batch_ok_faster() {
let mut batch = RaceBatch::new(3);
batch.add(async move {
tokio::time::sleep(Duration::from_secs(100)).await;
Ok(1)
});
batch.add(async move { Ok(2) });
batch.add(async move { Err("kaboom".into()) });
assert_eq!(batch.get_ok().await, Some(2));
}
#[tokio::test]
async fn race_batch_none() {
let mut batch: RaceBatch<i32> = RaceBatch::new(3);
batch.add(async move { Err("kaboom".into()) });
batch.add(async move { Err("banana".into()) });
assert_eq!(batch.get_ok().await, None);
}
#[tokio::test]
async fn race_batch_semaphore() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed);
}
tokio::task::yield_now().await;
c.fetch_sub(1, Ordering::Relaxed);
Err("banana".into())
});
}
assert_eq!(batch.get_ok().await, None);
assert!(!exceeded.load(Ordering::Relaxed));
}
#[tokio::test]
async fn race_batch_semaphore_exceeded() {
const MAX_JOBS: usize = 30;
let counter = Arc::new(AtomicUsize::new(0));
let exceeded = Arc::new(AtomicBool::new(false));
// We add one more job to exceed the limit
let mut batch: RaceBatch<i32> = RaceBatch::new(MAX_JOBS + 1);
for _ in 0..10000 {
let c = counter.clone();
let e = exceeded.clone();
batch.add(async move {
let prev = c.fetch_add(1, Ordering::Relaxed);
if prev >= MAX_JOBS {
e.fetch_or(true, Ordering::Relaxed | };
| random_line_split |
main.rs | _num() {
println!("Guess the number!");
let secret_number = rand::thread_rng().gen_range(1, 101);
loop {
println!("Please input your guess.");
let mut guess = String::new();
io::stdin()
.read_line(&mut guess)
.expect("Failed to read line");
let guess: u32 = match guess.trim().parse() {
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", guess);
match guess.cmp(&secret_number) {
Ordering::Less => println!("Too small!"),
Ordering::Greater => println!("Too big!"),
Ordering::Equal => {
println!("You win!");
break;
}
}
}
println!("The secret number is: {}", secret_number);
}
fn do_compound() {
let x: (i32, f64, u8) = (500, 6.4, 1);
let five_hundred = x.0;
let six_point_four = x.1;
let one = x.2;
println!(
"five_hundred: {}, six_point_four:{}, other:{}",
five_hundred, six_point_four, one
);
let a: [i32; 5] = [1, 2, 3, 4, 5];
println!(" Array element :{}", a[0]);
}
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b''{
return &s[0..i];
}
}
&s[..]
}
fn string_slice() {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word)
}
use std::collections::HashMap;
fn do_map() {
let mut map = HashMap::new();
map.insert(1, 2);
println!("map :{:?}", map);
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
let mut scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
println!("scores map :{:?}", scores);
for (key, value) in &scores {
println!("key:{}: value: {}", key, value);
}
let team_name = String::from("Blue");
println! {"team name : {:?}", scores.get(&team_name)};
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(10);
//println!("word: {}", word);
*count += 1;
println!("count:{}", *count);
}
println!("{:?}", map);
//
let mut s = String::from("你好");
s.push_str(", Bruce Li!");
s.push('耶');
println!("{}", s);
let s1 = String::from("Rust, ");
let s2 = String::from("faster!");
//// note s1 has been moved here and can no longer be used
let s3 = s1 + &s2;
println!("s3:{}", s3);
do_string();
}
fn do_string() {
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
let s = s1 + "-" + &s2 + "-" + &s3;
println!("s: {}", s);
let s4 = String::from("suffix!");
let s = format!("{}-{}-{}", s2, s3, s4);
println!("s: {}", s);
//.bytes() //raw number
// for c in s.chars() {
// println!("{}", c);
// }
}
fn do_err() {
use std::fs::File;
//other way: let f = File::open("hello.txt").unwrap();
//let f = File::open("hello.txt").expect("Failed to open hello.txt");
let f = File::open("README.md");
let f = match f {
Ok(file) => file,
Err(error) => panic!("Problem opening the file: {:?}", error),
};
//A Shortcut for Propagating Errors: the? Operator
}
fn largest(list: &[i32]) -> i32 {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
//Another way we could implement largest is for the function to
// return a reference to a T value in the slice. I
fn get_gt<T: PartialOrd + Copy>(list: &[T]) -> T {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
struct Point<T, U> {
x: T,
y: U,
}
impl<T, U> Point<T, U> {
fn mixup<V, W>(self, other: Point<V, W>) -> Point<T, W> {
Point {
x: self.x,
y: other.y,
}
}
}
fn do_trait() {
let number_list = vec![34, 50, 25, 100, 65];
let result = get_gt(&number_list);
println!("The largest number is {}", result);
let char_list = vec!['y','m', 'a', 'q'];
let result = get_gt(&char_list);
println!("The largest char is {}", result);
}
fn do_generic() {
let number_list = vec![34, 50, 25, 100, 65];
let result = largest(&number_list);
println!("The largest number is {}", result);
let number_list = vec![102, 34, 6000, 89, 54, 2, 43, 8];
let result = largest(&number_list);
println!("The largest number is {}", result);
let p1 = Point { x: 5, y: 10.4 };
let p2 = Point { x: "Hello", y: 'c' };
let p3 = p1.mixup(p2);
println!("p3.x = {}, p3.y = {}", p3.x, p3.y);
do_trait()
}
fn do_closure() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
let total: i32 = v1_iter.sum();
assert_eq!(total, 6);
let v1: Vec<i32> = vec![1, 2, 3];
let v2: Vec<_> = v1.iter().map(|x| x + 1).collect();
assert_eq!(v2, vec![2, 3, 4]);
guessing_number::run_shoes_test();
guessing_number::calling_next_directly();
}
fn do_smart_p() {
let x = 5;
let y = &x;
assert_eq!(5, x);
assert_eq!(5, *y);
let x1 = 5;
let y1 = Box::new(x);
assert_eq!(5, x1);
assert_eq!(5, *y1);
}
fn do_concurrency() {
use std::thread;
use std::time::Duration;
let handle = thread::spawn(|| {
for i in 1..6 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {} from the main thread!", i);
thread::sleep(Duration::from_millis(1));
}
handle.join().unwrap();
do_concurrency1();
}
fn do_concurrency1() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let vals = vec![
String::from("你好!"),
String::from("你去做什么?"),
String::from("Why?"),
String::from("那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
// thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
do_concurrency2();
do_concurrency3();
do_match()
}
fn do_match_p() {
println!("one");
}
fn do_match() {
let x = 1;
match x {
1 => do_match_p(),
2 => println!("two"),
3 => println!("three"),
_ => println!("anything"),
}
//Matching Named Variables
let x = Some(5);
let y = 10;
match x {
Some(50) => println!("Got 50"),
Some(y) => println!("Matched, y = {:?}", y),
_ => println!("Default case, x = {:?}", x),
}
println!("at the end: x = {:?}, y = {:?}", x, y);
let x = 1;
match x {
1 | 2 => println!("one or two"),
3 => println!("three"),
_ => println!("anything"),
}
let x = 2;
match x {
1...5 => println!("one through five"),
_ => println!("something else"),
}
let x = 'A';
match x {
'a'...'j' => println!("early ASCII letter"),
'k'...'z' => println!("late ASCII letter"),
'A'...'Z' => println!("UP ASCII letter"),
_ => println!("something else"),
}
//Destructuring to Break Apart Values
let p = Point { x: 0, y: 7 };
match p {
Point { x, y: 0 } => println!("On the x axis at {}", x),
Point { x: 0, y } => println!("On the y axis at {}", y),
Point { x, y } => println!("On neither axis: ({}, {})", x, y),
}
let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
match msg {
Message::ChangeColor(Color::Rgb(r, g, b)) => {
println!("Change the color to red {}, green {}, and blue {}", r, g, b)
}
Message::ChangeColor(Color::Hsv(h, s, v)) => println!(
"Change the color to hue {}, saturation {}, and value {}",
h, s, v
),
_ => (),
}
//bind
do_match1();
//Rust's unsafe code
do_unsafe();
}
//Rust unsafe code demo
fn do_unsafe() {
//doesn’t enforce these memory safety guarantees.
//Gaining extra superpowers.
//You can take four actions in unsafe Rust
//Dereference a raw pointer
//Call an unsafe function or method
//Access or modify a mutable static variable
//Implement an unsafe trait
}
fn do_match1() {
let msg = MessageNum::Hello { id: 5 };
match msg {
MessageNum::Hello {
id: id_variable @ 3...7,
} => println!("Found an id in range: {}", id_variable),
MessageNum::Hello { id: 10...12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(&self);
}
struct Duck();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool,
}
impl Quack for RandomBird {
fn quack(&self) {
if!self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
// and why the hell not!
impl Quack for i32 {
fn quack(&self) {
for i in 0..*self {
print!("quack {} ", i);
}
println!();
}
}
trait Name {
fn name(&self) -> String;
fn upper_case(&self) -> String {
self.name().to_uppercase()
}
}
struct Toy();
impl Name for Toy {
fn name(&self) -> String {
"Toy".to_string()
}
}
fn quack() {
let duck1 = Duck();
let duck2 = RandomBird { is_a_parrot: false };
let parrot = RandomBird { is_a_parrot: true };
let i = 4;
let ducks: Vec<&Quack> = vec![&duck1, &duck2, &parrot, &i];
for d in &ducks {
d.quack();
}
let t = Toy();
assert_eq!(t.name(), "Toy".to_string());
assert_eq!(t.upper_case(), "TOY".to_string());
}
fn do_oop() {
let nvalue = Box::new(78);
let fvalue = Box::new(98.88);
let vc: Vec<Box<Show>> = vec![nvalue, fvalue];
for d in &vc {
println!("show {}", d.show());
}
//oop interface
quack();
}
fn do_float() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
println!("x:{}, y:{} ", x, y);
do_compound();
//expression
println!("zero number ; {}", zero_plus(23));
let a = [10, 20];
for element in a.iter() {
println!("the value is: {}", element);
}
for number in (1..4).rev() {
print!("{}, ", number);
}
//slice
let s = String::from("The Rust Programming Language");
let s1 = &s;
let s2 = &s;
println!("s1: {}, s2: {}", s1, s2);
let s3 = &s;
println!("s3: {}", s3);
string_slice();
do_struct();
do_map();
//do_err();
do_generic();
do_closure();
do_smart_p();
do_concurrency();
do_oop();
}
fn zero_plus(i: i32) -> i32 {
0 + i
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
//fn area(r: &Rectangle) -> u32 {
// r.height * r.width
//}
impl Rectangle {
fn area(&self) -> u32 {
self.height * self.width
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
fn do_struct() {
let rect1 = Rectangle {
width: 20,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("rect1 area: {}", rect1.area());
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
println!("rect1: {:?}", &(Rectangle::square(3)));
// println!(
// "The area of the rectangle is {} square pixels.",
// area(&rect1)
// );
// println!("rect1: {:?}", &rect1);
}
fn do_init() {
//mut and default immutable
let mut i = 0;
println!("init i :{}", i);
i = 100;
println!("change i: {}", i);
// const declare
const MAX_POINTS: u32 = 100_000;
println!("constant variable MAX_POINT: {}", MAX_POINTS);
//shadowing
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
let spaces = " ";
let spaces = spaces.len();
println!("space number :{}", spaces);
// floating-point numbers
do_float();
//guess_num()
}
use std::fmt;
fn show_item<T: fmt::Display>(item: T) {
println!("Item: {}", item);
}
struct CanDisplay;
impl fmt::Display for CanDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CanDisplay")
}
}
struct AlsoDisplay;
impl fmt::Display for AlsoDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "AlsoDisplay")
}
}
//1. Static Dispatch
fn do_static_dispatch() {
let a: CanDisplay = CanDisplay;
let b: AlsoDisplay = AlsoDisplay;
show_item(a); // stdout `Item: CanDisplay`
show_item(b); // stdout `Item: AlsoDisplay`
}
fn get_numbers(a: u32, b: u32) -> impl Iterator<Item = u32> {
(a..b).filter(|x| x % 100 == 0)
}
//2. Dynamic Dispatch
// impl trait
fn do_advanced_trait() {
for n in get_numbers(100, 1001) {
print!("{} \t", n);
}
}
//3. Specifying Placeholder Types in Trait Definitions with Associated Types
// pub trait Iterator {
// type Item;
// fn next(&mut self) -> Option<Self::Item>; | // }
//// Item is the placeholder type.
///
// 4. Fully Qualified Syntax for Disambiguation: Calling Methods with the Same Name
trait Pilot {
fn fly(&self);
}
trait Wizard {
fn fly(&self);
}
struct Human;
impl Pilot for Human {
fn fly(&self) {
println!("This is your captain speaking. Pilot!");
}
}
impl Wizard for Human {
fn fly(&self) {
println!("Wizard, up!");
}
}
impl Human {
fn fly(&self) {
println!("*waving arms furiously*");
}
}
fn do_advanced_trait2() {
let person = Human;
Pilot::fly(&person);
Wizard::fly(&person);
person.fly();
}
trait Animal {
fn baby_name() -> String;
}
struct Dog;
impl Dog {
fn baby_name() -> String {
String::from("Spot")
}
}
impl Animal for Dog {
fn baby_name() -> String {
String::from("puppy")
}
}
fn do_advanced_trait3() {
println!("A baby dog is called a {}", Dog::baby_name());
println!("A baby dog is called a {}", <Dog as Animal>::baby_name());
}
trait OutlinePrint: fmt::Display {
fn outline_print(&self) {
let output = self.to_string();
let len = output.len();
println!("{}", "*".repeat(len + 4));
println!("*{}*", " ".repeat(len + 2));
println!("* {} *", output);
println!("*{}*", " ".repeat(len + 2));
println!("{}", "*".repeat(len + 4));
}
}
struct PointXY {
x: i32,
y: i32,
}
impl OutlinePrint for PointXY {}
impl fmt::Display for PointXY {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
//5. Using Super-traits to Require One Trait’s Functionality Within Another Trait
fn do_advanced_trait4() {
let xy = PointXY { x: 10, y: 30 };
xy.outline_print();
}
//6. Using the New-type Pattern to Implement External Traits on External Types
struct Wrapper(Vec<String>);
impl fmt::Display for Wrapper {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self.0.join(", "))
}
}
fn do_advanced_trait5() {
let w = Wrapper(vec![String::from("Hi, "), String::from("Rust!")]);
println!("w = {}", w);
}
fn do_trait_dispatch() {
do_static_dispatch();
do_advanced_trait(); | random_line_split |
|
main.rs | _num() {
println!("Guess the number!");
let secret_number = rand::thread_rng().gen_range(1, 101);
loop {
println!("Please input your guess.");
let mut guess = String::new();
io::stdin()
.read_line(&mut guess)
.expect("Failed to read line");
let guess: u32 = match guess.trim().parse() {
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", guess);
match guess.cmp(&secret_number) {
Ordering::Less => println!("Too small!"),
Ordering::Greater => println!("Too big!"),
Ordering::Equal => {
println!("You win!");
break;
}
}
}
println!("The secret number is: {}", secret_number);
}
fn do_compound() {
let x: (i32, f64, u8) = (500, 6.4, 1);
let five_hundred = x.0;
let six_point_four = x.1;
let one = x.2;
println!(
"five_hundred: {}, six_point_four:{}, other:{}",
five_hundred, six_point_four, one
);
let a: [i32; 5] = [1, 2, 3, 4, 5];
println!(" Array element :{}", a[0]);
}
fn first_word(s: &str) -> &str {
let bytes = s.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b''{
return &s[0..i];
}
}
&s[..]
}
fn string_slice() {
let my_string = String::from("Rust Async");
// first_word works on slices of `String`s
let _word = first_word(&my_string[..]);
let my_string_literal = "Rust Async";
// first_word works on slices of string literals
let _word = first_word(&my_string_literal[..]);
// Because string literals *are* string slices already,
// this works too, without the slice syntax!
let _word = first_word(my_string_literal);
println!(" word: {}", _word)
}
use std::collections::HashMap;
fn do_map() {
let mut map = HashMap::new();
map.insert(1, 2);
println!("map :{:?}", map);
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
let mut scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
println!("scores map :{:?}", scores);
for (key, value) in &scores {
println!("key:{}: value: {}", key, value);
}
let team_name = String::from("Blue");
println! {"team name : {:?}", scores.get(&team_name)};
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(10);
//println!("word: {}", word);
*count += 1;
println!("count:{}", *count);
}
println!("{:?}", map);
//
let mut s = String::from("你好");
s.push_str(", Bruce Li!");
s.push('耶');
println!("{}", s);
let s1 = String::from("Rust, ");
let s2 = String::from("faster!");
//// note s1 has been moved here and can no longer be used
let s3 = s1 + &s2;
println!("s3:{}", s3);
do_string();
}
fn do_string() {
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
let s = s1 + "-" + &s2 + "-" + &s3;
println!("s: {}", s);
let s4 = String::from("suffix!");
let s = format!("{}-{}-{}", s2, s3, s4);
println!("s: {}", s);
//.bytes() //raw number
// for c in s.chars() {
// println!("{}", c);
// }
}
fn do_err() {
use std::fs::File;
//other way: let f = File::open("hello.txt").unwrap();
//let f = File::open("hello.txt").expect("Failed to open hello.txt");
let f = File::open("README.md");
let f = match f {
Ok(file) => file,
Err(error) => panic!("Problem opening the file: {:?}", error),
};
//A Shortcut for Propagating Errors: the? Operator
}
fn largest(list: &[i32]) -> i32 {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
//Another way we could implement largest is for the function to
// return a reference to a T value in the slice. I
fn get_gt<T: PartialOrd + Copy>(list: &[T]) -> T {
let mut largest = list[0];
for &item in list.iter() {
if item > largest {
largest = item;
}
}
largest
}
struct Point<T, U> {
x: T,
y: U,
}
impl<T, U> Point<T, U> {
fn mixup<V, W>(self, other: Point<V, W>) -> Point<T, W> {
Point {
x: self.x,
y: other.y,
}
}
}
fn do_trait() {
let number_list = vec![34, 50, 25, 100, 65];
let result = get_gt(&number_list);
println!("The largest number is {}", result);
let char_list = vec!['y','m', 'a', 'q'];
let result = get_gt(&char_list);
println!("The largest char is {}", result);
}
fn do_generic() {
let number_list = vec![34, 50, 25, 100, 65];
let result = largest(&number_list);
println!("The largest number is {}", result);
let number_list = vec![102, 34, 6000, 89, 54, 2, 43, 8];
let result = largest(&number_list);
println!("The largest number is {}", result);
let p1 = Point { x: 5, y: 10.4 };
let p2 = Point { x: "Hello", y: 'c' };
let p3 = p1.mixup(p2);
println!("p3.x = {}, p3.y = {}", p3.x, p3.y);
do_trait()
}
fn do_closure() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
let total: i32 = v1_iter.sum();
assert_eq!(total, 6);
let v1: Vec<i32> = vec![1, 2, 3];
let v2: Vec<_> = v1.iter().map(|x| x + 1).collect();
assert_eq!(v2, vec![2, 3, 4]);
guessing_number::run_shoes_test();
guessing_number::calling_next_directly();
}
fn do_smart_p() {
let x = 5;
let y = &x;
assert_eq!(5, x);
assert_eq!(5, *y);
let x1 = 5;
let y1 = Box::new(x);
assert_eq!(5, x1);
assert_eq!(5, *y1);
}
fn do_concurrency() {
use std::thread;
use std::time::Duration;
let handle = thread::spawn(|| {
for i in 1..6 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {} from the main thread!", i);
thread::sleep(Duration::from_millis(1));
}
handle.join().unwrap();
do_concurrency1();
}
fn do_concurrency1() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let vals = vec![
String::from("你好!"),
String::from("你去做什么?"),
String::from("Why?"),
String::from("那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
// thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
do_concurrency2();
do_concurrency3();
do_match()
}
fn do_match_p() {
println!("one");
}
fn do_match() {
let x = 1;
match x {
1 => do_match_p(),
2 => println!("two"),
3 => println!("three"),
_ => println!("anything"),
}
//Matching Named Variables
let x = Some(5);
let y = 10;
match x {
Some(50) => println!("Got 50"),
Some(y) => println!("Matched, y = {:?}", y),
_ => println!("Default case, x = {:?}", x),
}
println!("at the end: x = {:?}, y = {:?}", x, y);
let x = 1;
match x {
1 | 2 => println!("one or two"),
3 => println!("three"),
_ => println!("anything"),
}
let x = 2;
match x {
1...5 => println!("one through five"),
_ => println!("something else"),
}
let x = 'A';
match x {
'a'...'j' => println!("early ASCII letter"),
'k'...'z' => println!("late ASCII letter"),
'A'...'Z' => println!("UP ASCII letter"),
_ => println!("something else"),
}
//Destructuring to Break Apart Values
let p = Point { x: 0, y: 7 };
match p {
Point { x, y: 0 } => println!("On the x axis at {}", x),
Point { x: 0, y } => println!("On the y axis at {}", y),
Point { x, y } => println!("On neither axis: ({}, {})", x, y),
}
let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
match msg {
Message::ChangeColor(Color::Rgb(r, g, b)) => {
println!("Change the color to red {}, green {}, and blue {}", r, g, b)
}
Message::ChangeColor(Color::Hsv(h, s, v)) => println!(
"Change the color to hue {}, saturation {}, and value {}",
h, s, v
),
_ => (),
}
//bind
do_match1();
//Rust's unsafe code
do_unsafe();
}
//Rust unsafe code demo
fn do_unsafe() {
//doesn’t enforce these memory safety guarantees.
//Gaining extra superpowers.
//You can take four actions in unsafe Rust
//Dereference a raw pointer
//Call an unsafe function or method
//Access or modify a mutable static variable
//Implement an unsafe trait
}
fn do_match1() {
let msg = MessageNum::Hello { id: 5 };
match msg {
MessageNum::Hello {
id: id_variable @ 3...7,
} => println!("Found an id in range: {}", id_variable),
MessageNum::Hello { id: 10...12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(& | );
}
struct Duck();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool,
}
impl Quack for RandomBird {
fn quack(&self) {
if!self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
// and why the hell not!
impl Quack for i32 {
fn quack(&self) {
for i in 0..*self {
print!("quack {} ", i);
}
println!();
}
}
trait Name {
fn name(&self) -> String;
fn upper_case(&self) -> String {
self.name().to_uppercase()
}
}
struct Toy();
impl Name for Toy {
fn name(&self) -> String {
"Toy".to_string()
}
}
fn quack() {
let duck1 = Duck();
let duck2 = RandomBird { is_a_parrot: false };
let parrot = RandomBird { is_a_parrot: true };
let i = 4;
let ducks: Vec<&Quack> = vec![&duck1, &duck2, &parrot, &i];
for d in &ducks {
d.quack();
}
let t = Toy();
assert_eq!(t.name(), "Toy".to_string());
assert_eq!(t.upper_case(), "TOY".to_string());
}
fn do_oop() {
let nvalue = Box::new(78);
let fvalue = Box::new(98.88);
let vc: Vec<Box<Show>> = vec![nvalue, fvalue];
for d in &vc {
println!("show {}", d.show());
}
//oop interface
quack();
}
fn do_float() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
println!("x:{}, y:{} ", x, y);
do_compound();
//expression
println!("zero number ; {}", zero_plus(23));
let a = [10, 20];
for element in a.iter() {
println!("the value is: {}", element);
}
for number in (1..4).rev() {
print!("{}, ", number);
}
//slice
let s = String::from("The Rust Programming Language");
let s1 = &s;
let s2 = &s;
println!("s1: {}, s2: {}", s1, s2);
let s3 = &s;
println!("s3: {}", s3);
string_slice();
do_struct();
do_map();
//do_err();
do_generic();
do_closure();
do_smart_p();
do_concurrency();
do_oop();
}
fn zero_plus(i: i32) -> i32 {
0 + i
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
//fn area(r: &Rectangle) -> u32 {
// r.height * r.width
//}
impl Rectangle {
fn area(&self) -> u32 {
self.height * self.width
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
fn do_struct() {
let rect1 = Rectangle {
width: 20,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("rect1 area: {}", rect1.area());
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
println!("rect1: {:?}", &(Rectangle::square(3)));
// println!(
// "The area of the rectangle is {} square pixels.",
// area(&rect1)
// );
// println!("rect1: {:?}", &rect1);
}
fn do_init() {
//mut and default immutable
let mut i = 0;
println!("init i :{}", i);
i = 100;
println!("change i: {}", i);
// const declare
const MAX_POINTS: u32 = 100_000;
println!("constant variable MAX_POINT: {}", MAX_POINTS);
//shadowing
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
let spaces = " ";
let spaces = spaces.len();
println!("space number :{}", spaces);
// floating-point numbers
do_float();
//guess_num()
}
use std::fmt;
fn show_item<T: fmt::Display>(item: T) {
println!("Item: {}", item);
}
struct CanDisplay;
impl fmt::Display for CanDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CanDisplay")
}
}
struct AlsoDisplay;
impl fmt::Display for AlsoDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "AlsoDisplay")
}
}
//1. Static Dispatch
fn do_static_dispatch() {
let a: CanDisplay = CanDisplay;
let b: AlsoDisplay = AlsoDisplay;
show_item(a); // stdout `Item: CanDisplay`
show_item(b); // stdout `Item: AlsoDisplay`
}
fn get_numbers(a: u32, b: u32) -> impl Iterator<Item = u32> {
(a..b).filter(|x| x % 100 == 0)
}
//2. Dynamic Dispatch
// impl trait
fn do_advanced_trait() {
for n in get_numbers(100, 1001) {
print!("{} \t", n);
}
}
//3. Specifying Placeholder Types in Trait Definitions with Associated Types
// pub trait Iterator {
// type Item;
// fn next(&mut self) -> Option<Self::Item>;
// }
//// Item is the placeholder type.
///
// 4. Fully Qualified Syntax for Disambiguation: Calling Methods with the Same Name
trait Pilot {
fn fly(&self);
}
trait Wizard {
fn fly(&self);
}
struct Human;
impl Pilot for Human {
fn fly(&self) {
println!("This is your captain speaking. Pilot!");
}
}
impl Wizard for Human {
fn fly(&self) {
println!("Wizard, up!");
}
}
impl Human {
fn fly(&self) {
println!("*waving arms furiously*");
}
}
fn do_advanced_trait2() {
let person = Human;
Pilot::fly(&person);
Wizard::fly(&person);
person.fly();
}
trait Animal {
fn baby_name() -> String;
}
struct Dog;
impl Dog {
fn baby_name() -> String {
String::from("Spot")
}
}
impl Animal for Dog {
fn baby_name() -> String {
String::from("puppy")
}
}
fn do_advanced_trait3() {
println!("A baby dog is called a {}", Dog::baby_name());
println!("A baby dog is called a {}", <Dog as Animal>::baby_name());
}
trait OutlinePrint: fmt::Display {
fn outline_print(&self) {
let output = self.to_string();
let len = output.len();
println!("{}", "*".repeat(len + 4));
println!("*{}*", " ".repeat(len + 2));
println!("* {} *", output);
println!("*{}*", " ".repeat(len + 2));
println!("{}", "*".repeat(len + 4));
}
}
struct PointXY {
x: i32,
y: i32,
}
impl OutlinePrint for PointXY {}
impl fmt::Display for PointXY {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
//5. Using Super-traits to Require One Trait’s Functionality Within Another Trait
fn do_advanced_trait4() {
let xy = PointXY { x: 10, y: 30 };
xy.outline_print();
}
//6. Using the New-type Pattern to Implement External Traits on External Types
struct Wrapper(Vec<String>);
impl fmt::Display for Wrapper {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self.0.join(", "))
}
}
fn do_advanced_trait5() {
let w = Wrapper(vec![String::from("Hi, "), String::from("Rust!")]);
println!("w = {}", w);
}
fn do_trait_dispatch() {
do_static_dispatch();
do_advanced_trait | self | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.