| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
| pageserver.rs |
").zip(get_arg("relish-storage-region"))
{
Some(RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id: get_arg("relish-storage-access-key"),
secret_access_key: get_arg("relish-storage-secret-access-key"),
})
} else {
None
};
Self {
listen_pg_addr: get_arg("listen-pg"),
listen_http_addr: get_arg("listen-http"),
checkpoint_distance: get_arg("checkpoint_distance"),
checkpoint_period: get_arg("checkpoint_period"),
gc_horizon: get_arg("gc_horizon"),
gc_period: get_arg("gc_period"),
pg_distrib_dir: get_arg("postgres-distrib"),
auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
auth_type: get_arg("auth-type"),
relish_storage,
}
}
/// Fill missing values in `self` with `other`
fn or(self, other: CfgFileParams) -> Self {
// TODO cleaner way to do this
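// Note: `Option::or` keeps the value already present on `self` and only falls
// back to `other` when the field is `None`, so `self` always takes precedence.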
Self {
listen_pg_addr: self.listen_pg_addr.or(other.listen_pg_addr),
listen_http_addr: self.listen_http_addr.or(other.listen_http_addr),
checkpoint_distance: self.checkpoint_distance.or(other.checkpoint_distance),
checkpoint_period: self.checkpoint_period.or(other.checkpoint_period),
gc_horizon: self.gc_horizon.or(other.gc_horizon),
gc_period: self.gc_period.or(other.gc_period),
pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
auth_validation_public_key_path: self
.auth_validation_public_key_path
.or(other.auth_validation_public_key_path),
auth_type: self.auth_type.or(other.auth_type),
relish_storage: self.relish_storage.or(other.relish_storage),
}
}
/// Create a PageServerConf from these string parameters
fn try_into_config(&self) -> Result<PageServerConf> {
let workdir = PathBuf::from(".");
let listen_pg_addr = match self.listen_pg_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_PG_LISTEN_ADDR.to_owned(),
};
let listen_http_addr = match self.listen_http_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_HTTP_LISTEN_ADDR.to_owned(),
};
let checkpoint_distance: u64 = match self.checkpoint_distance.as_ref() {
Some(checkpoint_distance_str) => checkpoint_distance_str.parse()?,
None => DEFAULT_CHECKPOINT_DISTANCE,
};
let checkpoint_period = match self.checkpoint_period.as_ref() {
Some(checkpoint_period_str) => humantime::parse_duration(checkpoint_period_str)?,
None => DEFAULT_CHECKPOINT_PERIOD,
};
let gc_horizon: u64 = match self.gc_horizon.as_ref() {
Some(horizon_str) => horizon_str.parse()?,
None => DEFAULT_GC_HORIZON,
};
let gc_period = match self.gc_period.as_ref() {
Some(period_str) => humantime::parse_duration(period_str)?,
None => DEFAULT_GC_PERIOD,
};
let pg_distrib_dir = match self.pg_distrib_dir.as_ref() {
Some(pg_distrib_dir_str) => PathBuf::from(pg_distrib_dir_str),
None => env::current_dir()?.join("tmp_install"),
};
let auth_validation_public_key_path = self
.auth_validation_public_key_path
.as_ref()
.map(PathBuf::from);
let auth_type = self
.auth_type
.as_ref()
.map_or(Ok(AuthType::Trust), |auth_type| {
AuthType::from_str(auth_type)
})?;
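// Note: when no auth type is supplied, the `map_or` above defaults to
// `AuthType::Trust`; otherwise the string is parsed into an `AuthType`.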
if !pg_distrib_dir.join("bin/postgres").exists() {
bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
}
if auth_type == AuthType::ZenithJWT {
ensure!(
auth_validation_public_key_path.is_some(),
"Missing auth_validation_public_key_path when auth_type is ZenithJWT"
);
let path_ref = auth_validation_public_key_path.as_ref().unwrap();
ensure!(
path_ref.exists(),
format!("Can't find auth_validation_public_key at {:?}", path_ref)
);
}
let relish_storage_config =
self.relish_storage
.as_ref()
.map(|storage_params| match storage_params.clone() {
RelishStorage::Local { local_path } => {
RelishStorageConfig::LocalFs(PathBuf::from(local_path))
}
RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
} => RelishStorageConfig::AwsS3(S3Config {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
}),
});
Ok(PageServerConf {
daemonize: false,
listen_pg_addr,
listen_http_addr,
checkpoint_distance,
checkpoint_period,
gc_horizon,
gc_period,
superuser: String::from(DEFAULT_SUPERUSER),
workdir,
pg_distrib_dir, |
auth_validation_public_key_path,
auth_type,
relish_storage_config,
})
}
}
fn main() -> Result<()> {
let arg_matches = App::new("Zenith page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.arg(
Arg::with_name("listen-pg")
.short("l")
.long("listen-pg")
.alias("listen") // keep some compatibility
.takes_value(true)
.help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("listen-http")
.long("listen-http")
.alias("http_endpoint") // keep some compatibility
.takes_value(true)
.help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("daemonize")
.short("d")
.long("daemonize")
.takes_value(false)
.help("Run in the background"),
)
.arg(
Arg::with_name("init")
.long("init")
.takes_value(false)
.help("Initialize pageserver repo"),
)
.arg(
Arg::with_name("checkpoint_distance")
.long("checkpoint_distance")
.takes_value(true)
.help("Distance from current LSN to perform checkpoint of in-memory layers"),
)
.arg(
Arg::with_name("checkpoint_period")
.long("checkpoint_period")
.takes_value(true)
.help("Interval between checkpoint iterations"),
)
.arg(
Arg::with_name("gc_horizon")
.long("gc_horizon")
.takes_value(true)
.help("Distance from current LSN to perform all wal records cleanup"),
)
.arg(
Arg::with_name("gc_period")
.long("gc_period")
.takes_value(true)
.help("Interval between garbage collector iterations"),
)
.arg(
Arg::with_name("workdir")
.short("D")
.long("workdir")
.takes_value(true)
.help("Working directory for the pageserver"),
)
.arg(
Arg::with_name("postgres-distrib")
.long("postgres-distrib")
.takes_value(true)
.help("Postgres distribution directory"),
)
.arg(
Arg::with_name("create-tenant")
.long("create-tenant")
.takes_value(true)
.help("Create tenant during init")
.requires("init"),
)
.arg(
Arg::with_name("auth-validation-public-key-path")
.long("auth-validation-public-key-path")
.takes_value(true)
.help("Path to public key used to validate jwt signature"),
)
.arg(
Arg::with_name("auth-type")
.long("auth-type")
.takes_value(true)
.help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
)
.arg(
Arg::with_name("relish-storage-local-path")
.long("relish-storage-local-path")
.takes_value(true)
.help("Path to the local directory, to be used as an external relish storage")
.conflicts_with_all(&[
"relish-storage-s3-bucket",
"relish-storage-region",
"relish-storage-access-key",
"relish-storage-secret-access-key",
]),
)
.arg(
Arg::with_name("relish-storage-s3-bucket")
.long("relish-storage-s3-bucket")
.takes_value(true)
.help("Name of the AWS S3 bucket to use an external relish storage")
.requires("relish-storage-region"),
)
.arg(
Arg::with_name("relish-storage-region")
.long("relish-storage-region")
.takes_value(true)
.help("Region of the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-access-key")
.long("relish-storage-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-secret-access-key")
.long("relish-storage-secret-access | random_line_split |
|
| executor.rs |
config::{CrateProperties, is_completed};
use doc_decoder::DocData;
use lib_configs;
/// Options passed to `exec_all`,
/// as in `cpp_to_rust_generator::config::Config`.
pub struct ExecConfig {
pub write_dependencies_local_paths: bool,
pub cache_usage: CacheUsage,
pub write_cache: bool,
pub debug_logging_config: DebugLoggingConfig,
pub quiet_mode: bool,
}
/// Executes generator for `libs` with given configuration.
pub fn exec_all(libs: Vec<String>,
cache_dir: PathBuf,
output_dir: PathBuf,
config: ExecConfig)
-> Result<()> {
if config.quiet_mode {
let mut logger = log::default_logger();
logger.set_category_settings(log::Status,
log::LoggerSettings {
file_path: None,
write_to_stderr: false,
});
}
let crate_templates_path =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).with_added("crate_templates");
let final_libs = if libs.iter().any(|x| x == "all") {
vec!["core".to_string(),
"gui".to_string(),
"widgets".to_string(),
"ui_tools".to_string(),
"3d_core".to_string(),
"3d_render".to_string(),
"3d_input".to_string(),
"3d_logic".to_string(),
"3d_extras".to_string()]
} else {
libs
};
let mut configs: Vec<Config> = Vec::new();
for sublib_name in final_libs {
let lib_cache_dir = cache_dir.with_added(format!("qt_{}", sublib_name));
let lib_crate_templates_path = crate_templates_path.with_added(&sublib_name);
let lib_output_dir = output_dir.with_added(format!("qt_{}", sublib_name));
let mut dependency_paths = Vec::new();
for dep in lib_dependencies(&sublib_name)? {
let path = cache_dir.with_added(format!("qt_{}", dep));
if !configs.iter().any(|c| c.cache_dir_path() == &path) && !is_completed(&path) {
return Err(format!("\"{}\" depends on \"{}\" but processing \
in \"{}\" directory is not completed.",
sublib_name,
dep,
path.display())
.into());
}
dependency_paths.push(path);
}
if is_completed(&lib_cache_dir) && config.cache_usage.can_skip_all() {
log::status("No processing! cpp_to_rust uses previous results.");
log::status("Run with -C0 to force full processing.");
continue;
}
configs.push(make_config(&sublib_name,
lib_cache_dir,
lib_output_dir,
lib_crate_templates_path,
dependency_paths,
&config)?);
}
exec(configs.into_iter())?;
Ok(())
}
/// Executes the generator for a single Qt module with given configuration.
fn make_config(sublib_name: &str,
cache_dir: PathBuf,
output_dir: PathBuf,
crate_templates_path: PathBuf,
dependency_paths: Vec<PathBuf>,
exec_config: &ExecConfig)
-> Result<Config> {
log::status(format!("Preparing generator config for library: {}", sublib_name));
let crate_name = format!("qt_{}", sublib_name);
let mut crate_properties = CrateProperties::new(crate_name.clone(),
versions::QT_OUTPUT_CRATES_VERSION);
let mut custom_fields = toml::value::Table::new();
let mut package_data = toml::value::Table::new();
package_data.insert("authors".to_string(),
toml::Value::Array(vec![toml::Value::String("Pavel Strakhov <[email protected]>"
.to_string())]));
let description = format!("Bindings for {} C++ library (generated automatically with cpp_to_rust project)",
lib_folder_name(sublib_name));
package_data.insert("description".to_string(), toml::Value::String(description));
let doc_url = format!("https://rust-qt.github.io/rustdoc/qt/{}", &crate_name);
package_data.insert("documentation".to_string(), toml::Value::String(doc_url));
package_data.insert("repository".to_string(),
toml::Value::String("https://github.com/rust-qt/cpp_to_rust".to_string()));
package_data.insert("license".to_string(),
toml::Value::String("MIT".to_string()));
custom_fields.insert("package".to_string(), toml::Value::Table(package_data));
crate_properties.set_custom_fields(custom_fields);
crate_properties.remove_default_build_dependencies();
let qt_build_tools_path = if exec_config.write_dependencies_local_paths {
Some(repo_crate_local_path("qt_generator/qt_build_tools")?)
} else {
None
};
crate_properties.add_build_dependency("qt_build_tools",
versions::QT_BUILD_TOOLS_VERSION,
qt_build_tools_path);
let mut config = Config::new(&output_dir, &cache_dir, crate_properties);
let installation_data = get_installation_data(sublib_name)?;
config.add_include_path(&installation_data.root_include_path);
config.add_include_path(&installation_data.lib_include_path);
for dep in lib_dependencies(&sublib_name)? {
let dep_data = get_installation_data(dep)?;
config.add_include_path(&dep_data.lib_include_path);
}
config.add_target_include_path(&installation_data.lib_include_path);
config.set_cache_usage(exec_config.cache_usage.clone());
config.set_write_dependencies_local_paths(exec_config.write_dependencies_local_paths);
config.set_write_cache(exec_config.write_cache);
config.set_quiet_mode(exec_config.quiet_mode);
config.set_debug_logging_config(exec_config.debug_logging_config.clone());
config.set_cpp_lib_version(installation_data.qt_version.as_str());
if exec_config.write_dependencies_local_paths {
log::status("Output Cargo.toml file will contain local paths of used dependencies \
(use --no-local-paths to disable).");
} else {
log::status("Local paths will not be written to the output crate. Make sure all dependencies \
are published before trying to compile the crate.");
}
// TODO: does parsing work on MacOS without adding "-F"?
config.add_include_directive(&lib_folder_name(sublib_name));
let lib_include_path = installation_data.lib_include_path.clone();
config.add_cpp_data_filter(move |cpp_data| fix_header_names(cpp_data, &lib_include_path));
config.add_cpp_parser_arguments(vec!["-fPIC", "-fcxx-exceptions"]);
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-std=gnu++11");
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-fPIC");
// msvc and mingw don't need this
config
.cpp_build_config_mut()
.add(target::Condition::OS(target::OS::Windows).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.set_library_type(CppLibraryType::Shared);
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc), data);
}
if target::current_env() == target::Env::Msvc {
config.add_cpp_parser_argument("-std=c++14");
} else {
config.add_cpp_parser_argument("-std=gnu++11");
}
config.add_cpp_parser_blocked_name("qt_check_for_QGADGET_macro");
let sublib_name_clone = sublib_name.to_string();
let docs_path = installation_data.docs_path.clone();
config.add_cpp_data_filter(move |cpp_data| {
match DocData::new(&sublib_name_clone, &docs_path) {
Ok(doc_data) => {
let mut parser = DocParser::new(doc_data);
find_methods_docs(&mut cpp_data.methods, &mut parser)?; | type1.doc = Some(doc.0);
if let CppTypeKind::Enum { ref mut values } = type1.kind {
let enum_namespace = if let Some(index) = type1.name.rfind("::") {
type1.name[0..index + 2].to_string()
} else {
String::new()
};
for value in values {
if let Some(r) = doc.1.iter().find(|x| x.name == value.name) {
value.doc = Some(r.html.clone());
// let full_name = format!("{}::{}", enum_namespace, &value.name);
// println!("full name: {}", full_name);
parser.mark_enum_variant_used(&format!("{}{}", enum_namespace, &value.name));
} else {
let type_name = &type1.name;
log::llog(log::DebugQtDoc, || {
format!("Not found doc for enum variant: {}::{}",
type_name,
&value.name)
});
}
}
}
}
Err(err) => {
log::llog(log::DebugQtDoc,
|| format!("Not found doc for type: {}: {}", type1.name, err));
}
}
}
parser.report_unused
|
for type1 in &mut cpp_data.types {
match parser.doc_for_type(&type1.name) {
Ok(doc) => {
// log::debug(format!("Found doc for type: {}", type1.name)); | random_line_split |
| executor.rs |
"ui_tools".to_string(),
"3d_core".to_string(),
"3d_render".to_string(),
"3d_input".to_string(),
"3d_logic".to_string(),
"3d_extras".to_string()]
} else {
libs
};
let mut configs: Vec<Config> = Vec::new();
for sublib_name in final_libs {
let lib_cache_dir = cache_dir.with_added(format!("qt_{}", sublib_name));
let lib_crate_templates_path = crate_templates_path.with_added(&sublib_name);
let lib_output_dir = output_dir.with_added(format!("qt_{}", sublib_name));
let mut dependency_paths = Vec::new();
for dep in lib_dependencies(&sublib_name)? {
let path = cache_dir.with_added(format!("qt_{}", dep));
if !configs.iter().any(|c| c.cache_dir_path() == &path) && !is_completed(&path) {
return Err(format!("\"{}\" depends on \"{}\" but processing \
in \"{}\" directory is not completed.",
sublib_name,
dep,
path.display())
.into());
}
dependency_paths.push(path);
}
if is_completed(&lib_cache_dir) && config.cache_usage.can_skip_all() {
log::status("No processing! cpp_to_rust uses previous results.");
log::status("Run with -C0 to force full processing.");
continue;
}
configs.push(make_config(&sublib_name,
lib_cache_dir,
lib_output_dir,
lib_crate_templates_path,
dependency_paths,
&config)?);
}
exec(configs.into_iter())?;
Ok(())
}
/// Executes the generator for a single Qt module with given configuration.
fn make_config(sublib_name: &str,
cache_dir: PathBuf,
output_dir: PathBuf,
crate_templates_path: PathBuf,
dependency_paths: Vec<PathBuf>,
exec_config: &ExecConfig)
-> Result<Config> {
log::status(format!("Preparing generator config for library: {}", sublib_name));
let crate_name = format!("qt_{}", sublib_name);
let mut crate_properties = CrateProperties::new(crate_name.clone(),
versions::QT_OUTPUT_CRATES_VERSION);
let mut custom_fields = toml::value::Table::new();
let mut package_data = toml::value::Table::new();
package_data.insert("authors".to_string(),
toml::Value::Array(vec![toml::Value::String("Pavel Strakhov <[email protected]>"
.to_string())]));
let description = format!("Bindings for {} C++ library (generated automatically with cpp_to_rust project)",
lib_folder_name(sublib_name));
package_data.insert("description".to_string(), toml::Value::String(description));
let doc_url = format!("https://rust-qt.github.io/rustdoc/qt/{}", &crate_name);
package_data.insert("documentation".to_string(), toml::Value::String(doc_url));
package_data.insert("repository".to_string(),
toml::Value::String("https://github.com/rust-qt/cpp_to_rust".to_string()));
package_data.insert("license".to_string(),
toml::Value::String("MIT".to_string()));
custom_fields.insert("package".to_string(), toml::Value::Table(package_data));
crate_properties.set_custom_fields(custom_fields);
crate_properties.remove_default_build_dependencies();
let qt_build_tools_path = if exec_config.write_dependencies_local_paths {
Some(repo_crate_local_path("qt_generator/qt_build_tools")?)
} else {
None
};
crate_properties.add_build_dependency("qt_build_tools",
versions::QT_BUILD_TOOLS_VERSION,
qt_build_tools_path);
let mut config = Config::new(&output_dir, &cache_dir, crate_properties);
let installation_data = get_installation_data(sublib_name)?;
config.add_include_path(&installation_data.root_include_path);
config.add_include_path(&installation_data.lib_include_path);
for dep in lib_dependencies(&sublib_name)? {
let dep_data = get_installation_data(dep)?;
config.add_include_path(&dep_data.lib_include_path);
}
config.add_target_include_path(&installation_data.lib_include_path);
config.set_cache_usage(exec_config.cache_usage.clone());
config.set_write_dependencies_local_paths(exec_config.write_dependencies_local_paths);
config.set_write_cache(exec_config.write_cache);
config.set_quiet_mode(exec_config.quiet_mode);
config.set_debug_logging_config(exec_config.debug_logging_config.clone());
config.set_cpp_lib_version(installation_data.qt_version.as_str());
if exec_config.write_dependencies_local_paths {
log::status("Output Cargo.toml file will contain local paths of used dependencies \
(use --no-local-paths to disable).");
} else {
log::status("Local paths will not be written to the output crate. Make sure all dependencies \
are published before trying to compile the crate.");
}
// TODO: does parsing work on MacOS without adding "-F"?
config.add_include_directive(&lib_folder_name(sublib_name));
let lib_include_path = installation_data.lib_include_path.clone();
config.add_cpp_data_filter(move |cpp_data| fix_header_names(cpp_data, &lib_include_path));
config.add_cpp_parser_arguments(vec!["-fPIC", "-fcxx-exceptions"]);
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-std=gnu++11");
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.add_compiler_flag("-fPIC");
// msvc and mingw don't need this
config
.cpp_build_config_mut()
.add(target::Condition::OS(target::OS::Windows).negate(), data);
}
{
let mut data = CppBuildConfigData::new();
data.set_library_type(CppLibraryType::Shared);
config
.cpp_build_config_mut()
.add(target::Condition::Env(target::Env::Msvc), data);
}
if target::current_env() == target::Env::Msvc {
config.add_cpp_parser_argument("-std=c++14");
} else {
config.add_cpp_parser_argument("-std=gnu++11");
}
config.add_cpp_parser_blocked_name("qt_check_for_QGADGET_macro");
let sublib_name_clone = sublib_name.to_string();
let docs_path = installation_data.docs_path.clone();
config.add_cpp_data_filter(move |cpp_data| {
match DocData::new(&sublib_name_clone, &docs_path) {
Ok(doc_data) => {
let mut parser = DocParser::new(doc_data);
find_methods_docs(&mut cpp_data.methods, &mut parser)?;
for type1 in &mut cpp_data.types {
match parser.doc_for_type(&type1.name) {
Ok(doc) => {
// log::debug(format!("Found doc for type: {}", type1.name));
type1.doc = Some(doc.0);
if let CppTypeKind::Enum { ref mut values } = type1.kind {
let enum_namespace = if let Some(index) = type1.name.rfind("::") {
type1.name[0..index + 2].to_string()
} else {
String::new()
};
for value in values {
if let Some(r) = doc.1.iter().find(|x| x.name == value.name) {
value.doc = Some(r.html.clone());
// let full_name = format!("{}::{}", enum_namespace, &value.name);
// println!("full name: {}", full_name);
parser.mark_enum_variant_used(&format!("{}{}", enum_namespace, &value.name));
} else {
let type_name = &type1.name;
log::llog(log::DebugQtDoc, || {
format!("Not found doc for enum variant: {}::{}",
type_name,
&value.name)
});
}
}
}
}
Err(err) => {
log::llog(log::DebugQtDoc,
|| format!("Not found doc for type: {}: {}", type1.name, err));
}
}
}
parser.report_unused_anchors();
}
Err(err) => {
log::error(format!("Failed to get Qt documentation: {}", err));
err.discard_expected();
}
}
Ok(())
});
config.set_crate_template_path(crate_templates_path);
match sublib_name {
"core" => lib_configs::core(&mut config)?,
"gui" => lib_configs::gui(&mut config)?,
"widgets" => lib_configs::widgets(&mut config)?,
"3d_core" => lib_configs::core_3d(&mut config)?,
"3d_render" => lib_configs::render_3d(&mut config)?,
"3d_input" => lib_configs::input_3d(&mut config)?,
"3d_logic" => lib_configs::logic_3d(&mut config)?,
"3d_extras" => lib_configs::extras_3d(&mut config)?,
"ui_tools" => {}
_ => return Err(format!("Unknown lib name: {}", sublib_name).into()),
}
config.set_dependency_cache_paths(dependency_paths);
Ok(config)
}
/// Adds documentation from `data` to `cpp_methods`.
fn
| find_methods_docs | identifier_name |
| set3.rs |
_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
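// each keystream byte K_i can then be recovered independently by treating
// column i as a single-byte-xor ciphertext and reusing the earlier
// `decrypt_single_byte_xor_cipher` break on it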
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]);
}
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
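// Brute force: the seed is assumed to be a recent unix timestamp, so try the
// previous 10000 seconds and return the seed whose first RNG output matches.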
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1, ...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
// state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
// since more than half of the bits are the same, it's very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
// since more than half of the bits are the same, it's very easy to recover again
y ^= (y << 15) & 0xEFC60000;
// (2) y = y ^ ((y << 7) & 0x9D2C5680
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8>
|
{
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
}
| identifier_body |
| set3.rs |
HRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
|
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]); | random_line_split |
|
| set3.rs |
_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4gdGhlIGNhc3VhbCBjb21lZHk7")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=")?);
base64_strings.push(from_base64_string("VHJhbnNmb3JtZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
Ok(base64_strings)
}
pub fn encrypt_plaintexts_with_same_nonce(plaintexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
let key = random_bytes(16)?;
let nonce = 0;
let mut result = Vec::new();
for plaintext in plaintexts {
result.push(aes_128_ctr(&key, nonce, plaintext)?);
}
Ok(result)
}
pub fn break_ctr_with_same_nonce(ciphertexts: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
// since we used the same nonce for each ciphertext
// it means we used a single "fixed xor" key
// for each
// that means, we can transpose the individual bytes of
// the ciphertext, same way as we did before
// however, we have to do it on a block by block basis
// eg
// [ d2 ab 03 ] [ b5 ]
// [ f3 e9 b8 ] [ 6f ]
//
// [ K1 K2 K3 ] [ K4 ]
// K1..K4 is fixed xor "key"
let max_length = ciphertexts.iter()
.map(|c| c.len())
.max()
.unwrap_or(1);
let mut keystream_bytes = Vec::new();
for i in 0..max_length {
let mut single_byte_xor_ciphertext = Vec::new();
for ciphertext in ciphertexts {
if let Some(&c) = ciphertext.get(i) {
single_byte_xor_ciphertext.push(c);
}
}
let (_, byte) = decrypt_single_byte_xor_cipher(&single_byte_xor_ciphertext);
keystream_bytes.push(byte);
}
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &keystream_bytes));
}
Ok(result)
}
pub fn break_ctr_with_same_nonce_as_repeating_key_xor(ciphertexts: &[Vec<u8>])
-> Result<Vec<Vec<u8>>> {
let min_length = ciphertexts.iter()
.map(|c| c.len())
.min()
.unwrap_or(1);
let mut concated_ciphertext = Vec::new();
for ciphertext in ciphertexts {
println!("{:?}", ciphertext.len());
concated_ciphertext.extend(&ciphertext[..min_length]);
}
let (_, key) = break_repeating_key_xor(&concated_ciphertext, min_length..min_length + 1);
let mut result = Vec::new();
for ciphertext in ciphertexts {
result.push(fixed_xor(ciphertext, &key));
}
// this only extracts min_length bytes for each ciphertext
// TODO extract the rest of the plaintexts... but i'm lazy :)
Ok(result)
}
pub fn mersenne_rng(seed: u32) -> u32 {
MersenneTwister::new(seed).gen() as u32
}
pub fn crack_mt19937_seed(output: u32, unix_timestamp: u32) -> u32 {
(0..10000)
.map(|i| {
let mut rng = MersenneTwister::new(unix_timestamp - i);
(unix_timestamp - i, rng.gen() as u32)
})
.find(|&(_, out)| out == output)
.unwrap()
.0
}
pub fn crack_mt19937_state(outputs: &[u32]) -> Vec<u32> {
outputs.iter()
.map(|&output| {
// state = [seed, 1812433253 * seed ^ (seed >> 30) + 1, ...], index = 624
// x_a = (seed & 0x80000000 + (1812433253 * seed ^ (seed >> 30) + 1) & 0x7fffffff) >> 1
// state[0] = if x_a % 2 != 0 { x_a ^ 0x9908B0DF } else { x_a }
// y = state[0]
let mut y = output;
// (4) y = y ^ (y >> 18)
// since more than half of the bits are the same, it's very easy to recover
y ^= y >> 18;
// (3) y = y ^ ((y << 15) & 0xEFC60000)
// since more than half of the bits are the same, it's very easy to recover again
y ^= (y << 15) & 0xEFC60000;
// (2) y = y ^ ((y << 7) & 0x9D2C5680
// this is harder to recover, need to rebuild it up from the right side
let mut y2 = y & 0x0000007F;
for i in 7..32 {
let bit_mask = 1 << i;
let b_bit = 0x9D2C5680 & bit_mask;
let y2_shifted_bit = (y2 << 7) & bit_mask;
let mask = y2_shifted_bit & b_bit;
let y2_bit = (y ^ mask) & bit_mask;
y2 ^= y2_bit;
}
y = y2;
// (1) y = y ^ (y >> 11)
// this is harder to recover
let mut y1 = y & 0xFFE00000;
for i in 12..33 {
let bit_mask = 1 << (32 - i);
let y1_shifted_bit = (y1 >> 11) & bit_mask;
let y_masked_bit = y & bit_mask;
let y_bit = y1_shifted_bit ^ y_masked_bit;
y1 ^= y_bit;
}
y = y1;
y
})
.collect::<Vec<_>>()
}
pub fn mt19937_fixed_xor(seed: u16, data: &[u8]) -> Vec<u8> {
let key: Vec<_> = MersenneTwister::new(seed as u32).keystream().take(data.len()).collect();
fixed_xor(data, &key)
}
pub fn get_mt19937_ciphertext() -> Result<(u16, Vec<u8>)> {
let mut thread_rng = rand::thread_rng();
let prefix_len = Range::new(0, u8::MAX).ind_sample(&mut thread_rng);
let mut plaintext = random_bytes(prefix_len as usize)?;
plaintext.extend(b"AAAAAAAAAAAAAA");
let seed = Range::new(0, u16::MAX).ind_sample(&mut thread_rng);
Ok((seed, mt19937_fixed_xor(seed, &plaintext)))
}
pub fn break_mt19937_ciphertext(ciphertext: &[u8]) -> (u16, Vec<u8>) {
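// The seed is only 16 bits, so simply try every seed (in parallel) and keep
// the one whose decryption ends with the known 14-byte "A" suffix.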
(0..u16::MAX)
.into_par_iter()
.map(|seed| (seed, mt19937_fixed_xor(seed, ciphertext)))
.find_any(|&(_, ref plaintext)| &plaintext[plaintext.len() - 14..] == b"AAAAAAAAAAAAAA")
.unwrap()
}
pub fn
| generate_password_reset_token | identifier_name |
| set3.rs |
// xored with previous ciphertext block
// creates an intermediate state.
// however, if a server leaks information about the padding of a block
// (by returning 500 when a block is not padded for example)
// then we can calculate this intermediate state and xor the previous
// real ciphertext block with the intermediate state to get the plaintext
// instantly
let mut result = VecDeque::new();
// to calculate the intermediate state, we can send this:
// c1' c2 => p1' p2'
// where c2 is the last block of ciphertext, and c1' is attacker controlled.
// c1 is the second last block of the ciphertext.
// the first and only byte (z) that triggers the leak will help us calculate
// the intermediate state
// i = z ^ p'
// p = c1[16] ^ i
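// e.g. for the last byte: forge c1' until the oracle accepts padding 0x01,
// then i = z ^ 0x01 and the real plaintext byte is p = c1[16] ^ i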
for n in (0..ciphertext.len() / 16).rev() {
let current_block = &ciphertext[n * 16..(n + 1) * 16];
let previous_block = if n == 0 {
iv
} else {
&ciphertext[(n - 1) * 16..n * 16]
};
let mut c1_suffix = VecDeque::new();
for i in (0..16).rev() {
let padding = 16 - i as u8;
for c in &mut c1_suffix {
*c ^= (padding - 1) ^ padding;
}
for z in 0..u8::MAX {
// C1' C2
let mut oracle_blocks = vec![0; i];
oracle_blocks.push(z);
oracle_blocks.extend(&c1_suffix);
oracle_blocks.extend(current_block);
if padding_oracle(&oracle_blocks) |
}
}
}
let vec = Vec::from(result);
if is_pkcs7_padded(&vec) {
unpad_pkcs7(&vec)
} else {
Ok(vec)
}
}
pub fn get_base64_strings() -> Result<Vec<Vec<u8>>> {
let mut base64_strings = Vec::new();
base64_strings.push(from_base64_string("SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==")?);
base64_strings.push(from_base64_string("Q29taW5nIHdpdGggdml2aWQgZmFjZXM=")?);
base64_strings.push(from_base64_string("RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==")?);
base64_strings.push(from_base64_string("RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=")?);
base64_strings.push(from_base64_string("SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk")?);
base64_strings.push(from_base64_string("T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=")?);
base64_strings.push(from_base64_string("UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==")?);
base64_strings.push(from_base64_string("QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=")?);
base64_strings.push(from_base64_string("T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl")?);
base64_strings.push(from_base64_string("VG8gcGxlYXNlIGEgY29tcGFuaW9u")?);
base64_strings.push(from_base64_string("QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==")?);
base64_strings.push(from_base64_string("QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=")?);
base64_strings.push(from_base64_string("QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==")?);
base64_strings.push(from_base64_string("QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=")?);
base64_strings.push(from_base64_string("QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=")?);
base64_strings.push(from_base64_string("VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==")?);
base64_strings.push(from_base64_string("SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==")?);
base64_strings.push(from_base64_string("SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==")?);
base64_strings.push(from_base64_string("VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==")?);
base64_strings.push(from_base64_string("V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==")?);
base64_strings.push(from_base64_string("V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==")?);
base64_strings.push(from_base64_string("U2hlIHJvZGUgdG8gaGFycmllcnM/")?);
base64_strings.push(from_base64_string("VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=")?);
base64_strings.push(from_base64_string("QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=")?);
base64_strings.push(from_base64_string("V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=")?);
base64_strings.push(from_base64_string("SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==")?);
base64_strings.push(from_base64_string("U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==")?);
base64_strings.push(from_base64_string("U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=")?);
base64_strings.push(from_base64_string("VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==")?);
base64_strings.push(from_base64_string("QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu")?);
base64_strings.push(from_base64_string("SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=")?);
base64_strings.push(from_base64_string("VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs")?);
base64_strings.push(from_base64_string("WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=")?);
base64_strings.push(from_base64_string("SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0")?);
base64_strings.push(from_base64_string("SW4 | {
result.push_front(previous_block[i] ^ z ^ padding);
c1_suffix.push_front(z);
break;
}
| conditional_block |
| mm.rs |
// amount of virtual memory to reserve, including the guard
// size.
let reserve_size = GUARD_PAGE_SIZE.checked_add(size as u64)
.expect("Integer overflow on virtual region size");
// Get a new virtual region that is free
let ret = VirtAddr(
NEXT_FREE_VADDR.fetch_add(reserve_size, Ordering::SeqCst)
);
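// Note: virtual address space is handed out by a simple atomic bump
// allocator; this function never frees or reuses previously reserved ranges.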
// If we cannot add the reserve size from the return value, then the
// virtual memory wrapped the 64-bit boundary
ret.0.checked_add(reserve_size)
.expect("Integer overflow on virtual address range");
ret
}
/// Gets access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys<'a>(paddr: PhysAddr, size: u64) -> &'a [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a read-only slice to this physical memory
core::slice::from_raw_parts(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *const u8,
size as usize)
}
/// Gets mutable access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys_mut<'a>(paddr: PhysAddr, size: u64) -> &'a mut [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts_mut(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut u8,
size as usize)
}
/// Read a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::read_volatile`.
#[allow(dead_code)]
pub unsafe fn read_phys<T>(paddr: PhysAddr) -> T {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::read_volatile((KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T)
}
/// Write to a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::write_volatile`.
pub unsafe fn write_phys<T>(paddr: PhysAddr, val: T) {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on write_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::write_volatile(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T, val);
}
/// Metadata on a freed allocation
#[repr(C)]
struct | {
/// Virtual address of the next `FreeListNode`
next: usize,
/// Number of free slots in `free_mem`
free_slots: usize,
/// Virtual addresses of free allocations
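/// (a zero-length array used C-style: the actual pointers are stored in the
/// memory immediately following this header, up to the allocation size)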
free_addrs: [*mut u8; 0],
}
/// A free list which holds free entries of `size` bytes in a semi-linked list
/// table thingy.
pub struct FreeList {
/// Pointer to the first entry in the free list
head: usize,
/// Size of allocations (in bytes) for this free list
size: usize,
}
impl FreeList {
/// Create a new, empty free list containing addresses to `size` byte
/// allocations
pub fn new(size: usize) -> Self {
// Ensure some properties of the free list size
assert!(size.count_ones() == 1,
"Free list size must be a power of two");
assert!(size >= size_of::<usize>(),
"Free list size must be at least pointer width");
FreeList { head: 0, size }
}
/// Get a address from the free list
pub unsafe fn pop(&mut self) -> *mut u8 {
// If the free list is empty
if self.head == 0 {
if self.size <= 4096 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else {
// Check if there is room for this allocation in the free stack,
// or if we need to create | FreeListNode | identifier_name |
| mm.rs |
96 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else {
// Check if there is room for this allocation in the free stack,
// or if we need to create a new stack
if self.head == 0 ||
(*(self.head as *const FreeListNode)).free_slots == 0 {
// No free slots, create a new stack out of the freed vaddr
let vaddr = &mut *(vaddr as *mut FreeListNode);
// Set the number of free slots to the maximum size, as all
// entries are free in the stack
// This is the size of the allocation, minus the 2 `usize`
// header (in entries)
vaddr.free_slots =
(self.size / core::mem::size_of::<usize>()) - 2;
// Update the next to point to the old head
vaddr.next = self.head;
// Establish this as the new free list head
self.head = vaddr as *mut FreeListNode as usize;
} else {
// There's room in the current stack, just throw us in there
let fl = &mut *(self.head as *mut FreeListNode);
// Decrement the number of free slots
fl.free_slots -= 1;
// Store our newly freed virtual address into this slot
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize) =
vaddr;
}
}
}
}
/// A wrapper on a range set to allow implementing the `PhysMem` trait
pub struct PhysicalMemory;
impl PhysMem for PhysicalMemory {
unsafe fn translate(&mut self, paddr: PhysAddr, size: usize)
-> Option<*const u8> {
self.translate_mut(paddr, size).map(|x| x as *const u8)
}
unsafe fn translate_mut(&mut self, paddr: PhysAddr, size: usize)
-> Option<*mut u8> {
// Compute the ending physical address
let end = (size as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
})?;
// Make sure this physical address fits inside our window
if end >= KERNEL_PHYS_WINDOW_SIZE {
return None;
}
// Convert the physical address into linear mapping view address
Some((paddr.0 + KERNEL_PHYS_WINDOW_BASE) as *mut u8)
}
fn alloc_phys(&mut self, layout: Layout) -> Option<PhysAddr> {
if layout.size() <= 4096 && layout.align() <= layout.size() {
// Special case, just allocate directly from our free lists. Our
// free lists for allocations <= 4096 bytes directly map to the
// physical memory map, and are naturally aligned
unsafe {
let ptr = core!().free_list(layout).lock().pop();
Some(PhysAddr(ptr as u64 - KERNEL_PHYS_WINDOW_BASE))
}
} else {
// Get access to physical memory
let mut phys_mem = unsafe {
core!().boot_args.free_memory_ref().lock()
};
let phys_mem = phys_mem.as_mut()?;
// Could not satisfy allocation from free list, allocate
// directly from the physical memory pool
let alc = phys_mem.allocate_prefer(layout.size() as u64,
layout.align() as u64,
memory_range())?;
// Update stats
GLOBAL_ALLOCATOR.free_physical
.store(phys_mem.sum().unwrap(), Ordering::Relaxed);
Some(PhysAddr(alc as u64))
}
}
}
/// The global allocator for the bootloader, this just uses physical memory as
/// a backing and does not handle any fancy things like fragmentation. Use this
/// carefully.
#[global_allocator]
pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator {
num_allocs: AtomicU64::new(0),
num_frees: AtomicU64::new(0),
free_physical: AtomicU64::new(0),
free_list: AtomicU64::new(0),
};
/// Empty structure that we can implement `GlobalAlloc` for such that we can
/// use the `#[global_allocator]`
#[derive(Debug)]
pub struct GlobalAllocator {
/// Number of allocations performed
pub num_allocs: AtomicU64,
/// Number of frees performed
pub num_frees: AtomicU64,
/// Current number of free bytes in the physical memory pool, this only
/// ever decreases since we do not free back to physical memory
pub free_physical: AtomicU64,
/// Number of bytes sitting in free lists
pub free_list: AtomicU64,
}
| /// Print the allocation statistics to the screen
pub fn print_alloc_stats() {
// Get total amount of physical memory | random_line_split |
|
| mm.rs |
] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a read-only slice to this physical memory
core::slice::from_raw_parts(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *const u8,
size as usize)
}
/// Gets mutable access to a slice of physical memory
#[allow(dead_code)]
#[inline]
pub unsafe fn slice_phys_mut<'a>(paddr: PhysAddr, size: u64) -> &'a mut [u8] {
let end = size.checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
// Return out a slice to this physical memory as mutable
core::slice::from_raw_parts_mut(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut u8,
size as usize)
}
/// Read a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::read_volatile`.
#[allow(dead_code)]
pub unsafe fn read_phys<T>(paddr: PhysAddr) -> T {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on read_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::read_volatile((KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T)
}
/// Write to a physical address containing a type `T`. This just handles the
/// windowing and performs a `core::ptr::write_volatile`.
pub unsafe fn write_phys<T>(paddr: PhysAddr, val: T) {
let end = (size_of::<T>() as u64).checked_sub(1).and_then(|x| {
x.checked_add(paddr.0)
}).expect("Integer overflow on write_phys");
assert!(end < KERNEL_PHYS_WINDOW_SIZE,
"Physical address outside of window");
core::ptr::write_volatile(
(KERNEL_PHYS_WINDOW_BASE + paddr.0) as *mut T, val);
}
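/// Editor's note (assumption): a small sketch tying `read_phys` and
/// `write_phys` together; `paddr` is any in-window physical address and is
/// not a value taken from this file.
#[allow(dead_code)]
unsafe fn phys_rw_sketch(paddr: PhysAddr) {
    // Read a u64 through the linear window, bump it, and write it back
    let val: u64 = read_phys(paddr);
    write_phys(paddr, val.wrapping_add(1));
}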
/// Metadata on a freed allocation
#[repr(C)]
struct FreeListNode {
/// Virtual address of the next `FreeListNode`
next: usize,
/// Number of free slots in `free_mem`
free_slots: usize,
/// Virtual addresses of free allocations
free_addrs: [*mut u8; 0],
}
/// A free list which holds free entries of `size` bytes in a semi-linked list
/// table thingy.
pub struct FreeList {
/// Pointer to the first entry in the free list
head: usize,
/// Size of allocations (in bytes) for this free list
size: usize,
}
impl FreeList {
/// Create a new, empty free list containing addresses to `size` byte
/// allocations
pub fn new(size: usize) -> Self {
// Ensure some properties of the free list size
assert!(size.count_ones() == 1,
"Free list size must be a power of two");
assert!(size >= size_of::<usize>(),
"Free list size must be at least pointer width");
FreeList { head: 0, size }
}
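    /// Editor's note (assumption): a sketch of the intended pop/push round
    /// trip on a free list; this helper is not part of the original file.
    #[allow(dead_code)]
    pub unsafe fn round_trip_sketch(&mut self) {
        // Grab a `self.size`-byte allocation and immediately hand it back
        let ptr = self.pop();
        self.push(ptr);
    }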
    /// Get an address from the free list
pub unsafe fn pop(&mut self) -> *mut u8 {
// If the free list is empty
if self.head == 0 {
if self.size <= 4096 {
// Special case, if the allocation fits within a page, we can
// directly return virtual addresses to our physical memory
// map. This is significantly better for TLBs and caches than
// to create new page tables for allocating a new virtual
// address. Especially since we use large pages (if possible)
// to map in the physical map
// Get access to physical memory
let alc = {
let mut phys_mem =
core!().boot_args.free_memory_ref().lock();
let phys_mem = phys_mem.as_mut().unwrap();
// Allocate 4096 bytes of page aligned physical memory, we
// do bulk allocations here to improve performance and to
// decrease the amount of physical memory lost due to
// carving off alignment bytes
let alc = phys_mem.allocate_prefer(4096, 4096,
memory_range())
.expect("Failed to allocate physical memory") as u64;
// Update stats
GLOBAL_ALLOCATOR.free_physical.store(
phys_mem.sum().unwrap(),
Ordering::Relaxed);
alc
};
// Split up this allocation and free the segments
for offset in (0..4096).step_by(self.size) {
// Get the virtual address for this physical address
let vaddr = slice_phys_mut(
PhysAddr(alc + offset), self.size as u64).as_mut_ptr();
// Add this to the free list
self.push(vaddr);
}
} else {
// Allocation size exceeds a page, we must allocate new virtual
// memory to satisfy the allocation
// Allocate a virtual address to hold this allocation
let vaddr = alloc_virt_addr_4k(self.size as u64);
// Get access to physical memory
let mut pmem = PhysicalMemory;
// Get access to virtual memory
let mut page_table = core!().boot_args.page_table.lock();
let page_table = page_table.as_mut().unwrap();
// Map in the memory as RW
page_table.map(&mut pmem, vaddr, PageType::Page4K,
self.size as u64, true, true, false, false)
.expect("Failed to map RW memory");
// Return out the allocation
return vaddr.0 as *mut u8;
}
}
// We're about to pop from the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_sub(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// Basic linked list for super small allocations which can't hold
// our stack-based free list metadata
// Save the current head (our new allocation)
let alc = self.head as *mut FreeListNode;
// Set the head to the next node
self.head = (*alc).next;
alc as *mut u8
} else {
// Get access to the free list stack
let fl = &mut *(self.head as *mut FreeListNode);
// Check if there are any addresses on the stack
if fl.free_slots <
((self.size / core::mem::size_of::<usize>()) - 2) {
// Just grab the free entry
let alc =
*fl.free_addrs.as_mut_ptr().offset(fl.free_slots as isize);
// Update number of free slots
fl.free_slots += 1;
// Return the allocation
alc
} else {
// The free page stack is empty at this level, take the entire
// node and use it as the allocation
// Get the old head, will be our allocation
let alc = self.head;
// Update the head to point to the next entry
self.head = fl.next;
// Return out the allocation
alc as *mut u8
}
}
}
/// Put an allocation back onto the free list
pub unsafe fn push(&mut self, vaddr: *mut u8) {
// We're about to push to the free list, adjust the stats
GLOBAL_ALLOCATOR.free_list.fetch_add(self.size as u64,
Ordering::SeqCst);
if self.size <= core::mem::size_of::<usize>() * 2 {
// If the free list is too small to contain our stack free list,
// then just directly use a linked list
// Write the old head into the newly freed `vaddr`
let vaddr = vaddr as *mut FreeListNode;
(*vaddr).next = self.head;
// Update the head
self.head = vaddr as usize;
} else | {
// Check if there is room for this allocation in the free stack,
// or if we need to create a new stack
if self.head == 0 ||
(*(self.head as *const FreeListNode)).free_slots == 0 {
// No free slots, create a new stack out of the freed vaddr
let vaddr = &mut *(vaddr as *mut FreeListNode);
// Set the number of free slots to the maximum size, as all
// entries are free in the stack
// This is the size of the allocation, minus the 2 `usize`
// header (in entries)
vaddr.free_slots =
(self.size / core::mem::size_of::<usize>()) - 2;
// Update the next to point to the old head
vaddr.next = self.head;
// Establish this as the new free list head
self.head = vaddr as *mut FreeListNode as usize; | conditional_block |
|
main.rs | packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
    // We add one more u16 to pad the buffer to a 4-byte boundary
/* padding */ 0,
];
const SECOND_INDICES: &[u16] = &[
0, 1, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
bitflags! {
struct Levers: u32 {
const LEVER1 = 0b00000001;
const LEVER2 = 0b00000010;
}
}
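// Editor's note (assumption): a minimal sketch of the `Levers` bitflags above,
// mirroring how the keyboard handler toggles LEVER1 later in this file.
#[allow(dead_code)]
fn levers_sketch() {
    let mut levers = Levers::empty();
    levers = levers | Levers::LEVER1;          // key pressed
    assert!(levers.contains(Levers::LEVER1));
    levers = levers & !Levers::LEVER1;         // key released
    assert!(levers.is_empty());
}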
struct | {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
mouse_pos: cgmath::Point2<f64>,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
second_index_buffer: wgpu::Buffer,
second_num_indices: u32,
levers: Levers,
diffuse_bind_group: wgpu::BindGroup,
}
impl State {
async fn new(window: &Window) -> Result<Self, Box<dyn std::error::Error>> {
let size = window.inner_size();
// instance holds the handle to the GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU (they are all ORed)
// TODO: Try BackendBit::VULKAN
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// This is unsafe because on some Linux systems lifetime of the window might not be as long
// as the lifetime of the program. See: https://github.com/gfx-rs/wgpu/issues/1463
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}
).await.expect("Can't initialize adapter with the surface.");
let format = adapter.get_swap_chain_preferred_format(&surface).expect(
"Can't get surface prefered texture format."
);
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
// Features are the capabilities of the API and the GPU
// They are not universal.
// See all features here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::empty(),
// Limits are resource limits that can be imposed.
// They are device dependent
// See all limits here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits::default(),
label: None, // Debug label for the device
},
None, // Trace path used for tracing API calls if `trace` features is enabled.
).await?;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // Framerate will be capped with `VSync` frequency
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes)?;
let diffuse_rgba = diffuse_image.as_rgba8().expect("Can't transform image info");
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &&diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
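        // Editor's note (assumption): the `4 * dimensions.0` above assumes the
        // 4-byte-per-pixel Rgba8UnormSrgb format chosen earlier; a different
        // texture format would need a different `bytes_per_row`.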
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &&texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample | State | identifier_name |
main.rs | packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
    // We add one more u16 to pad the buffer to a 4-byte boundary
/* padding */ 0,
];
const SECOND_INDICES: &[u16] = &[
0, 1, 4,
2, 3, 4,
// WGPU requires 4 bytes buffer alignment (packing)
// Above there are 9 u16 numbers which is 9 x 2 bytes
// We add one more u16 to square this
/* padding */ 0,
];
bitflags! {
struct Levers: u32 {
const LEVER1 = 0b00000001;
const LEVER2 = 0b00000010;
}
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
mouse_pos: cgmath::Point2<f64>,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
second_index_buffer: wgpu::Buffer,
second_num_indices: u32,
levers: Levers,
diffuse_bind_group: wgpu::BindGroup,
}
impl State {
async fn new(window: &Window) -> Result<Self, Box<dyn std::error::Error>> {
let size = window.inner_size();
// instance holds the handle to the GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU (they are all ORed)
// TODO: Try BackendBit::VULKAN
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// This is unsafe because on some Linux systems lifetime of the window might not be as long
// as the lifetime of the program. See: https://github.com/gfx-rs/wgpu/issues/1463
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}
).await.expect("Can't initialize adapter with the surface.");
let format = adapter.get_swap_chain_preferred_format(&surface).expect(
"Can't get surface prefered texture format."
);
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
// Features are the capabilities of the API and the GPU
// They are not universal.
// See all features here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::empty(),
// Limits are resource limits that can be imposed.
// They are device dependent
// See all limits here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits::default(),
label: None, // Debug label for the device
},
None, // Trace path used for tracing API calls if `trace` features is enabled.
).await?;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // Framerate will be capped with `VSync` frequency
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes)?; | let diffuse_rgba = diffuse_image.as_rgba8().expect("Can't transform image info");
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &&diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &&texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample | random_line_split |
|
main.rs | let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &&diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &&texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let num_indices = INDICES.len() as u32;
let second_index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Second Index Buffer"),
contents: bytemuck::cast_slice(SECOND_INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let second_num_indices = SECOND_INDICES.len() as u32;
let levers = Levers::empty();
Ok(
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
mouse_pos: cgmath::Point2 {x: 0.0, y: 0.0},
render_pipeline,
vertex_buffer,
index_buffer,
second_index_buffer,
num_indices,
second_num_indices,
levers,
diffuse_bind_group,
}
)
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn input(&mut self, event: &WindowEvent) -> bool {
match event {
WindowEvent::CursorMoved {position, ..} => {
self.mouse_pos.x = position.x;
self.mouse_pos.y = position.y;
// debug!("Mouse moved to point: {:?}", self.mouse_pos);
true
},
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state,
virtual_keycode: Some(VirtualKeyCode::Space),
..
} => match state {
ElementState::Pressed => {
self.levers = self.levers | Levers::LEVER1;
true
},
ElementState::Released => {
self.levers = self.levers & !Levers::LEVER1;
true
},
},
_ => false
},
_ => false
}
}
fn update(&mut self) {
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
let frame = self.swap_chain
.get_current_frame()?
.output;
let mut encoder = self.device.create_command_encoder(
&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
}
);
{
let mut render_pass = encoder.begin_render_pass(
&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[
// This is what [[location(0)]] in the fragment shader targets
wgpu::RenderPassColorAttachment {
view: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}
}
],
depth_stencil_attachment: None,
}
);
let data = {
if self.levers.contains(Levers::LEVER1) {
(&self.second_index_buffer, self.second_num_indices)
} else | {
(&self.index_buffer, self.num_indices)
} | conditional_block |
|
main.rs | VULKAN
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// This is unsafe because on some Linux systems lifetime of the window might not be as long
// as the lifetime of the program. See: https://github.com/gfx-rs/wgpu/issues/1463
let surface = unsafe { instance.create_surface(window) };
let adapter = instance.request_adapter(
&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}
).await.expect("Can't initialize adapter with the surface.");
let format = adapter.get_swap_chain_preferred_format(&surface).expect(
"Can't get surface prefered texture format."
);
let (device, queue) = adapter.request_device(
&wgpu::DeviceDescriptor {
// Features are the capabilities of the API and the GPU
// They are not universal.
// See all features here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::empty(),
// Limits are resource limits that can be imposed.
// They are device dependent
// See all limits here: https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits::default(),
label: None, // Debug label for the device
},
None, // Trace path used for tracing API calls if `trace` features is enabled.
).await?;
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // Framerate will be capped with `VSync` frequency
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let diffuse_bytes = include_bytes!("tree.png");
let diffuse_image = image::load_from_memory(diffuse_bytes)?;
let diffuse_rgba = diffuse_image.as_rgba8().expect("Can't transform image info");
use image::GenericImageView;
let dimensions = diffuse_image.dimensions();
let texture_size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
// All textures are stored as 3D, 2D textures have depth of 1.
depth_or_array_layers: 1,
};
let diffuse_texture = device.create_texture(
&wgpu::TextureDescriptor {
// All textures are stored as 3D, 2D textures have depth of 1.
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
// SAMPLED tells WGPU to use the texture in shaders
// COPY_DST tells WGPU that we want to copy data to this texture
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
label: Some("diffuse_texture"),
}
);
queue.write_texture(
// Where to copy the pixel data
wgpu::ImageCopyTexture {
texture: &&diffuse_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
// The pixel data
diffuse_rgba,
// Layout of the texture
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * dimensions.0),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_size
);
let diffuse_texture_view = diffuse_texture.create_view(
&wgpu::TextureViewDescriptor::default()
);
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let texture_bind_group_layout = device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float {filterable: true},
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
comparison: false,
filtering: true,
},
count: None,
},
],
label: Some("texture_bind_group_layout"),
}
);
let diffuse_bind_group = device.create_bind_group(
&wgpu::BindGroupDescriptor {
label: Some("diffuse_bind_group"),
layout: &&texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
}
],
}
);
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
flags: wgpu::ShaderFlags::all(),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: sc_desc.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: wgpu::PolygonMode::Fill,
// Enabling this requires Features::DEPTH_CLAMPING to be enabled.
clamp_depth: false,
// Enabling this requires Features::CONSERVATIVE_RASTERIZATION to be enabled.
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(VERTICES),
usage: wgpu::BufferUsage::VERTEX,
}
);
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let num_indices = INDICES.len() as u32;
let second_index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Second Index Buffer"),
contents: bytemuck::cast_slice(SECOND_INDICES),
usage: wgpu::BufferUsage::INDEX,
}
);
let second_num_indices = SECOND_INDICES.len() as u32;
let levers = Levers::empty();
Ok(
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
mouse_pos: cgmath::Point2 {x: 0.0, y: 0.0},
render_pipeline,
vertex_buffer,
index_buffer,
second_index_buffer,
num_indices,
second_num_indices,
levers,
diffuse_bind_group,
}
)
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) | {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
} | identifier_body |
|
PEATSAplugin.py | 1)
Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)
balloon.bind(mutlist, 'Enter one mutation per line in the form\n A:0003:ALA or A3A')
f=Frame(jobdlg); f.pack(fill=X,expand=1)
Button(f,text='Submit',command=submit).pack(side=LEFT,fill=X,expand=1,pady=2)
Button(f,text='Cancel',command=close).pack(fill=X,expand=1,pady=2)
jobdlg.grab_set()
jobdlg.transient(self.parent)
self.parent.wait_window(jobdlg)
return
def submitJob(self, name='mycalc', pdbname=None, pdb=None, pdbfile=None, ligandfile=None,
mutations=[], calcs=['stability'], mutationquality='2.0', meta={}):
"""Submit job to server"""
if 'scan' in calcs and pdbname==None:
print 'You must provide pdb code for pKa calcs'
return
if pdb==None and pdbfile==None:
return
job = self.jobManager.createJob(pdbId=pdbname, calculations=calcs,
dataTable='Data', metadata=meta,
optionArgs={'--mutationQuality':mutationquality})
if pdb != None:
job.setStructure(pdb)
else:
job.setStructureFromFile(pdbfile)
if 'binding' in calcs:
job.setLigandFromFile(ligandfile)
self.mutationList = Core.Data.MutationListFile(filename='tempmutlist', create=True)
sets=[]
for code in mutations:
if code == '': continue
try:
sets.append(Core.Data.MutationSet(code))
except:
print 'mutation code %s incorrect' %code
for s in sets:
self.mutationList.addMutant(s, autoUpdate=False, ignoreDuplicates=True)
self.mutationList.removeDuplicates(autoUpdate=False)
job.setMutationListFile(self.mutationList)
job.setState('Ready')
self.jobManager.logJobStates('jobstates.log')
#add job to peat database
self.storeJob(name, job)
if self.parent != None:
username = self.parent.username
self.updateJobs()
else:
username = None
self.DB.commit(note='peatsa job',user=username)
print 'job submitted successfully'
return
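    def _submitDemo(self):
        """Editor's note (assumption): a minimal sketch of driving submitJob
        directly; the pdb code, file name and mutation codes below are
        placeholders, not values from this project."""
        self.submitJob(name='demo_stability', pdbname='1abc',
                       pdbfile='refprot.pdb', ligandfile=None,
                       mutations=['A:0003:ALA', 'A5G'],
                       calcs=['stability'], mutationquality='2.0',
                       meta={'expcol': '', 'pdbname': '1abc'})
        return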
def resubmitJob(self):
"""Resend a job based on new mutations in DB that are not in job already"""
job, name = self.getJob()
if job == None:
return
DB=self.DB
self.matrices = job.data.allMatrices()
        for m in self.matrices:
            matrix = self.matrices[m]
if matrix==None: return
muts = matrix.mutationCodes()
dbmuts = [DB.get(p).Mutations for p in DB.getRecs()]
newmuts = list(set(dbmuts) - set(muts))
print 'the following mutations in the project are not in the job: %s' %newmuts
'''self.submitJob(name=name,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=newmuts,
calcs=calcs, meta={'expcol':expcol}) '''
self.log.yview('moveto', 1)
return
def getJob(self, name=None):
"""Get job from name"""
if name == None:
name = self.jobstable.get_selectedRecordNames()[0]
if name == None:
return None, name
jobid = self.DB.meta.peatsa_jobs[name]
try:
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
except:
#print 'job not in database'
return None,name
return job, name
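    def _jobStateDemo(self, name):
        """Editor's note (assumption): a small sketch of the lookup pattern
        above; `name` must already be registered in DB.meta.peatsa_jobs."""
        job, name = self.getJob(name)
        if job != None:
            print name, job.state()
        return job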
def removeJob(self):
"""Remove a job from the db"""
job, name = self.getJob()
answer = tkMessageBox.askyesno("Warning",'Remove this job?')
if answer == False:
return
try:
self.jobManager.deleteJob(job)
except:
print 'job not in database, removing from peat'
del self.DB.meta.peatsa_jobs[name]
self.DB.meta.__p__changed = 1
self.updateJobs()
return
def viewDetails(self, name=None):
job, name = self.getJob()
if job==None:
return
jobmeta = job.metadata()
print
print job.data
print 'details for job %s' %name
print 'job status:',job.state()
print 'submitted on ',job.date
if jobmeta.has_key('pdbname'):
print 'original pdb file:', jobmeta['pdbname']
print 'mutations:', len(job.mutationListFile().mutantList())
print '(this job has id %s)' %job.identification
if job.error() != None:
print 'The job had an error..'
print job.error()['ErrorDescription']
print job.error()['DetailedDescription']
print
self.log.yview('moveto', 1)
return
def addColoredText(self, st, tag, word, fg='black', bg='white'):
"""add a space to the end of the word"""
word = word + " "
st.insert('end', word)
end_index = st.index('end')
begin_index = "%s-%sc" % (end_index, len(word) + 1)
st.tag_add(tag, begin_index, end_index)
st.tag_config(tag, foreground=fg, background=bg)
return
def checkJobsDict(self):
"""Check jobs data structure exists"""
if not hasattr(self.DB.meta,'peatsa_jobs'):
from ZODB.PersistentMapping import PersistentMapping
self.DB.meta.peatsa_jobs = PersistentMapping()
def storeJob(self, name, job):
"""Store job to DB"""
self.checkJobsDict()
self.DB.meta.peatsa_jobs[name] = job.identification
return
def updateJobs(self):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return
self.updateJobsTable()
self.wait=self.mainwin.after(60000, self.updateJobs)
return
def mergeResults(self, job, colname, tablemodel):
"""Merge given job results to tablemodel"""
if job==None:
return
matrices = job.data.allMatrices()
if not colname:
return
nf={'Total':colname}
for m in matrices:
matrix = matrices[m]
if matrix == None: continue
M = self.mergeMatrix(matrix, tablemodel, fields=['Total'], newfields=nf)
return
def mergeCurrent(self):
"""Auto merge selected job results to main table
called from GUI """
job, name = self.getJob()
if job==None:
return
#get field name to use
colname = tkSimpleDialog.askstring("Column name?",
"Name for column:",
initialvalue=name+'_Predictions',
parent=self.mainwin)
M = self.parent.tablemodel
self.mergeResults(job, colname, M)
self.parent.updateTable()
#also send some meta data to peatsa_meta?
'''from Correlation import CorrelationAnalyser
C = CorrelationAnalyser()
cc,rmse = C.getStats(pre,exp)
data.append({'name':p,'rmse':rmse,'cc':cc}) '''
return
def manageResults(self, name=None):
"""Get the results back - we can send the matrix to the main peat
table or put results into a labbook sheet.
Also allow user to merge with an existing table"""
job, name = self.getJob(name)
if job.error() != None:
print 'job had an error, use view details'
elif job.state() == 'Finished':
self.showPEATSAResultsDialog(job, name)
else:
print 'Job is not finished yet.'
return
def editConfigFile(self):
"""Edit config file"""
from PEATDB.textFrame import textFrame
tf = textFrame(parent=self.mainwin,
title='PEATSA Conf file')
tf.load_from_file(self.confpath)
self.parent.wait_window(tf.frame)
#reconnect
configuration = Core.Environment.Configuration(filename=self.confpath)
self.connect(configuration)
return
def showPEATSAResultsDialog(self, job, name):
resdlg = Toplevel()
resdlg.geometry('600x450+300+200')
resdlg.title('PEATSA results '+name)
balloon = Pmw.Balloon(resdlg)
self.currname = name
body = Frame(resdlg)
resdlg.initial_focus = body
body.pack(fill=BOTH,expand=1,padx=5, pady=5)
self.matrices = job.data.allMatrices()
fr=Frame(body)
fr.grid(row=0,column=0,sticky='news',rowspan=2)
for m in self.matrices:
| if self.matrices[m] != None:
self.showMatrix(fr,self.matrices[m], m) | conditional_block |
|
PEATSAplugin.py | states.log',interval=60)
        print '\nConnection to server made successfully.\n'
return
def createMutationList(self, filename=None):
self.mutationList = Core.Data.MutationListFile(create=False)
return
def fetchJob(self):
"""Get job from it's db ID and add to list"""
mpDlg = MultipleValDialog(title='Get Job',
initialvalues=('','my job1'),
labels=('ID','Your label',),
types=('string','string'),
parent=self.mainwin)
if mpDlg.result == True:
jobid = mpDlg.results[0]
name = mpDlg.results[1]
else:
return
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
if job != None:
print 'adding job id %s to list' %job.identification
self.storeJob(name, job)
self.updateJobs()
return
def writetempPDB(self,name=None,pdbfile='refprot.pdb'):
if name==None:
name = self.DB.meta.refprotein
pdblines = self.DB[name].Structure
#pdbfile = 'refprot.pdb'
fd=open(pdbfile,'w')
for line in pdblines:
fd.write(line)
fd.close()
return pdbfile
def getrefPDBName(self):
name = self.DB.meta.refprotein
if self.DB[name].has_key('pdbname'):
name = self.DB[name]['pdbname']
return name.split('.')[0]
else:
return ''
def createJobDialog(self):
"""Get details from user using dialog
required: structure, mutations, type of calc and a tag (optional)"""
def | (text):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return 1
if text in self.DB.meta.peatsa_jobs:
return -1
else:
return 1
def close():
jobdlg.destroy()
def loadmuts():
filename=tkFileDialog.askopenfilename(initialdir=os.getcwd(),
filetypes=[("All files","*")])
if filename:
mutlist.importfile(filename)
return
def loadmutsfromDB():
for p in self.DB.getRecs():
mut = self.DB.get(p).Mutations
if mut == None or mut=='':
continue
if type(mut) is types.StringType:
mutlist.appendtext(mut+'\n')
else:
mutstring = mut.getMutationString()
if mutstring != None:
mutlist.appendtext(mutstring+'\n')
return
def getstruct():
filename=tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("pdb","*.pdb"),("All files","*.*")])
pdbentry.setvalue(filename)
return
def getligand():
self.ligandfile = tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("mol2","*.mol2"),("All files","*.*")])
def submit():
#if calcmenu.getcurselection() == 'both':
# calcs = ['stability','binding']
if calcmenu.getcurselection() == 'pka':
calcs = ['scan']
else:
calcs = [calcmenu.getcurselection()]
mutationlist = mutlist.getvalue().split('\n')
mutationlist.remove('')
pdbfile=None; pdb = None
quality = mutqualentry.getvalue()
if not hasattr(self.DB.meta, 'refprotein') or self.DB.meta.refprotein == None:
tkMessageBox.showinfo('No ref protein',
'Set a reference (wt) protein first')
return
#if self.useref.get() == 1:
#we use ref pdb by default now
pdbfile = self.writetempPDB()
pdbname = self.getrefPDBName()
if len(mutationlist) == 0 or mutationlist==[u'']:
print 'mutation list is empty'
return
if hasattr(self.DB.meta,'peatsa_jobs') and nameentry.getvalue() in self.DB.meta.peatsa_jobs:
print 'job name already used'
return
name=nameentry.getvalue()
expcol = expcolmenu.getcurselection()
self.submitJob(name=name, pdbname=pdbname,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=mutationlist,
calcs=calcs, mutationquality=quality,
meta={'expcol':expcol,'pdbname':pdbname})
close()
jobdlg = Toplevel()
jobdlg.geometry('+220+220')
jobdlg.title('Create Calculation')
balloon = Pmw.Balloon(jobdlg)
nameentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Name:',
validate = validatename,
value = 'mycalc')
nameentry.pack(fill=BOTH,expand=1)
balloon.bind(nameentry, 'Calculation name can be anything, but should be unique')
expcols = ['']+self.DB.getSimpleFields()
expcolmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Exp. col:',
items = expcols,
initialitem = '',
menubutton_width = 8)
expcolmenu.pack(fill=BOTH,expand=1)
balloon.bind(expcolmenu, 'Field with experimental data to compare, optional')
calcmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Calculation Type:',
items = self.calctypes,
initialitem = 'stability',
menubutton_width = 8)
calcmenu.pack(fill=X,expand=1)
fr=Frame(jobdlg)
fr.pack(fill=X,expand=1)
mutqualentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Quality:',
validate = validatename,
value = '2.0')
mutqualentry.pack(fill=BOTH,expand=1)
Label(jobdlg,text='Using PDB: '+self.getrefPDBName()).pack(fill=BOTH,expand=1)
self.ligandfile=None
mutlist = Pmw.ScrolledText(jobdlg,
labelpos = 'n',
label_text='Mutations:',
usehullsize = 1,
hull_width = 200,
hull_height = 250,
text_wrap='word')
mutlist.pack(fill=BOTH,expand=1)
Button(jobdlg,text='Load Mutations from Project',command=loadmutsfromDB).pack(fill=X,expand=1)
Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)
balloon.bind(mutlist, 'Enter one mutation per line in the form\n A:0003:ALA or A3A')
f=Frame(jobdlg); f.pack(fill=X,expand=1)
Button(f,text='Submit',command=submit).pack(side=LEFT,fill=X,expand=1,pady=2)
Button(f,text='Cancel',command=close).pack(fill=X,expand=1,pady=2)
jobdlg.grab_set()
jobdlg.transient(self.parent)
self.parent.wait_window(jobdlg)
return
def submitJob(self, name='mycalc', pdbname=None, pdb=None, pdbfile=None, ligandfile=None,
mutations=[], calcs=['stability'], mutationquality='2.0', meta={}):
"""Submit job to server"""
if 'scan' in calcs and pdbname==None:
print 'You must provide pdb code for pKa calcs'
return
if pdb==None and pdbfile==None:
return
job = self.jobManager.createJob(pdbId=pdbname, calculations=calcs,
dataTable='Data', metadata=meta,
optionArgs={'--mutationQuality':mutationquality})
if pdb != None:
job.setStructure(pdb)
else:
job.setStructureFromFile(pdbfile)
if 'binding' in calcs:
job.setLigandFromFile(ligandfile)
self.mutationList = Core.Data.MutationListFile(filename='tempmutlist', create=True)
sets=[]
for code in mutations:
if code == '': continue
try:
sets.append(Core.Data.MutationSet(code))
except:
print 'mutation code %s incorrect' %code
for s in sets:
self.mutationList.addMutant(s, autoUpdate=False, ignoreDuplicates=True)
self.mutationList.removeDuplicates(autoUpdate=False)
job.setMutationListFile(self.mutationList)
job.setState('Ready')
self.jobManager.logJobStates('jobstates.log')
#add job to peat database
self.storeJob(name, job)
if self.parent != None:
username = self.parent.username
self.updateJobs()
else:
username = None
self.DB.commit(note='peatsa job',user=username)
print 'job submitted successfully'
return
def resubmitJob(self):
"""Resend a job based on new mutations in DB that are not | validatename | identifier_name |
PEATSAplugin.py |
def manageJobsButtons(self, parent):
fr1 = Frame(parent)
Button(fr1,text='View Results',command=self.showAllResults,bg='#ccFFFF').pack(side=TOP,fill=BOTH,expand=1)
fr1.pack(fill=BOTH)
Button(fr1,text='Merge Results',command=self.mergeCurrent).pack(side=TOP,fill=BOTH,expand=1)
fr1.pack(fill=BOTH)
fr = Frame(parent)
c='#ADD8E6'
Button(fr,text='Show Details',command=self.viewDetails,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Manage Results',command=self.manageResults,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Remove',command=self.removeJob,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
Button(fr,text='Resubmit',command=self.resubmitJob,bg=c).pack(side=LEFT,fill=BOTH,expand=1)
fr.pack(fill=BOTH)
return
def createLogWin(self, parent):
log = Pmw.ScrolledText(parent,
borderframe=1,
labelpos = 'n',
label_text='Log',
usehullsize = 1,
hull_width = 800,
hull_height = 200,
text_wrap='word')
return log
def stdout2Log(self):
"""Redirect stdout to app control"""
sys.stdout = self
sys.stderr = self
return
def log2Stdout(self):
"""return to stdout"""
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
return
def write(self, txt):
"""Handle stdout if required"""
self.log.appendtext(txt)
self.log.update_idletasks()
return
def flush(self):
return
def connect(self, configuration):
"""Create connection"""
self.connection = PEATSA.WebApp.UtilityFunctions.ConnectionFromConfiguration(configuration)
self.jobManager = PEATSA.WebApp.Data.JobManager(self.connection)
self.jobManager.setJobStateLogging('jobstates.log',interval=60)
        print '\nConnection to server made successfully.\n'
return
def createMutationList(self, filename=None):
self.mutationList = Core.Data.MutationListFile(create=False)
return
def fetchJob(self):
"""Get job from it's db ID and add to list"""
mpDlg = MultipleValDialog(title='Get Job',
initialvalues=('','my job1'),
labels=('ID','Your label',),
types=('string','string'),
parent=self.mainwin)
if mpDlg.result == True:
jobid = mpDlg.results[0]
name = mpDlg.results[1]
else:
return
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
if job != None:
print 'adding job id %s to list' %job.identification
self.storeJob(name, job)
self.updateJobs()
return
def writetempPDB(self,name=None,pdbfile='refprot.pdb'):
if name==None:
name = self.DB.meta.refprotein
pdblines = self.DB[name].Structure
#pdbfile = 'refprot.pdb'
fd=open(pdbfile,'w')
for line in pdblines:
fd.write(line)
fd.close()
return pdbfile
def getrefPDBName(self):
name = self.DB.meta.refprotein
if self.DB[name].has_key('pdbname'):
name = self.DB[name]['pdbname']
return name.split('.')[0]
else:
return ''
def createJobDialog(self):
"""Get details from user using dialog
required: structure, mutations, type of calc and a tag (optional)"""
def validatename(text):
if not hasattr(self.DB.meta,'peatsa_jobs'):
return 1
if text in self.DB.meta.peatsa_jobs:
return -1
else:
return 1
def close():
jobdlg.destroy()
def loadmuts():
filename=tkFileDialog.askopenfilename(initialdir=os.getcwd(),
filetypes=[("All files","*")])
if filename:
mutlist.importfile(filename)
return
def loadmutsfromDB():
for p in self.DB.getRecs():
mut = self.DB.get(p).Mutations
if mut == None or mut=='':
continue
if type(mut) is types.StringType:
mutlist.appendtext(mut+'\n')
else:
mutstring = mut.getMutationString()
if mutstring != None:
mutlist.appendtext(mutstring+'\n')
return
def getstruct():
filename=tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("pdb","*.pdb"),("All files","*.*")])
pdbentry.setvalue(filename)
return
def getligand():
self.ligandfile = tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("mol2","*.mol2"),("All files","*.*")])
def submit():
#if calcmenu.getcurselection() == 'both':
# calcs = ['stability','binding']
if calcmenu.getcurselection() == 'pka':
calcs = ['scan']
else:
calcs = [calcmenu.getcurselection()]
mutationlist = mutlist.getvalue().split('\n')
mutationlist.remove('')
pdbfile=None; pdb = None
quality = mutqualentry.getvalue()
if not hasattr(self.DB.meta, 'refprotein') or self.DB.meta.refprotein == None:
tkMessageBox.showinfo('No ref protein',
'Set a reference (wt) protein first')
return
#if self.useref.get() == 1:
#we use ref pdb by default now
pdbfile = self.writetempPDB()
pdbname = self.getrefPDBName()
if len(mutationlist) == 0 or mutationlist==[u'']:
print 'mutation list is empty'
return
if hasattr(self.DB.meta,'peatsa_jobs') and nameentry.getvalue() in self.DB.meta.peatsa_jobs:
print 'job name already used'
return
name=nameentry.getvalue()
expcol = expcolmenu.getcurselection()
self.submitJob(name=name, pdbname=pdbname,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=mutationlist,
calcs=calcs, mutationquality=quality,
meta={'expcol':expcol,'pdbname':pdbname})
close()
jobdlg = Toplevel()
jobdlg.geometry('+220+220')
jobdlg.title('Create Calculation')
balloon = Pmw.Balloon(jobdlg)
nameentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Name:',
validate = validatename,
value = 'mycalc')
nameentry.pack(fill=BOTH,expand=1)
balloon.bind(nameentry, 'Calculation name can be anything, but should be unique')
expcols = ['']+self.DB.getSimpleFields()
expcolmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Exp. col:',
items = expcols,
initialitem = '',
menubutton_width = 8)
expcolmenu.pack(fill=BOTH,expand=1)
balloon.bind(expcolmenu, 'Field with experimental data to compare, optional')
calcmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Calculation Type:',
items = self.calctypes,
initialitem = 'stability',
menubutton_width = 8)
calcmenu.pack(fill=X,expand=1)
fr=Frame(jobdlg)
fr.pack(fill=X,expand=1)
mutqualentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Quality:',
validate = validatename,
value = '2.0')
mutqualentry.pack(fill=BOTH,expand=1)
Label(jobdlg,text='Using PDB: '+self.getrefPDBName()).pack(fill=BOTH,expand=1)
self.ligandfile=None
mutlist = Pmw.ScrolledText(jobdlg,
labelpos = 'n',
label_text='Mutations:',
usehullsize = 1,
hull_width = 200,
hull_height = 250,
text_wrap='word')
mutlist.pack(fill=BOTH,expand=1)
Button(jobdlg,text='Load Mutations from Project',command=loadmutsfromDB).pack(fill=X,expand=1)
Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)
balloon.bind(mutlist, 'Enter one mutation per line in the form\n A:0003:ALA or A3A')
f=Frame(jobdlg); f.pack(fill=X,expand=1)
Button(f,text='Submit',command=submit).pack(side | random_line_split |
||
PEATSAplugin.py | 'both':
# calcs = ['stability','binding']
if calcmenu.getcurselection() == 'pka':
calcs = ['scan']
else:
calcs = [calcmenu.getcurselection()]
mutationlist = mutlist.getvalue().split('\n')
mutationlist.remove('')
pdbfile=None; pdb = None
quality = mutqualentry.getvalue()
if not hasattr(self.DB.meta, 'refprotein') or self.DB.meta.refprotein == None:
tkMessageBox.showinfo('No ref protein',
'Set a reference (wt) protein first')
return
#if self.useref.get() == 1:
#we use ref pdb by default now
pdbfile = self.writetempPDB()
pdbname = self.getrefPDBName()
if len(mutationlist) == 0 or mutationlist==[u'']:
print 'mutation list is empty'
return
if hasattr(self.DB.meta,'peatsa_jobs') and nameentry.getvalue() in self.DB.meta.peatsa_jobs:
print 'job name already used'
return
name=nameentry.getvalue()
expcol = expcolmenu.getcurselection()
self.submitJob(name=name, pdbname=pdbname,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=mutationlist,
calcs=calcs, mutationquality=quality,
meta={'expcol':expcol,'pdbname':pdbname})
close()
jobdlg = Toplevel()
jobdlg.geometry('+220+220')
jobdlg.title('Create Calculation')
balloon = Pmw.Balloon(jobdlg)
nameentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Name:',
validate = validatename,
value = 'mycalc')
nameentry.pack(fill=BOTH,expand=1)
balloon.bind(nameentry, 'Calculation name can be anything, but should be unique')
expcols = ['']+self.DB.getSimpleFields()
expcolmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Exp. col:',
items = expcols,
initialitem = '',
menubutton_width = 8)
expcolmenu.pack(fill=BOTH,expand=1)
balloon.bind(expcolmenu, 'Field with experimental data to compare, optional')
calcmenu = Pmw.OptionMenu(jobdlg,
labelpos = 'w',
label_text = 'Calculation Type:',
items = self.calctypes,
initialitem = 'stability',
menubutton_width = 8)
calcmenu.pack(fill=X,expand=1)
fr=Frame(jobdlg)
fr.pack(fill=X,expand=1)
mutqualentry = Pmw.EntryField(jobdlg,
labelpos = 'w',
label_text = 'Quality:',
validate = validatename,
value = '2.0')
mutqualentry.pack(fill=BOTH,expand=1)
Label(jobdlg,text='Using PDB: '+self.getrefPDBName()).pack(fill=BOTH,expand=1)
self.ligandfile=None
mutlist = Pmw.ScrolledText(jobdlg,
labelpos = 'n',
label_text='Mutations:',
usehullsize = 1,
hull_width = 200,
hull_height = 250,
text_wrap='word')
mutlist.pack(fill=BOTH,expand=1)
Button(jobdlg,text='Load Mutations from Project',command=loadmutsfromDB).pack(fill=X,expand=1)
Button(jobdlg,text='Load Mutations from File',command=loadmuts).pack(fill=X,expand=1)
balloon.bind(mutlist, 'Enter one mutation per line in the form\n A:0003:ALA or A3A')
f=Frame(jobdlg); f.pack(fill=X,expand=1)
Button(f,text='Submit',command=submit).pack(side=LEFT,fill=X,expand=1,pady=2)
Button(f,text='Cancel',command=close).pack(fill=X,expand=1,pady=2)
jobdlg.grab_set()
jobdlg.transient(self.parent)
self.parent.wait_window(jobdlg)
return
def submitJob(self, name='mycalc', pdbname=None, pdb=None, pdbfile=None, ligandfile=None,
mutations=[], calcs=['stability'], mutationquality='2.0', meta={}):
"""Submit job to server"""
if 'scan' in calcs and pdbname==None:
print 'You must provide pdb code for pKa calcs'
return
if pdb==None and pdbfile==None:
return
job = self.jobManager.createJob(pdbId=pdbname, calculations=calcs,
dataTable='Data', metadata=meta,
optionArgs={'--mutationQuality':mutationquality})
if pdb != None:
job.setStructure(pdb)
else:
job.setStructureFromFile(pdbfile)
if 'binding' in calcs:
job.setLigandFromFile(ligandfile)
self.mutationList = Core.Data.MutationListFile(filename='tempmutlist', create=True)
sets=[]
for code in mutations:
if code == '': continue
try:
sets.append(Core.Data.MutationSet(code))
except:
print 'mutation code %s incorrect' %code
for s in sets:
self.mutationList.addMutant(s, autoUpdate=False, ignoreDuplicates=True)
self.mutationList.removeDuplicates(autoUpdate=False)
job.setMutationListFile(self.mutationList)
job.setState('Ready')
self.jobManager.logJobStates('jobstates.log')
#add job to peat database
self.storeJob(name, job)
if self.parent != None:
username = self.parent.username
self.updateJobs()
else:
username = None
self.DB.commit(note='peatsa job',user=username)
print 'job submitted successfully'
return
def resubmitJob(self):
"""Resend a job based on new mutations in DB that are not in job already"""
job, name = self.getJob()
if job == None:
return
DB=self.DB
self.matrices = job.data.allMatrices()
matrices = self.matrices
for m in matrices:
matrix = matrices[m]
if matrix==None: return
muts = matrix.mutationCodes()
dbmuts = [DB.get(p).Mutations for p in DB.getRecs()]
newmuts = list(set(dbmuts) - set(muts))
print 'the following mutations in the project are not in the job: %s' %newmuts
'''self.submitJob(name=name,
pdb=pdb, pdbfile=pdbfile,
ligandfile=self.ligandfile,
mutations=newmuts,
calcs=calcs, meta={'expcol':expcol}) '''
self.log.yview('moveto', 1)
return
def getJob(self, name=None):
"""Get job from name"""
if name == None:
name = self.jobstable.get_selectedRecordNames()[0]
if name == None:
return None, name
jobid = self.DB.meta.peatsa_jobs[name]
try:
job = PEATSA.WebApp.Data.Job(jobid, self.connection)
except:
#print 'job not in database'
return None,name
return job, name
def removeJob(self):
"""Remove a job from the db"""
job, name = self.getJob()
answer = tkMessageBox.askyesno("Warning",'Remove this job?')
if answer == False:
return
try:
self.jobManager.deleteJob(job)
except:
print 'job not in database, removing from peat'
del self.DB.meta.peatsa_jobs[name]
self.DB.meta.__p__changed = 1
self.updateJobs()
return
def viewDetails(self, name=None):
job, name = self.getJob()
if job==None:
return
jobmeta = job.metadata()
print
print job.data
print 'details for job %s' %name
print 'job status:',job.state()
print 'submitted on ',job.date
if jobmeta.has_key('pdbname'):
print 'original pdb file:', jobmeta['pdbname']
print 'mutations:', len(job.mutationListFile().mutantList())
print '(this job has id %s)' %job.identification
if job.error() != None:
print 'The job had an error..'
print job.error()['ErrorDescription']
print job.error()['DetailedDescription']
print
self.log.yview('moveto', 1)
return
def addColoredText(self, st, tag, word, fg='black', bg='white'):
| """add a space to the end of the word"""
word = word + " "
st.insert('end', word)
end_index = st.index('end')
begin_index = "%s-%sc" % (end_index, len(word) + 1)
st.tag_add(tag, begin_index, end_index)
st.tag_config(tag, foreground=fg, background=bg)
return | identifier_body |
|
Weather.py |
def getTargetNames(self):
""" Get target names
Returns:
Target names
"""
return self.targetNames
def getNrTargets(self):
""" Get number of targets
Returns:
Number of targets
"""
return self.targetNames.size
def getFeatures(self):
""" Get feature names
Returns:
Feature names
"""
return self.featureNames
def getNrFeatures(self):
""" Get number of features
Returns:
Number of features
"""
return self.featureNames.size
def getFeatureData(self, feature):
""" Get data for chosen feature
Args:
feature (str): selected feature
Returns:
Observation data of the selected feature (list)
"""
return self.data[:,self._getFIdx(feature)]
def getStationData(self, stationId):
""" Get data for chosen station
Args:
stationId (str): selected station
Returns:
Observation data of the selected station (list)
"""
if (stationId == 'all'):
return self.stationData
else:
station = np.where(self.stationData == stationId)[0][0]
return self.stationData[station]
def getNrStations(self):
""" Get number of observation stations
Returns:
Number of observation stations
"""
return len(self.stationData)
## DATA MANIPULATION METHODS
def modify(self, feature, newValues):
""" Replace the data of a chosen feature with a given list of new values
Args:
feature (str): selected feature
newValues (list(str)): New set of values to overwrite old data
Returns:
0 for success
"""
self.data[:,self._getFIdx(feature)] = newValues
return 0
def append(self, featureName, featureData):
""" Append the data with a new feature and list of new values
Args:
featureName (str): name of new feature to add
featureData (list(str)): New set of values to add
Returns:
0 for success
"""
self.data = np.concatenate((self.data, np.array([featureData]).T), axis=1)
self.featureNames = np.append(self.featureNames, featureName)
return 0
def select(self, features):
""" Select a set of features to retain (and remove other features)
Args:
features (list(str)): Selected set of features
Returns:
0 for success
"""
if 'Weather Type' not in features:
features.append('Weather Type')
self.data = self.data[:,[self._getFIdx(f) for f in features]]
self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]
return 0
def discard(self):
""" Discard observations with null data
Returns:
0 for success
"""
for f in self.featureNames:
self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']
return 0
def delete(self, feature):
""" Delete a feature and assoicated data
Args:
feature (str): name of feature to delete
Returns:
0 for success
"""
if (self._isFIdx(feature)):
self.data = np.delete(self.data, self._getFIdx(feature), axis=1)
self.featureNames = np.delete(self.featureNames, self._getFIdx(feature))
return 0
def export(self, fname):
""" Export object to pickle file
Args:
fname (str): export file name
Returns:
0 for success
"""
# discard any data with null feature values
self.discard()
# set target as last column
self.target = self.getFeatureData('Weather Type')
# remove non-exportable features
for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:
if self._isFIdx(n):
self.delete(n)
# convert all data to float
self.data = self.data.astype(float)
# export to file
pickle.dump(self, open(fname, 'wb'))
return 0
## STATS UTILITIES
def getObservations(self, stationId='', obsDate='', obsTime='', features=[]):
""" Provide observation data for a chosen feature filtered by station, date, time
Args:
stationId (str): Station ID
obsDate (str): Observation date
obsTime (str): Observation time
features (list(str)): List of chosen features
Returns:
stats (list): List of observation data
"""
# filter data
stats = self.data
if (stationId):
stats = stats[stats[:,self._getFIdx('Station ID')] == stationId]
if (obsDate):
stats = stats[stats[:,self._getFIdx('Date')] == obsDate]
if (obsTime):
stats = stats[stats[:,self._getFIdx('Time since midnight')] == obsTime]
# return features
if (features):
features = [self._getFIdx(f) for f in features]
return stats[:,features]
else:
return stats
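# Editor's note (illustrative sketch, not part of the original module): the filters above
# combine with AND semantics, so a typical query for one station at one observation time,
# returning only selected columns, might look like the following (the station id, time and
# constructor call are hypothetical and depend on the loaded data file):
#
#   w = Weather('observations.txt')  # assumed constructor
#   obs = w.getObservations(stationId='3772', obsTime='720',
#                           features=['Temperature', 'Humidity'])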
def findStations(self, coords=[], offset=[], minThreshold=10, maxThreshold=100):
""" Find the nearet observation station to a given location
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
minThreshold (int): Minimum acceptable distance from chosen location
maxThreshold (int): Maximum acceptable distance from chosen location
Returns:
stations (list): List of nearby stations
"""
nearStations = []
# check for supplied Latitude and Longitude
if not (coords[0] and coords[1]):
return 0
# calculate new coords with offset
if (offset):
if not (offset[0] and offset[1]):
return 0
coords = self._getNewCoords(coords, offset)
# iterate through weather stations
for s in self.stationData:
# get distance between point and station
distance = self._getDistance([float(coords[0]), float(coords[1])], \
[float(s[2]), float(s[3])] )
# add if within threshold
if ((distance > minThreshold) and (distance < maxThreshold)):
nearStations.append([s[0], s[1], s[2], s[3], distance])
return sorted(nearStations, key=lambda x: (x[4]))
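# Editor's note (illustrative sketch, not part of the original module): findStations returns
# [id, name, lat, lon, distance] rows sorted by distance, so the nearest acceptable station
# is simply the first element. The coordinates and offset below are examples only:
#
#   nearby = w.findStations(coords=['51.50', '-0.12'], offset=['20', '90'])
#   if nearby:
#       nearest_id = nearby[0][0]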
def setRelTime(self):
""" Define new feature to track observation time relative to start of sample
Returns:
0 if success
"""
obsRelTime = [self._getRelTime(o) for o in self.data]
self.append('Relative Time', obsRelTime)
return 0
## PRIVATE SET METHODS
def _setTargetNames(self):
""" Set target names based on data stream
Returns:
0 if success
"""
# full target names
if (self.dataStream == 0):
self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\
'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \
'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \
'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \
'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \
'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \
'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])
# main target names
elif (self.dataStream == 1):
self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \
'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])
# basic target names
elif (self.dataStream == 2):
self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])
return 0
def _setFeatureNames(self):
""" Set feature names
Returns:
0 if success
"""
self.featureNames = np.array(['Station ID', 'Station Name', 'Elevation', 'Latitude', 'Longitude', 'Date', \
'Time since midnight', 'Gust', 'Temperature', 'Visibilty', 'Wind Direction', \
'Wind Speed', 'Pressure', 'Pressure Trend', 'Dew Point', 'Humidity', 'Weather Type'])
return 0
def _setStationData(self):
""" Set station data
LIMITATION:
Old version of numpy on desktop PCs which does not accept axis \
argument in np.unique(). Use workaround to | """ Get number of weather observations read from file
Returns:
Number of weather observations
"""
return len(self.data) | identifier_body |
|
Weather.py | if (offset):
if not (offset[0] and offset[1]):
return 0
coords = self._getNewCoords(coords, offset)
# iterate through weather stations
for s in self.stationData:
# get distance between point and station
distance = self._getDistance([float(coords[0]), float(coords[1])], \
[float(s[2]), float(s[3])] )
# add if within threshold
if ((distance > minThreshold) and (distance < maxThreshold)):
nearStations.append([s[0], s[1], s[2], s[3], distance])
return sorted(nearStations, key=lambda x: (x[4]))
def setRelTime(self):
""" Define new feature to track observation time relative to start of sample
Returns:
0 if success
"""
obsRelTime = [self._getRelTime(o) for o in self.data]
self.append('Relative Time', obsRelTime)
return 0
## PRIVATE SET METHODS
def _setTargetNames(self):
""" Set target names based on data stream
Returns:
0 if success
"""
# full target names
if (self.dataStream == 0):
self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\
'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \
'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \
'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \
'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \
'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \
'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])
# main target names
elif (self.dataStream == 1):
self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \
'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])
# basic target names
elif (self.dataStream == 2):
self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])
return 0
def _setFeatureNames(self):
""" Set feature names
Returns:
0 if success
"""
self.featureNames = np.array(['Station ID', 'Station Name', 'Elevation', 'Latitude', 'Longitude', 'Date', \
'Time since midnight', 'Gust', 'Temperature', 'Visibilty', 'Wind Direction', \
'Wind Speed', 'Pressure', 'Pressure Trend', 'Dew Point', 'Humidity', 'Weather Type'])
return 0
def _setStationData(self):
""" Set station data
LIMITATION:
Old version of numpy on desktop PCs which does not accept axis \
argument in np.unique(). Use workaround to reduce array
Returns:
0 if success
"""
self.stationData = self.data[:,[self._getFIdx(f) for f in \
'Station ID', 'Station Name', 'Latitude', 'Longitude']]
# self.stationData = np.unique(self.stationData, axis=0)
self.stationData = self._unique_rows(self.stationData)
return 0
## PRIVATE DATA MANIPULATION METHODS
def _load(self):
""" Load data from file
Returns:
0 if success, -1 if file cannot be read
"""
# number of non-data header details at top of data file
header = 1
# open file
weatherData = []
with open(self.wfile) as myfile:
if (self.lines > 0):
weatherData = [next(myfile) for x in xrange(self.lines + header)]
else:
weatherData = myfile.readlines()
# get data stream from first line
streamHeader = weatherData.pop(0).rstrip()
if (streamHeader == 'FULL'):
self.dataStream = 0
elif (streamHeader == 'ADVANCED'):
self.dataStream = 1
elif (streamHeader == 'BASIC'):
self.dataStream = 2
else:
print "Error: unecognised data stream from file %s" % (self.wfile)
return -1
# read data
inputData = []
for line in weatherData:
entries = line.split()
inputData.append(entries)
# copy all into np array
self.data = np.array(inputData)
return 0
def _getFIdx(self, featureName):
""" Get Feature Index in data numpy array
Args:
featureName (str): Name of feature
Returns:
index
"""
return np.where(self.featureNames == featureName)[0][0]
def _isFIdx(self, featureName):
""" Look up if feature name is indexed in data numpy array
Args:
featureName (str): Name of feature
Returns:
1 if success, 0 if not found
"""
return 1 if (featureName in self.featureNames) else 0
## PRIVATE STATS UTILITIES
def _getDistance(self, source, dest):
""" Get the distance as crow flies between two coordinates
Args:
source (list(float)): Latitude and Longitude of source point
dest (list(float)): Latitude and Longitude of destination point
Returns:
distance (float): distance between points
"""
lat1 = source[0]
lat2 = dest[0]
lon1 = source[1]
lon2 = dest[1]
# Formula from https://www.movable-type.co.uk/scripts/latlong.html
R = 6370000
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
deltaPhi = math.radians(lat2-lat1)
deltalmb = math.radians(lon2-lon1)
a = math.sin(deltaPhi/2) * math.sin(deltaPhi/2) + \
math.cos(phi1) * math.cos(phi2) * \
math.sin(deltalmb/2) * math.sin(deltalmb/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = (R * c)/1000.
return d
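# Editor's note (worked check, not part of the original module): with the haversine formula
# above, London (51.5074, -0.1278) to Paris (48.8566, 2.3522) comes out at roughly 344 km,
# matching the commonly quoted great-circle distance:
#
#   self._getDistance([51.5074, -0.1278], [48.8566, 2.3522])  # ~344.0 (km)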
def _getNewCoords(self, coords, offset):
""" Calculate new coordinates after applying offset
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
BUG?:
direction seems to be opposite from what I expect, made correction of 360-x
LIMITATION:
Due E (or W) gives slightly different results for latitude (e.g. 50N over 200km is 49.96N)
Returns:
coords (list(float, float)): New coordinates
"""
oldlat = math.radians(float(coords[0]))
oldlon = math.radians(float(coords[1]))
magnitude = float(offset[0]) / 6370.
direction = math.radians(360.-float(offset[1]))
# Calculate lat/lon given radial and distance (http://www.edwilliams.org/avform.htm#LL)
lat = math.asin(math.sin(oldlat) * math.cos(magnitude) + math.cos(oldlat) \
* math.sin(magnitude) * math.cos(direction))
lon = (oldlon - math.asin(math.sin(direction) * math.sin(magnitude) / math.cos(lat)) \
+ math.pi) % (2 * math.pi) - math.pi
# print coords, offset, oldlat, oldlon, magnitude, direction, math.degrees(lat), math.degrees(lon)
return (math.degrees(lat), math.degrees(lon))
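# Editor's note (illustrative sketch, not part of the original module): offsetting a point
# by 100 km due North leaves the longitude essentially unchanged and moves the latitude by
# roughly 100/6370 rad (about 0.9 degrees):
#
#   self._getNewCoords(['50.0', '0.0'], ['100', '0'])  # ~(50.9, 0.0)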
# Workaround on earlier numpy versions from https://github.com/numpy/numpy/issues/2871
def _unique_rows(self, A, return_index=False, return_inverse=False):
"""
Similar to MATLAB's unique(A, 'rows'), this returns B, I, J
where B is the unique rows of A and I and J satisfy
A = B[J,:] and B = A[I,:]
Returns I if return_index is True
Returns J if return_inverse is True
"""
A = np.require(A, requirements='C')
assert A.ndim == 2, "array must be 2-dim'l"
B = np.unique(A.view([('', A.dtype)]*A.shape[1]),
return_index=return_index,
return_inverse=return_inverse)
if return_index or return_inverse:
return (B[0].view(A.dtype).reshape((-1, A.shape[1]), order='C'),) \
+ B[1:]
else:
return B.view(A.dtype).reshape((-1, A.shape[1]), order='C')
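# Editor's note (not part of the original module): on a recent numpy this helper is
# equivalent to np.unique(A, axis=0) for 2-D arrays, e.g.:
#
#   A = np.array([['a', '1'], ['a', '1'], ['b', '2']])
#   self._unique_rows(A)  # -> [['a', '1'], ['b', '2']]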
def | _getRelTime | identifier_name |
|
Weather.py | of a chosen feature with a given list of new values
Args:
feature (str): selected feature
newValues (list(str)): New set of values to overwrite old data
Returns:
0 for success
"""
self.data[:,self._getFIdx(feature)] = newValues
return 0
def append(self, featureName, featureData):
""" Append the data with a new feature and list of new values
Args:
featureName (str): name of new feature to add
featureData (list(str)): New set of values to add
Returns:
0 for success
"""
self.data = np.concatenate((self.data, np.array([featureData]).T), axis=1)
self.featureNames = np.append(self.featureNames, featureName)
return 0
def select(self, features):
""" Select a set of features to retain (and remove other features)
Args:
features (list(str)): Selected set of features
Returns:
0 for success
"""
if 'Weather Type' not in features:
features.append('Weather Type')
self.data = self.data[:,[self._getFIdx(f) for f in features]]
self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]
return 0
def discard(self):
""" Discard observations with null data
Returns:
0 for success
"""
for f in self.featureNames:
self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']
return 0
def delete(self, feature):
""" Delete a feature and assoicated data
Args:
feature (str): name of feature to delete
Returns:
0 for success
"""
if (self._isFIdx(feature)):
self.data = np.delete(self.data, self._getFIdx(feature), axis=1)
self.featureNames = np.delete(self.featureNames, self._getFIdx(feature))
return 0
def export(self, fname):
""" Export object to pickle file
Args:
fname (str): export file name
Returns:
0 for success
"""
# discard any data with null feature values
self.discard()
# set target as last column
self.target = self.getFeatureData('Weather Type')
# remove non-exportable features
for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:
if self._isFIdx(n):
self.delete(n)
# convert all data to float
self.data = self.data.astype(float)
# export to file
pickle.dump(self, open(fname, 'wb'))
return 0
## STATS UTILITIES
def getObservations(self, stationId='', obsDate='', obsTime='', features=[]):
""" Provide observation data for a chosen feature filtered by station, date, time
Args:
stationId (str): Station ID
obsDate (str): Observation date
obsTime (str): Observation time
features (list(str)): List of chosen features
Returns:
stats (list): List of observation data
"""
# filter data
stats = self.data
if (stationId):
stats = stats[stats[:,self._getFIdx('Station ID')] == stationId]
if (obsDate):
stats = stats[stats[:,self._getFIdx('Date')] == obsDate]
if (obsTime):
stats = stats[stats[:,self._getFIdx('Time since midnight')] == obsTime]
# return features
if (features):
features = [self._getFIdx(f) for f in features]
return stats[:,features]
else:
return stats
def findStations(self, coords=[], offset=[], minThreshold=10, maxThreshold=100):
""" Find the nearet observation station to a given location
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
minThreshold (int): Minimum acceptable distance from chosen location
maxThreshold (int): Maximum acceptable distance from chosen location
Returns:
stations (list): List of nearby stations
"""
nearStations = []
# check for supplied Latitude and Longitude
if not (coords[0] and coords[1]):
return 0
# calculate new coords with offset
if (offset):
if not (offset[0] and offset[1]):
return 0
coords = self._getNewCoords(coords, offset)
# iterate through weather stations
for s in self.stationData:
# get distance between point and station
distance = self._getDistance([float(coords[0]), float(coords[1])], \
[float(s[2]), float(s[3])] )
# add if within threshold
if ((distance > minThreshold) and (distance < maxThreshold)):
nearStations.append([s[0], s[1], s[2], s[3], distance])
return sorted(nearStations, key=lambda x: (x[4]))
def setRelTime(self):
""" Define new feature to track observation time relative to start of sample
Returns:
0 if success
"""
obsRelTime = [self._getRelTime(o) for o in self.data]
self.append('Relative Time', obsRelTime)
return 0
## PRIVATE SET METHODS
def _setTargetNames(self):
""" Set target names based on data stream
Returns:
0 if success
"""
# full target names
if (self.dataStream == 0):
self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\
'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \
'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \
'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \
'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \
'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \
'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])
# main target names
elif (self.dataStream == 1):
self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \
'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])
# basic target names
elif (self.dataStream == 2):
self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])
return 0
def _setFeatureNames(self):
""" Set feature names
Returns:
0 if success
"""
self.featureNames = np.array(['Station ID', 'Station Name', 'Elevation', 'Latitude', 'Longitude', 'Date', \
'Time since midnight', 'Gust', 'Temperature', 'Visibilty', 'Wind Direction', \
'Wind Speed', 'Pressure', 'Pressure Trend', 'Dew Point', 'Humidity', 'Weather Type'])
return 0
def _setStationData(self):
""" Set station data
LIMITATION:
Old version of numpy on desktop PCs which does not accept axis \
argument in np.unique(). Use workaround to reduce array
Returns:
0 if success
"""
self.stationData = self.data[:,[self._getFIdx(f) for f in \
'Station ID', 'Station Name', 'Latitude', 'Longitude']]
# self.stationData = np.unique(self.stationData, axis=0)
self.stationData = self._unique_rows(self.stationData)
return 0
## PRIVATE DATA MANIPULATION METHODS
def _load(self):
""" Load data from file
Returns:
0 if success, -1 if file cannot be read
"""
# number of non-data header details at top of data file
header = 1
# open file
weatherData = []
with open(self.wfile) as myfile:
if (self.lines > 0):
weatherData = [next(myfile) for x in xrange(self.lines + header)]
else:
weatherData = myfile.readlines()
# get data stream from first line
streamHeader = weatherData.pop(0).rstrip()
if (streamHeader == 'FULL'):
self.dataStream = 0
elif (streamHeader == 'ADVANCED'):
|
elif (streamHeader == 'BASIC'):
self.dataStream = 2
else:
print "Error: unecognised data stream from file %s" % (self.wfile)
return -1
# read data
inputData = []
for line in weatherData:
entries = line.split()
inputData.append(entries)
# copy all into np array
self.data = | self.dataStream = 1 | conditional_block |
Weather.py | a chosen feature with a given list of new values
Args:
feature (str): selected feature
newValues (list(str)): New set of values to overwrite old data
Returns:
0 for success
"""
self.data[:,self._getFIdx(feature)] = newValues
return 0
def append(self, featureName, featureData):
""" Append the data with a new feature and list of new values
Args:
featureName (str): name of new feature to add
featureData (list(str)): New set of values to add
Returns:
0 for success
"""
self.data = np.concatenate((self.data, np.array([featureData]).T), axis=1)
self.featureNames = np.append(self.featureNames, featureName)
return 0
def select(self, features):
""" Select a set of features to retain (and remove other features)
Args:
features (list(str)): Selected set of features
Returns:
0 for success
"""
if 'Weather Type' not in features:
features.append('Weather Type')
self.data = self.data[:,[self._getFIdx(f) for f in features]]
self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]
return 0
def discard(self):
""" Discard observations with null data
Returns:
0 for success
"""
for f in self.featureNames:
self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']
return 0
def delete(self, feature):
""" Delete a feature and assoicated data
Args:
feature (str): name of feature to delete
Returns:
0 for success
"""
if (self._isFIdx(feature)):
self.data = np.delete(self.data, self._getFIdx(feature), axis=1)
self.featureNames = np.delete(self.featureNames, self._getFIdx(feature))
return 0
def export(self, fname):
""" Export object to pickle file
Args:
fname (str): export file name
Returns:
0 for success
"""
# discard any data with null feature values
self.discard()
# set target as last column
self.target = self.getFeatureData('Weather Type')
# remove non-exportable features
for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:
if self._isFIdx(n):
self.delete(n)
# convert all data to float
self.data = self.data.astype(float)
# export to file
pickle.dump(self, open(fname, 'wb'))
return 0
## STATS UTILITIES
def getObservations(self, stationId='', obsDate='', obsTime='', features=[]):
""" Provide observation data for a chosen feature filtered by station, date, time
Args:
stationId (str): Station ID
obsDate (str): Observation date
obsTime (str): Observation time
features (list(str)): List of chosen features
Returns:
stats (list): List of observation data
"""
# filter data
stats = self.data
if (stationId):
stats = stats[stats[:,self._getFIdx('Station ID')] == stationId]
if (obsDate):
stats = stats[stats[:,self._getFIdx('Date')] == obsDate]
if (obsTime):
stats = stats[stats[:,self._getFIdx('Time since midnight')] == obsTime]
# return features
if (features): | return stats[:,features]
else:
return stats
def findStations(self, coords=[], offset=[], minThreshold=10, maxThreshold=100):
""" Find the nearet observation station to a given location
Args:
coords (list(str1, str2)): Latitude and Longitude of location
offset (list(str1, str2)): Magnitude (km) and Direction (deg) offset to apply to location
minThreshold (int): Minimum acceptable distance from chosen location
maxThreshold (int): Maximum acceptable distance from chosen location
Returns:
stations (list): List of nearby stations
"""
nearStations = []
# check for supplied Latitude and Longitude
if not (coords[0] and coords[1]):
return 0
# calculate new coords with offset
if (offset):
if not (offset[0] and offset[1]):
return 0
coords = self._getNewCoords(coords, offset)
# iterate through weather stations
for s in self.stationData:
# get distance between point and station
distance = self._getDistance([float(coords[0]), float(coords[1])], \
[float(s[2]), float(s[3])] )
# add if within threshold
if ((distance > minThreshold) and (distance < maxThreshold)):
nearStations.append([s[0], s[1], s[2], s[3], distance])
return sorted(nearStations, key=lambda x: (x[4]))
def setRelTime(self):
""" Define new feature to track observation time relative to start of sample
Returns:
0 if success
"""
obsRelTime = [self._getRelTime(o) for o in self.data]
self.append('Relative Time', obsRelTime)
return 0
## PRIVATE SET METHODS
def _setTargetNames(self):
""" Set target names based on data stream
Returns:
0 if success
"""
# full target names
if (self.dataStream == 0):
self.targetNames = np.array(['Clear Night', 'Sunny Day', 'Partly cloudy (night)', 'Partly cloudy (day)',\
'Not used', 'Mist', 'Fog', 'Cloudy', 'Overcast', 'Light rain shower (night)', \
'Light rain shower (day)', 'Drizzle', 'Light rain', 'Heavy rain shower (night)', \
'Heavy rain shower (day)', 'Heavy rain', 'Sleet shower (night)', 'Sleet shower (day)', \
'Sleet', 'Hail shower (night)', 'Hail shower (day)', 'Hail', 'Light snow shower (night)', \
'Light snow shower (day)', 'Light snow', 'Heavy snow shower (night)', 'Heavy snow shower (day)', \
'Heavy snow', 'Thunder shower', 'Thunder shower (night)', 'Thunder'])
# main target names
elif (self.dataStream == 1):
self.targetNames = np.array(['Clear', 'Partly Cloudy', 'Mist', 'Fog', 'Cloudy', \
'Overcast', 'Rain', 'Sleet', 'Hail', 'Snow', 'Thunder'])
# basic target names
elif (self.dataStream == 2):
self.targetNames = np.array(['Clear', 'Cloudy', 'Precipitation'])
return 0
def _setFeatureNames(self):
""" Set feature names
Returns:
0 if success
"""
self.featureNames = np.array(['Station ID', 'Station Name', 'Elevation', 'Latitude', 'Longitude', 'Date', \
'Time since midnight', 'Gust', 'Temperature', 'Visibilty', 'Wind Direction', \
'Wind Speed', 'Pressure', 'Pressure Trend', 'Dew Point', 'Humidity', 'Weather Type'])
return 0
def _setStationData(self):
""" Set station data
LIMITATION:
Old version of numpy on desktop PCs which does not accept axis \
argument in np.unique(). Use workaround to reduce array
Returns:
0 if success
"""
self.stationData = self.data[:,[self._getFIdx(f) for f in \
'Station ID', 'Station Name', 'Latitude', 'Longitude']]
# self.stationData = np.unique(self.stationData, axis=0)
self.stationData = self._unique_rows(self.stationData)
return 0
## PRIVATE DATA MANIPULATION METHODS
def _load(self):
""" Load data from file
Returns:
0 if success, -1 if file cannot be read
"""
# number of non-data header details at top of data file
header = 1
# open file
weatherData = []
with open(self.wfile) as myfile:
if (self.lines > 0):
weatherData = [next(myfile) for x in xrange(self.lines + header)]
else:
weatherData = myfile.readlines()
# get data stream from first line
streamHeader = weatherData.pop(0).rstrip()
if (streamHeader == 'FULL'):
self.dataStream = 0
elif (streamHeader == 'ADVANCED'):
self.dataStream = 1
elif (streamHeader == 'BASIC'):
self.dataStream = 2
else:
print "Error: unecognised data stream from file %s" % (self.wfile)
return -1
# read data
inputData = []
for line in weatherData:
entries = line.split()
inputData.append(entries)
# copy all into np array
self.data = np | features = [self._getFIdx(f) for f in features] | random_line_split |
TD_Functions.py | 5)
def source_feature(seq_rec):
feature = None
for f in seq_rec.features:
if f.type == 'source':
feature = f
break
if not feature:
feature = SeqFeature(FeatureLocation(0,len(seq_rec.seq)),
type = 'source')
seq_rec.features.append(feature)
return feature
#end def
def format_PCR_conditions():
conc_str = ''
spacer = max(len(str(C_Na)),
len(str(C_Mg)),
len(str(C_dNTP)),
len(str(C_DNA)),
len(str(C_Prim)),
len(str(C_DMSO)))
conc_str += 'C(Na) = ' + str(C_Na) + ' '*(spacer-len(str(C_Na))) +' mM\n'
conc_str += 'C(Mg) = ' + str(C_Mg) + ' '*(spacer-len(str(C_Mg))) +' mM\n'
conc_str += 'C(dNTP) = ' + str(C_dNTP)+ ' '*(spacer-len(str(C_dNTP))) +' mM\n'
conc_str += 'C(DNA) = ' + str(C_DNA) + ' '*(spacer-len(str(C_DNA))) +' nM\n'
conc_str += 'C(Primer) = ' + str(C_Prim)+ ' '*(spacer-len(str(C_Prim))) +' uM\n'
conc_str += 'C(DMSO) = ' + str(C_DMSO)+ ' '*(spacer-len(str(C_DMSO))) +' %\n'
return conc_str
#end_def
def add_PCR_conditions(feature):
try:
feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
except Exception, e:
print 'add_PCR_conditions:'
print_exception(e)
#end def
def calculate_Tr(seq_rec, r):
primer_Tr = NN_Tr(seq_rec.seq, r)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['T-'+str(r)] = str(primer_Tr)
return primer_Tr
#end def
def calculate_Tm(seq_rec):
primer_Tm = NN_Tm(seq_rec.seq)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['Tm'] = str(primer_Tm)
return primer_Tm
#end def
def dimer_dG(dimer, seq1, seq2):
fwd_matches = list(dimer[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(dimer[1])
rev_matches.sort()
#e.g. (13,14,15,19,20)
seq_str = str(seq1)
seq_len = len(seq_str)
rev_str = str(seq2[::-1])
rev_len = len(rev_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] > 0:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == 0:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#check for 'right' dangling end
if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
#check for 'right' terminal mismatch
elif fwd_matches[-1] < seq_len-1 and rev_matches[0] < rev_len-1:
dG += Terminal_mismatch_mean
#check for 'right' terminal AT
elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r_next = rev_matches[i+1]
#if either || or |x| or |xx|
if f_next-f_match < 4:
NN = seq_str[f_match:f_match+2]
RV = rev_str[r_match:r_match+2]
#salt-corrected dG
dG += MismatchNN[NN][RV] + dG_Na
#if ||
if f_next-f_match == 1: continue
#if |x| or |xx|
elif f_next-f_match < 4:
NN1 = rev_str[r_next-1:r_next+1][::-1]
RV1 = seq_str[f_next-1:f_next+1][::-1]
dG += MismatchNN[NN1][RV1] + dG_Na
continue
#loop
elif f_next-f_match < 31:
dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
else: pass
return dG
#end def
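#Editor's note (illustrative sketch, not part of the original module): 'dimer' is assumed
#to be a pair of index tuples marking the base-paired positions, the first tuple indexing
#seq1 5'->3' and the second indexing the reversed seq2, e.g.
#dimer = ((2, 3, 4, 8, 9), (13, 14, 15, 19, 20)) as in the comments above; dimer_dG then
#sums initiation, dangling-end/terminal and salt-corrected stacking/mismatch contributions.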
def hairpin_dG(hairpin, seq):
fwd_matches = list(hairpin[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(hairpin[1])
rev_matches.sort(reverse=True)
#e.g (24,23,22,18,17)
seq_str = str(seq)
seq_len = len(seq_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] < seq_len-1:
dG += DanglingNN['X'+seq_str[rev_matches[0]]][seq_str[rev_matches[0]+1]]
elif fwd_matches[0] > 0 and rev_matches[0] == seq_len-1:
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] < seq_len-1:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == seq_len-1:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r_next = rev_matches[i+1]
#if either || or |x| or |xx|
if f_next-f_match < 4:
NN = seq_str[f_match:f_match+2]
RV = seq_str[r_match-1:r_match+1][::-1]
#salt-corrected dG
dG += MismatchNN[NN][RV] + dG_Na
#if ||
if f_next-f_match == 1: conti | nue
| conditional_block |
|
TD_Functions.py | K = DUP/((P-DUP)*(D-DUP))
#initial corrections
dH += delta_H('ini', 'ini')
dS += delta_S('ini', 'ini')
#test for AT terminals
if seq_str[0] == 'A' or seq_str[0] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
#stacking interactions
for n in range(len(seq_str)-1):
NN = seq_str[n:n+2]
RC = rev_com[seq_len-n-2:seq_len-n]
dH += delta_H(NN, RC)
dS += delta_S(NN, RC)
#salt concentration correction
dS = dS + dS_Na_coefficient * len(seq_str) * log(C_Na_eq()*1e-3) #C_Na mM
#final temperature calculation
return dH * 1000/(dS - R * log(K)) + K0 - 0.75 * C_DMSO #DMSO correction from [2]
#end def
def NN_Tm(seq): return NN_Tr(seq, 0.5)
def source_feature(seq_rec):
feature = None
for f in seq_rec.features:
if f.type == 'source':
feature = f
break
if not feature:
feature = SeqFeature(FeatureLocation(0,len(seq_rec.seq)),
type = 'source')
seq_rec.features.append(feature)
return feature
#end def
def format_PCR_conditions():
conc_str = ''
spacer = max(len(str(C_Na)),
len(str(C_Mg)),
len(str(C_dNTP)),
len(str(C_DNA)),
len(str(C_Prim)),
len(str(C_DMSO)))
conc_str += 'C(Na) = ' + str(C_Na) + ' '*(spacer-len(str(C_Na))) +' mM\n'
conc_str += 'C(Mg) = ' + str(C_Mg) + ' '*(spacer-len(str(C_Mg))) +' mM\n'
conc_str += 'C(dNTP) = ' + str(C_dNTP)+ ' '*(spacer-len(str(C_dNTP))) +' mM\n'
conc_str += 'C(DNA) = ' + str(C_DNA) + ' '*(spacer-len(str(C_DNA))) +' nM\n'
conc_str += 'C(Primer) = ' + str(C_Prim)+ ' '*(spacer-len(str(C_Prim))) +' uM\n'
conc_str += 'C(DMSO) = ' + str(C_DMSO)+ ' '*(spacer-len(str(C_DMSO))) +' %\n'
return conc_str
#end_def
def add_PCR_conditions(feature):
try:
feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
except Exception, e:
print 'add_PCR_conditions:'
print_exception(e)
#end def
def calculate_Tr(seq_rec, r):
primer_Tr = NN_Tr(seq_rec.seq, r)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['T-'+str(r)] = str(primer_Tr)
return primer_Tr
#end def
def calculate_Tm(seq_rec):
primer_Tm = NN_Tm(seq_rec.seq)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['Tm'] = str(primer_Tm)
return primer_Tm
#end def
def dimer_dG(dimer, seq1, seq2):
fwd_matches = list(dimer[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(dimer[1])
rev_matches.sort()
#e.g. (13,14,15,19,20)
seq_str = str(seq1)
seq_len = len(seq_str)
rev_str = str(seq2[::-1])
rev_len = len(rev_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] > 0:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == 0:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#check for 'right' dangling end
if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
#check for 'right' terminal mismatch
elif fwd_matches[-1] < seq_len-1 and rev_matches[0] < rev_len-1:
dG += Terminal_mismatch_mean
#check for 'right' terminal AT
elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r_next = rev_matches[i+1]
#if either || or |x| or |xx|
if f_next-f_match < 4:
NN = seq_str[f_match:f_match+2]
RV = rev_str[r_match:r_match+2]
#salt-corrected dG
dG += MismatchNN[NN][RV] + dG_Na
#if ||
if f_next-f_match == 1: continue
#if |x| or |xx|
elif f_next-f_match < 4:
NN1 = rev_str[r_next-1:r_next+1][::-1]
RV1 = seq_str[f_next-1:f_next+1][::-1]
dG += MismatchNN[NN1][RV1] + dG_Na
continue
#loop
elif f_next-f_match < 31:
dG += loop_dG(f_next-f_match-1, 'I') + 2*Terminal_mismatch_mean
else: pass
return dG
#end def
def hairpin_dG(hairpin, seq):
fwd_matches = list(hairpin[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(hairpin[1])
rev_matches.sort(reverse=True)
#e.g (24,23,22,18,17)
seq_str = str(seq)
seq_len = len(seq_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] < seq_len-1:
dG += DanglingNN['X'+seq_str[rev_matches[0]]][seq_str[rev_matches[0]+1]] | random_line_split |
||
TD_Functions.py | )
#utility functions
def print_exception(e):
print "Exception occurred: " + str(type(e)) + " : " + e.__str__()
###############################################################################
#standard PCR conditions
C_Mg = 1.5 #mM
C_Na = 50 #mM; should be above 0.05M and below 1.1M
C_dNTP = 0 #mM
C_DNA = 50 #nM; DNA template concentration
C_Prim = 0.1 #uM; Primer concentration
C_DMSO = 0 #percent
def C_Na_eq():
"""divalent cation correction (Ahsen et al., 2001)"""
global C_Na, C_Mg, C_dNTP
return C_Na + 120*sqrt(C_Mg - C_dNTP)
#end def
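#Editor's note (worked example, not part of the original module): with the default PCR
#conditions above (C_Na = 50 mM, C_Mg = 1.5 mM, C_dNTP = 0 mM) the monovalent-equivalent
#salt concentration is 50 + 120*sqrt(1.5) ~ 197 mM, which is the value fed into the
#salt corrections used below.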
def NN_Tr(seq, r):
'''Calculate the temperature at which the primer-template association equilibrium
reaches the given ratio 'r', using the two-state equilibrium model and the Nearest
Neighbor thermodynamic tables from the paper of SantaLucia & Hicks (2004).
Note that the two-state equilibrium model used here assumes that the
primer sequence is not self-complementary.'''
#value constraints
if r >=1 or r <=0:
raise ValueError('TD_Functions.NN_Tr: equilibrium ratio should be in the (0;1) interval.')
#definitions
global C_Prim, C_DNA, C_DMSO, R, K0, Sym_Correction
seq_str = str(seq)
rev_com = str(seq.reverse_complement())
seq_len = len(seq)
dH, dS = 0, 0
#concentrations
P = C_Prim*1e-6
D = C_DNA *1e-9
DUP = r*min(P,D)
#equilibrium constant
K = DUP/((P-DUP)*(D-DUP))
#initial corrections
dH += delta_H('ini', 'ini')
dS += delta_S('ini', 'ini')
#test for AT terminals
if seq_str[0] == 'A' or seq_str[0] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
#stacking interactions
for n in range(len(seq_str)-1):
NN = seq_str[n:n+2]
RC = rev_com[seq_len-n-2:seq_len-n]
dH += delta_H(NN, RC)
dS += delta_S(NN, RC)
#salt concentration correction
dS = dS + dS_Na_coefficient * len(seq_str) * log(C_Na_eq()*1e-3) #C_Na mM
#final temperature calculation
return dH * 1000/(dS - R * log(K)) + K0 - 0.75 * C_DMSO #DMSO correction from [2]
#end def
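#Editor's note (illustrative sketch, not part of the original module): NN_Tm(seq) below is
#just NN_Tr(seq, 0.5), i.e. the temperature at which half of the limiting strand is in
#duplex form. A typical call on a Biopython Seq object (assumed input type, since
#reverse_complement() is used above) would be:
#
# from Bio.Seq import Seq
# NN_Tm(Seq('ATGCGCTAGCTACGATCG'))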
def NN_Tm(seq): return NN_Tr(seq, 0.5)
def source_feature(seq_rec):
feature = None
for f in seq_rec.features:
if f.type == 'source':
feature = f
break
if not feature:
feature = SeqFeature(FeatureLocation(0,len(seq_rec.seq)),
type = 'source')
seq_rec.features.append(feature)
return feature
#end def
def format_PCR_conditions():
conc_str = ''
spacer = max(len(str(C_Na)),
len(str(C_Mg)),
len(str(C_dNTP)),
len(str(C_DNA)),
len(str(C_Prim)),
len(str(C_DMSO)))
conc_str += 'C(Na) = ' + str(C_Na) + ' '*(spacer-len(str(C_Na))) +' mM\n'
conc_str += 'C(Mg) = ' + str(C_Mg) + ' '*(spacer-len(str(C_Mg))) +' mM\n'
conc_str += 'C(dNTP) = ' + str(C_dNTP)+ ' '*(spacer-len(str(C_dNTP))) +' mM\n'
conc_str += 'C(DNA) = ' + str(C_DNA) + ' '*(spacer-len(str(C_DNA))) +' nM\n'
conc_str += 'C(Primer) = ' + str(C_Prim)+ ' '*(spacer-len(str(C_Prim))) +' uM\n'
conc_str += 'C(DMSO) = ' + str(C_DMSO)+ ' '*(spacer-len(str(C_DMSO))) +' %\n'
return conc_str
#end_def
def add_PCR_conditions(feature):
try:
| def
def calculate_Tr(seq_rec, r):
primer_Tr = NN_Tr(seq_rec.seq, r)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['T-'+str(r)] = str(primer_Tr)
return primer_Tr
#end def
def calculate_Tm(seq_rec):
primer_Tm = NN_Tm(seq_rec.seq)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['Tm'] = str(primer_Tm)
return primer_Tm
#end def
def dimer_dG(dimer, seq1, seq2):
fwd_matches = list(dimer[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(dimer[1])
rev_matches.sort()
#e.g. (13,14,15,19,20)
seq_str = str(seq1)
seq_len = len(seq_str)
rev_str = str(seq2[::-1])
rev_len = len(rev_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] > 0:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == 0:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#check for 'right' dangling end
if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
#check for 'right' terminal mismatch
elif fwd_matches[-1] < seq_len-1 and rev_matches[0] < rev_len-1:
dG += Terminal_mismatch_mean
#check for 'right' terminal AT
elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
r | feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
except Exception, e:
print 'add_PCR_conditions:'
print_exception(e)
#end | identifier_body |
TD_Functions.py | #utility functions
def print_exception(e):
print "Exception occurred: " + str(type(e)) + " : " + e.__str__()
###############################################################################
#standard PCR conditions
C_Mg = 1.5 #mM
C_Na = 50 #mM; should be above 0.05M and below 1.1M
C_dNTP = 0 #mM
C_DNA = 50 #nM; DNA template concentration
C_Prim = 0.1 #uM; Primer concentration
C_DMSO = 0 #percent
def C_Na_eq():
"""divalent cation correction (Ahsen et al., 2001)"""
global C_Na, C_Mg, C_dNTP
return C_Na + 120*sqrt(C_Mg - C_dNTP)
#end def
def NN_Tr(seq, r):
'''Calculate the temperature at which the primer-template association equilibrium
reaches the given ratio 'r', using the two-state equilibrium model and the Nearest
Neighbor thermodynamic tables from the paper of SantaLucia & Hicks (2004).
Note that the two-state equilibrium model used here assumes that the
primer sequence is not self-complementary.'''
#value constraints
if r >=1 or r <=0:
raise ValueError('TD_Functions.NN_Tr: equilibrium ratio should be in the (0;1) interval.')
#definitions
global C_Prim, C_DNA, C_DMSO, R, K0, Sym_Correction
seq_str = str(seq)
rev_com = str(seq.reverse_complement())
seq_len = len(seq)
dH, dS = 0, 0
#concentrations
P = C_Prim*1e-6
D = C_DNA *1e-9
DUP = r*min(P,D)
#equilibrium constant
K = DUP/((P-DUP)*(D-DUP))
#initial corrections
dH += delta_H('ini', 'ini')
dS += delta_S('ini', 'ini')
#test for AT terminals
if seq_str[0] == 'A' or seq_str[0] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dH += delta_H('ter', 'ter')
dS += delta_S('ter', 'ter')
#stacking interactions
for n in range(len(seq_str)-1):
NN = seq_str[n:n+2]
RC = rev_com[seq_len-n-2:seq_len-n]
dH += delta_H(NN, RC)
dS += delta_S(NN, RC)
#salt concentration correction
dS = dS + dS_Na_coefficient * len(seq_str) * log(C_Na_eq()*1e-3) #C_Na mM
#final temperature calculation
return dH * 1000/(dS - R * log(K)) + K0 - 0.75 * C_DMSO #DMSO correction from [2]
#end def
def NN_Tm(seq): return NN_Tr(seq, 0.5)
def source_feature(seq_rec):
feature = None
for f in seq_rec.features:
if f.type == 'source':
feature = f
break
if not feature:
feature = SeqFeature(FeatureLocation(0,len(seq_rec.seq)),
type = 'source')
seq_rec.features.append(feature)
return feature
#end def
def forma | conc_str = ''
spacer = max(len(str(C_Na)),
len(str(C_Mg)),
len(str(C_dNTP)),
len(str(C_DNA)),
len(str(C_Prim)),
len(str(C_DMSO)))
conc_str += 'C(Na) = ' + str(C_Na) + ' '*(spacer-len(str(C_Na))) +' mM\n'
conc_str += 'C(Mg) = ' + str(C_Mg) + ' '*(spacer-len(str(C_Mg))) +' mM\n'
conc_str += 'C(dNTP) = ' + str(C_dNTP)+ ' '*(spacer-len(str(C_dNTP))) +' mM\n'
conc_str += 'C(DNA) = ' + str(C_DNA) + ' '*(spacer-len(str(C_DNA))) +' nM\n'
conc_str += 'C(Primer) = ' + str(C_Prim)+ ' '*(spacer-len(str(C_Prim))) +' uM\n'
conc_str += 'C(DMSO) = ' + str(C_DMSO)+ ' '*(spacer-len(str(C_DMSO))) +' %\n'
return conc_str
#end_def
def add_PCR_conditions(feature):
try:
feature.qualifiers['C_Na'] = str(C_Na)+ ' mM'
feature.qualifiers['C_Mg'] = str(C_Mg)+ ' mM'
feature.qualifiers['C_dNTP'] = str(C_dNTP)+' mM'
feature.qualifiers['C_DNA'] = str(C_DNA)+ ' nM'
feature.qualifiers['C_Primer'] = str(C_Prim)+' uM'
feature.qualifiers['C_DMSO'] = str(C_DMSO)+' %'
except Exception, e:
print 'add_PCR_conditions:'
print_exception(e)
#end def
def calculate_Tr(seq_rec, r):
primer_Tr = NN_Tr(seq_rec.seq, r)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['T-'+str(r)] = str(primer_Tr)
return primer_Tr
#end def
def calculate_Tm(seq_rec):
primer_Tm = NN_Tm(seq_rec.seq)
feature = source_feature(seq_rec)
add_PCR_conditions(feature)
feature.qualifiers['Tm'] = str(primer_Tm)
return primer_Tm
#end def
def dimer_dG(dimer, seq1, seq2):
fwd_matches = list(dimer[0])
fwd_matches.sort()
#e.g. (2 ,3 ,4 ,8 ,9 )
rev_matches = list(dimer[1])
rev_matches.sort()
#e.g. (13,14,15,19,20)
seq_str = str(seq1)
seq_len = len(seq_str)
rev_str = str(seq2[::-1])
rev_len = len(rev_str)
dG_Na = dG_Na_coefficient_oligo * 1 * log(C_Na_eq()*1e-3)
dG = delta_G('ini', 'ini')
#check for 'left' dangling end
if fwd_matches[0] == 0 and rev_matches[0] > 0: #3' dangling
dG += DanglingNN[rev_str[rev_matches[0]]+'X'][rev_str[rev_matches[0]-1]]
elif rev_matches[0] == 0 and fwd_matches[0] > 0: #5' dangling
dG += DanglingNN['X'+seq_str[fwd_matches[0]]][seq_str[fwd_matches[0]-1]]
#check for 'left' terminal mismatch
elif fwd_matches[0] > 0 and rev_matches[0] > 0:
dG += Terminal_mismatch_mean
#check for 'left' terminal AT
elif fwd_matches[0] == 0 and rev_matches[0] == 0:
if seq_str[0] == 'A' or seq_str[0] == 'T':
dG += delta_G('ter', 'ter')
#check for 'right' dangling end
if fwd_matches[-1] == seq_len-1 and rev_matches[-1] < rev_len-1: #5' dangling
dG += DanglingNN['X'+rev_str[rev_matches[-1]]][rev_str[rev_matches[-1]+1]]
elif rev_matches[-1] == rev_len-1 and fwd_matches[-1] < seq_len-1: #3' dangling
dG += DanglingNN[seq_str[fwd_matches[-1]]+'X'][seq_str[fwd_matches[-1]+1]]
#check for 'right' terminal mismatch
elif fwd_matches[-1] < seq_len-1 and rev_matches[0] < rev_len-1:
dG += Terminal_mismatch_mean
#check for 'right' terminal AT
elif fwd_matches[-1] == seq_len-1 and rev_matches[-1] == rev_len-1:
if seq_str[-1] == 'A' or seq_str[-1] == 'T':
dG += delta_G('ter', 'ter')
#stacking and mismatches
for i in range(len(fwd_matches)-1):
f_match = fwd_matches[i]
f_next = fwd_matches[i+1]
r_match = rev_matches[i]
| t_PCR_conditions():
| identifier_name |
upload.rs | std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
let mut tls_builder = native_tls::TlsConnector::builder();
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
for cert in rustls_pemfile::certs(&mut reader)? {
tls_builder.add_root_certificate(native_tls::Certificate::from_pem(&cert)?);
}
}
builder = builder.tls_connector(Arc::new(tls_builder.build()?));
Ok(builder.build())
}
#[cfg(feature = "rustls")]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
use std::sync::Arc;
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
if let Some(ca_bundle) = tls_ca_bundle() {
let mut reader = io::BufReader::new(File::open(ca_bundle)?);
let certs = rustls_pemfile::certs(&mut reader)?;
let mut root_certs = rustls::RootCertStore::empty();
root_certs.add_parsable_certificates(&certs);
let client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_certs)
.with_no_client_auth();
Ok(builder.tls_config(Arc::new(client_config)).build())
} else {
Ok(builder.build())
}
}
#[cfg(not(any(feature = "native-tls", feature = "rustls")))]
#[allow(clippy::result_large_err)]
fn http_agent() -> Result<ureq::Agent, UploadError> {
let mut builder = ureq::builder();
if let Ok(proxy) = http_proxy() {
let proxy = ureq::Proxy::new(proxy)?;
builder = builder.proxy(proxy);
};
Ok(builder.build())
}
/// Uploads a single wheel to the registry
#[allow(clippy::result_large_err)]
pub fn upload(registry: &Registry, wheel_path: &Path) -> Result<(), UploadError> {
let hash_hex = hash_file(wheel_path)?;
let dist = python_pkginfo::Distribution::new(wheel_path)
.map_err(|err| UploadError::PkgInfoError(wheel_path.to_owned(), err))?;
let metadata = dist.metadata();
let mut api_metadata = vec![
(":action", "file_upload".to_string()),
("sha256_digest", hash_hex),
("protocol_version", "1".to_string()),
("metadata_version", metadata.metadata_version.clone()),
("name", canonicalize_name(&metadata.name)),
("version", metadata.version.clone()),
("pyversion", dist.python_version().to_string()),
("filetype", dist.r#type().to_string()),
];
let mut add_option = |name, value: &Option<String>| {
if let Some(some) = value.clone() {
api_metadata.push((name, some));
}
};
// https://github.com/pypa/warehouse/blob/75061540e6ab5aae3f8758b569e926b6355abea8/warehouse/forklift/legacy.py#L424
add_option("summary", &metadata.summary);
add_option("description", &metadata.description);
add_option(
"description_content_type",
&metadata.description_content_type,
);
add_option("author", &metadata.author);
add_option("author_email", &metadata.author_email);
add_option("maintainer", &metadata.maintainer);
add_option("maintainer_email", &metadata.maintainer_email);
add_option("license", &metadata.license);
add_option("keywords", &metadata.keywords);
add_option("home_page", &metadata.home_page);
add_option("download_url", &metadata.download_url);
add_option("requires_python", &metadata.requires_python);
add_option("summary", &metadata.summary);
if metadata.requires_python.is_none() {
// GitLab PyPI repository API implementation requires this metadata field
// and twine always includes it in the request, even when it's empty.
api_metadata.push(("requires_python", "".to_string()));
}
let mut add_vec = |name, values: &[String]| {
for i in values {
api_metadata.push((name, i.clone()));
}
};
add_vec("classifiers", &metadata.classifiers);
add_vec("platform", &metadata.platforms);
add_vec("requires_dist", &metadata.requires_dist);
add_vec("provides_dist", &metadata.provides_dist);
add_vec("obsoletes_dist", &metadata.obsoletes_dist);
add_vec("requires_external", &metadata.requires_external);
add_vec("project_urls", &metadata.project_urls);
let wheel = File::open(wheel_path)?;
let wheel_name = wheel_path
.file_name()
.expect("Wheel path has a file name")
.to_string_lossy();
let mut form = Multipart::new();
for (key, value) in api_metadata {
form.add_text(key, value);
}
form.add_stream("content", &wheel, Some(wheel_name), None);
let multipart_data = form.prepare().map_err(|e| e.error)?;
let encoded = STANDARD.encode(format!("{}:{}", registry.username, registry.password));
let agent = http_agent()?;
let response = agent
.post(registry.url.as_str())
.set(
"Content-Type",
&format!(
"multipart/form-data; boundary={}",
multipart_data.boundary()
),
)
.set(
"User-Agent",
&format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
)
.set("Authorization", &format!("Basic {encoded}"))
.send(multipart_data);
match response {
Ok(_) => Ok(()),
Err(ureq::Error::Status(status, response)) => {
let err_text = response.into_string().unwrap_or_else(|e| {
format!(
"The registry should return some text, \
even in case of an error, but didn't ({e})"
)
});
debug!("Upload error response: {}", err_text);
// Detect FileExistsError the way twine does
// https://github.com/pypa/twine/blob/87846e5777b380d4704704a69e1f9a7a1231451c/twine/commands/upload.py#L30
if status == 403 {
if err_text.contains("overwrite artifact") {
// Artifactory (https://jfrog.com/artifactory/)
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::AuthenticationError(err_text))
}
} else {
let status_string = status.to_string();
if status == 409 // conflict, pypiserver (https://pypi.org/project/pypiserver)
// PyPI / TestPyPI
|| (status == 400 && err_text.contains("already exists"))
// Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
|| (status == 400 && err_text.contains("updating asset"))
// GitLab Enterprise Edition (https://about.gitlab.com)
|| (status == 400 && err_text.contains("already been taken"))
{
Err(UploadError::FileExistsError(err_text))
} else {
Err(UploadError::StatusCodeError(status_string, err_text))
}
}
}
Err(err) => Err(UploadError::UreqError(err.into())),
}
}
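// Editor's sketch (not part of the original source): one plausible way to call
// `upload` directly; the wheel path and the token below are placeholders, and
// real callers normally go through `upload_ui` instead.
#[allow(dead_code)]
fn upload_single_wheel_sketch() -> Result<(), UploadError> {
    let registry = Registry::new(
        "__token__".to_string(),
        "pypi-PLACEHOLDER-TOKEN".to_string(),
        "https://upload.pypi.org/legacy/".to_string(),
    );
    match upload(&registry, Path::new("dist/example-0.1.0-py3-none-any.whl")) {
        // Re-uploading an already published file is treated as a soft failure here.
        Err(UploadError::FileExistsError(_)) => Ok(()),
        other => other,
    }
}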
/// Handles authentication/keyring integration and retrying of the publish subcommand
pub fn upload_ui(items: &[PathBuf], publish: &PublishOpt) -> Result<()> {
let registry = complete_registry(publish)?;
eprintln!("🚀 Uploading {} packages", items.len());
for i in items {
let upload_result = upload(®istry, i);
match upload_result {
Ok(()) => (),
Err(UploadError::AuthenticationError(msg)) => {
let title_re = regex::Regex::new(r"<title>(.+?)</title>").unwrap();
let title = title_re
.captures(&msg)
.and_then(|c| c.get(1))
.map(|m| m.as_str());
match title {
Some(title) => {
eprintln!("⛔ {title}");
}
None => eprintln!("⛔ Username and/or password are wrong"),
}
#[cfg(feature = "keyring")]
{
// Delete the wrong password from the keyring
let old_username = registry.username;
match keyring::Entry::new(env!("CARGO_PKG_NAME"), &old_username)
.and_then(|keyring| keyring.delete_password())
{
Ok(()) => {
eprintln!("🔑 Removed wrong password from keyring")
} | Err(keyring::Error::NoEntry)
| Err(keyring::Error::NoStorageAccess(_)) | random_line_split |
|
upload.rs | UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
Registry {
username,
password,
url,
}
}
}
/// Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
// So we need this fallback for pycharm on windows
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
| config
}
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceResponse {
audience: String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
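// Editor's note (summary of the function above, not original source text): the
// trusted-publisher flow boils down to three HTTP calls: (1) GET
// <registry>/_/oidc/audience to learn the expected audience, (2) GET
// $ACTIONS_ID_TOKEN_REQUEST_URL with that audience (bearer-authenticated by
// $ACTIONS_ID_TOKEN_REQUEST_TOKEN) to obtain a GitHub OIDC token, and (3) POST
// that token to <registry>/_/oidc/github/mint-token, whose response carries the
// short-lived API token later used as the `__token__` password.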
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in .pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, ®istry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
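// Editor's sketch (not in the original source): a minimal check of the
// canonicalization rule implemented above.
#[cfg(test)]
mod canonicalize_name_sketch {
    use super::canonicalize_name;

    #[test]
    fn collapses_separators_and_lowercases() {
        assert_eq!(canonicalize_name("My_Package.Name"), "my-package-name");
        assert_eq!(canonicalize_name("Flask--RESTful"), "flask-restful");
    }
}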
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUESTS | config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
| conditional_block |
upload.rs | UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
| // Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
// So we need this fallback for pycharm on windows
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
config
}
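// Editor's note (illustrative only, not original source text): `load_pypirc`
// reads a standard ~/.pypirc, for example:
//
//     [distutils]
//     index-servers =
//         pypi
//         testpypi
//
//     [testpypi]
//     repository = https://test.pypi.org/legacy/
//     username = __token__
//     password = pypi-PLACEHOLDER-TOKEN
//
// `complete_registry` below looks up the `repository` key of the section named
// after the chosen repository, and `load_pypi_cred_from_config` reads its
// `username` and `password` keys.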
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
#[derive(Debug, Deserialize)]
struct OidcAudienceResponse {
audience: String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in .pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, ®istry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUEST | Registry {
username,
password,
url,
}
}
}
/ | identifier_body |
upload.rs | UploadError {
fn from(error: native_tls::Error) -> Self {
UploadError::TlsError(error)
}
}
/// A pip registry such as pypi or testpypi with associated credentials, used
/// for uploading wheels
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Registry {
/// The username
pub username: String,
/// The password
pub password: String,
/// The url endpoint for legacy uploading
pub url: String,
}
impl Registry {
/// Creates a new registry
pub fn new(username: String, password: String, url: String) -> Registry {
Registry {
username,
password,
url,
}
}
}
/// Attempts to fetch the password from the keyring (if enabled)
/// and falls back to the interactive password prompt.
fn get_password(_username: &str) -> String {
#[cfg(feature = "keyring")]
{
let service = env!("CARGO_PKG_NAME");
let keyring = keyring::Entry::new(service, _username);
if let Ok(password) = keyring.and_then(|keyring| keyring.get_password()) {
return password;
};
}
dialoguer::Password::new()
.with_prompt("Please enter your password")
.interact()
.unwrap_or_else(|_| {
// So we need this fallback for pycharm on windows
let mut password = String::new();
io::stdin()
.read_line(&mut password)
.expect("Failed to read line");
password.trim().to_string()
})
}
fn get_username() -> String {
eprintln!("Please enter your username:");
let mut line = String::new();
io::stdin().read_line(&mut line).unwrap();
line.trim().to_string()
}
fn load_pypirc() -> Ini {
let mut config = Ini::new();
if let Some(mut config_path) = dirs::home_dir() {
config_path.push(".pypirc");
if let Ok(pypirc) = fs::read_to_string(config_path.as_path()) {
let _ = config.read(pypirc);
}
}
config
}
fn load_pypi_cred_from_config(config: &Ini, registry_name: &str) -> Option<(String, String)> {
if let (Some(username), Some(password)) = (
config.get(registry_name, "username"),
config.get(registry_name, "password"),
) {
return Some((username, password));
}
None
}
/// Gets the PyPI credentials from (in precedence order):
///
/// 1. `MATURIN_PYPI_TOKEN` environment variable
/// 2. `.pypirc` config file
/// 3. maturin command arguments
/// 4. `MATURIN_USERNAME` and `MATURIN_PASSWORD` environment variables
/// 5. the password keyring
/// 6. interactive prompt
fn resolve_pypi_cred(
opt: &PublishOpt,
config: &Ini,
registry_name: Option<&str>,
registry_url: &str,
) -> Result<(String, String)> {
// API token from environment variable takes priority
if let Ok(token) = env::var("MATURIN_PYPI_TOKEN") {
return Ok(("__token__".to_string(), token));
}
// Try to get a token via OIDC exchange
match resolve_pypi_token_via_oidc(registry_url) {
Ok(Some(token)) => {
eprintln!("🔐 Using trusted publisher for upload");
return Ok(("__token__".to_string(), token));
}
Ok(None) => {}
Err(e) => eprintln!("⚠️ Warning: Failed to resolve PyPI token via OIDC: {}", e),
}
if let Some((username, password)) =
registry_name.and_then(|name| load_pypi_cred_from_config(config, name))
{
eprintln!("🔐 Using credential in pypirc for upload");
return Ok((username, password));
}
// fallback to username and password
if opt.non_interactive && (opt.username.is_none() || opt.password.is_none()) {
bail!("Credentials not found and non-interactive mode is enabled");
}
let username = opt.username.clone().unwrap_or_else(get_username);
let password = opt
.password
.clone()
.unwrap_or_else(|| get_password(&username));
Ok((username, password))
}
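// Editor's note (not original source text): in the implementation above, the
// GitHub Actions OIDC exchange (`resolve_pypi_token_via_oidc`) is consulted right
// after MATURIN_PYPI_TOKEN and before the `.pypirc` lookup, even though the doc
// comment's numbered list does not mention it; the keyring and the interactive
// prompt are only reached through `get_username` and `get_password`.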
#[derive(Debug, Deserialize)]
struct OidcAudienceRes | : String,
}
#[derive(Debug, Deserialize)]
struct OidcTokenResponse {
value: String,
}
#[derive(Debug, Deserialize)]
struct MintTokenResponse {
token: String,
}
/// Trusted Publisher support for GitHub Actions
fn resolve_pypi_token_via_oidc(registry_url: &str) -> Result<Option<String>> {
if env::var_os("GITHUB_ACTIONS").is_none() {
return Ok(None);
}
if let (Ok(req_token), Ok(req_url)) = (
env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN"),
env::var("ACTIONS_ID_TOKEN_REQUEST_URL"),
) {
let registry_url = url::Url::parse(registry_url)?;
let mut audience_url = registry_url.clone();
audience_url.set_path("_/oidc/audience");
debug!("Requesting OIDC audience from {}", audience_url);
let agent = http_agent()?;
let audience_res = agent
.get(audience_url.as_str())
.timeout(Duration::from_secs(30))
.call()?;
if audience_res.status() == 404 {
// OIDC is not enabled/supported on this registry
return Ok(None);
}
let audience = audience_res.into_json::<OidcAudienceResponse>()?.audience;
debug!("Requesting OIDC token for {} from {}", audience, req_url);
let request_token_res: OidcTokenResponse = agent
.get(&req_url)
.query("audience", &audience)
.set("Authorization", &format!("bearer {req_token}"))
.timeout(Duration::from_secs(30))
.call()?
.into_json()?;
let oidc_token = request_token_res.value;
let mut mint_token_url = registry_url;
mint_token_url.set_path("_/oidc/github/mint-token");
debug!("Requesting API token from {}", mint_token_url);
let mut mint_token_req = HashMap::new();
mint_token_req.insert("token", oidc_token);
let mint_token_res = agent
.post(mint_token_url.as_str())
.timeout(Duration::from_secs(30))
.send_json(mint_token_req)?
.into_json::<MintTokenResponse>()?;
return Ok(Some(mint_token_res.token));
}
Ok(None)
}
/// Asks for username and password for a registry account where missing.
fn complete_registry(opt: &PublishOpt) -> Result<Registry> {
// load creds from pypirc if found
let pypirc = load_pypirc();
let (registry_name, registry_url) = if let Some(repository_url) = opt.repository_url.as_deref()
{
let name = match repository_url {
PublishOpt::DEFAULT_REPOSITORY_URL => Some("pypi"),
PublishOpt::TEST_REPOSITORY_URL => Some("testpypi"),
_ => None,
};
(name, repository_url.to_string())
} else if let Some(url) = pypirc.get(&opt.repository, "repository") {
(Some(opt.repository.as_str()), url)
} else if opt.repository == "pypi" {
(Some("pypi"), PublishOpt::DEFAULT_REPOSITORY_URL.to_string())
} else if opt.repository == "testpypi" {
(
Some("testpypi"),
PublishOpt::TEST_REPOSITORY_URL.to_string(),
)
} else {
bail!(
"Failed to get registry {} in .pypirc. \
Note: Your index didn't start with http:// or https://, \
which is required for non-pypirc indices.",
opt.repository
);
};
let (username, password) = resolve_pypi_cred(opt, &pypirc, registry_name, ®istry_url)?;
let registry = Registry::new(username, password, registry_url);
Ok(registry)
}
/// Port of pip's `canonicalize_name`
/// https://github.com/pypa/pip/blob/b33e791742570215f15663410c3ed987d2253d5b/src/pip/_vendor/packaging/utils.py#L18-L25
fn canonicalize_name(name: &str) -> String {
Regex::new("[-_.]+")
.unwrap()
.replace_all(name, "-")
.to_lowercase()
}
fn http_proxy() -> Result<String, env::VarError> {
env::var("HTTPS_PROXY")
.or_else(|_| env::var("https_proxy"))
.or_else(|_| env::var("HTTP_PROXY"))
.or_else(|_| env::var("http_proxy"))
.or_else(|_| env::var("ALL_PROXY"))
.or_else(|_| env::var("all_proxy"))
}
#[cfg(any(feature = "native-tls", feature = "rustls"))]
fn tls_ca_bundle() -> Option<OsString> {
env::var_os("MATURIN_CA_BUNDLE")
.or_else(|| env::var_os("REQUESTS | ponse {
audience | identifier_name |
armature.py | .frames.append(frame)
self.animation.values.append(currentBoneMatrix)
self.previousBoneMatrix = currentBoneMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_rest_pose(self, editBone):
self.rest = Bone.get_matrix(editBone, self.matrix_world, True)
# used to calc skeleton restDimensions
self.restHead = editBone.head
self.restTail = editBone.tail
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_bone_matrix(self, doParentMult = True):
|
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def get_matrix(bpyBone, matrix_world, doParentMult):
SystemMatrix = Matrix.Scale(-1, 4, Vector((0, 0, 1))) * Matrix.Rotation(radians(-90), 4, 'X')
if (bpyBone.parent and doParentMult):
return (SystemMatrix * matrix_world * bpyBone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bpyBone.matrix)
else:
return SystemMatrix * matrix_world * bpyBone.matrix
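# Editor's note (interpretation, not part of the original exporter): SystemMatrix
# above appears to convert Blender's right-handed, Z-up bone matrices into the
# left-handed, Y-up frame BabylonJS expects; the -90 degree rotation about X maps
# Z-up to Y-up, the scale of -1 along Z flips the handedness, and matrix_world
# (applied first, being rightmost) bakes the armature object's transform into
# each bone matrix before the conversion.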
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: skeleton, bone, animation
def to_script_file(self, file_handler, indent):
parentBone = 'skeleton.bones[' + format_int(self.parentBoneIndex) + ']' if self.parentBone else 'null'
file_handler.write(indent + 'bone = new QI.Bone("' + self.name + '", skeleton,' + parentBone + ', _M(' + format_matrix4(self.matrix) + ')' + ', _M(' + format_matrix4(self.rest) + '));\n')
file_handler.write(indent + 'bone.length = ' + format_f(self.length) + ';\n')
if hasattr(self, 'animation'):
self.animation.to_script_file(file_handler, indent) # declares and set the variable animation
file_handler.write(indent + 'bone.animations.push(animation);\n\n')
#===============================================================================
class Skeleton:
# skipAnimations argument only used when exporting QI.SkeletonPoseLibrary
def __init__(self, bpySkeleton, scene, id, ignoreIKBones, skipAnimations = False):
if not skipAnimations:
Logger.log('processing begun of skeleton: ' + bpySkeleton.name + ', id: '+ str(id))
self.name = bpySkeleton.name
self.id = id
self.bones = []
if bpySkeleton.data.LibraryWithScene:
self.libraryName = bpySkeleton.data.libraryName
self.bpySkeleton = bpySkeleton # needed for call to build library
for bone in bpySkeleton.pose.bones:
if ignoreIKBones and Skeleton.isIkName(bone.name):
if not skipAnimations: Logger.log('Ignoring IK bone: ' + bone.name, 2)
continue
self.bones.append(Bone(bone, bpySkeleton, self.bones, skipAnimations))
if (bpySkeleton.animation_data and not skipAnimations):
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(bpySkeleton, action, FRAME_BASED_ANIMATION, frameOffset)
if animationRange is None:
continue
Logger.log('processing action ' + animationRange.to_string(), 2)
self.ranges.append(animationRange)
nFrames = len(animationRange.frames_in)
for idx in range(nFrames):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
firstOrLast = idx == 0 or idx == nFrames - 1
for bone in self.bones:
bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
frameOffset = animationRange.frame_end
# mode_set only works when there is an active object; switch to edit mode so the bones are in their rest position
scene.objects.active = bpySkeleton
bpy.ops.object.mode_set(mode='EDIT')
# you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
for editBone in bpySkeleton.data.edit_bones:
for myBoneObj in self.bones:
if editBone.name == myBoneObj.name:
myBoneObj.set_rest_pose(editBone)
break
self.dimensions = self.getDimensions()
bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# do not use .dimensions from blender, it might be including IK bones
def getDimensions(self):
highest = Vector((-10000, -10000, -10000))
lowest = Vector(( 10000, 10000, 10000))
for bone in self.bones:
if highest.x < bone.restHead.x: highest.x = bone.restHead.x
if highest.y < bone.restHead.y: highest.y = bone.restHead.y
if highest.z < bone.restHead.z: highest.z = bone.restHead.z
if highest.x < bone.restTail.x: highest.x = bone.restTail.x
if highest.y < bone.restTail.y: highest.y = bone.restTail.y
if highest.z < bone.restTail.z: highest.z = bone.restTail.z
if lowest .x > bone.restHead.x: lowest .x = bone.restHead.x
if lowest .y > bone.restHead.y: lowest .y = bone.restHead.y
if lowest .z > bone.restHead.z: lowest .z = bone.restHead.z
if lowest .x > bone.restTail.x: lowest .x = bone.restTail.x
if lowest .y > bone.restTail.y: lowest .y = bone.restTail.y
if lowest .z > bone.restTail.z: lowest .z = bone.restTail.z
return Vector((highest.x - lowest.x, highest.y - lowest.y, highest.z - lowest.z))
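# Editor's note (not part of the original exporter): the loop above computes an
# axis-aligned bounding box over every bone's rest-pose head and tail; the
# resulting vector is what to_script_file later emits as skeleton.dimensionsAtRest.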
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter; assume skeleton is the active object
def getPose(self, idx):
# ensure pose mode, select all bones, clear transforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
bpy.ops.poselib.apply_pose(pose_index = idx)
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getRestAsPose(self):
# ensure pose mode, select all bones, clear transforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getBoneLengths(self):
ret = []
for bone in self.bones:
ret.append([bone.name, bone.length])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def isIkName(boneName):
return '.ik' in boneName.lower() or 'ik.' in boneName.lower()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Since IK bones may have been skipped, the bone index must be looked up by name during the second pass over the mesh
def get_index_of_bone(self, boneName):
return Skeleton.get_bone(boneName, self.bones).index
@staticmethod
def get_bone(boneName, bones):
for bone in bones:
if boneName == bone.name:
return bone
# should not happen, but if it does clearly a bug, so terminate
raise Exception('bone name "' + boneName + '" not found in | return Bone.get_matrix(self.posedBone, self.matrix_world, doParentMult) | identifier_body |
armature.py |
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def append_animation_pose(self, frame, force = False):
currentBoneMatrix = self.get_bone_matrix()
if (force or not same_matrix4(currentBoneMatrix, self.previousBoneMatrix)):
self.animation.frames.append(frame)
self.animation.values.append(currentBoneMatrix)
self.previousBoneMatrix = currentBoneMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_rest_pose(self, editBone):
self.rest = Bone.get_matrix(editBone, self.matrix_world, True)
# used to calc skeleton restDimensions
self.restHead = editBone.head
self.restTail = editBone.tail
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_bone_matrix(self, doParentMult = True):
return Bone.get_matrix(self.posedBone, self.matrix_world, doParentMult)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def get_matrix(bpyBone, matrix_world, doParentMult):
SystemMatrix = Matrix.Scale(-1, 4, Vector((0, 0, 1))) * Matrix.Rotation(radians(-90), 4, 'X')
if (bpyBone.parent and doParentMult):
return (SystemMatrix * matrix_world * bpyBone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bpyBone.matrix)
else:
return SystemMatrix * matrix_world * bpyBone.matrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: skeleton, bone, animation
def to_script_file(self, file_handler, indent):
parentBone = 'skeleton.bones[' + format_int(self.parentBoneIndex) + ']' if self.parentBone else 'null'
file_handler.write(indent + 'bone = new QI.Bone("' + self.name + '", skeleton,' + parentBone + ', _M(' + format_matrix4(self.matrix) + ')' + ', _M(' + format_matrix4(self.rest) + '));\n')
file_handler.write(indent + 'bone.length = ' + format_f(self.length) + ';\n')
if hasattr(self, 'animation'):
self.animation.to_script_file(file_handler, indent) # declares and set the variable animation
file_handler.write(indent + 'bone.animations.push(animation);\n\n')
#===============================================================================
class Skeleton:
# skipAnimations argument only used when exporting QI.SkeletonPoseLibrary
def __init__(self, bpySkeleton, scene, id, ignoreIKBones, skipAnimations = False):
if not skipAnimations:
Logger.log('processing begun of skeleton: ' + bpySkeleton.name + ', id: '+ str(id))
self.name = bpySkeleton.name
self.id = id
self.bones = []
if bpySkeleton.data.LibraryWithScene:
self.libraryName = bpySkeleton.data.libraryName
self.bpySkeleton = bpySkeleton # needed for call to build library
for bone in bpySkeleton.pose.bones:
if ignoreIKBones and Skeleton.isIkName(bone.name):
if not skipAnimations: Logger.log('Ignoring IK bone: ' + bone.name, 2)
continue
self.bones.append(Bone(bone, bpySkeleton, self.bones, skipAnimations))
if (bpySkeleton.animation_data and not skipAnimations):
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(bpySkeleton, action, FRAME_BASED_ANIMATION, frameOffset)
if animationRange is None:
continue
Logger.log('processing action ' + animationRange.to_string(), 2)
self.ranges.append(animationRange)
nFrames = len(animationRange.frames_in)
for idx in range(nFrames):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
firstOrLast = idx == 0 or idx == nFrames - 1
for bone in self.bones:
bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
frameOffset = animationRange.frame_end
# mode_set only works when there is an active object; switch to edit mode so the bones are in their rest position
scene.objects.active = bpySkeleton
bpy.ops.object.mode_set(mode='EDIT')
# you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
for editBone in bpySkeleton.data.edit_bones:
for myBoneObj in self.bones:
if editBone.name == myBoneObj.name:
myBoneObj.set_rest_pose(editBone)
break
self.dimensions = self.getDimensions()
bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# do not use .dimensions from blender, it might be including IK bones
def getDimensions(self):
highest = Vector((-10000, -10000, -10000))
lowest = Vector(( 10000, 10000, 10000))
for bone in self.bones:
if highest.x < bone.restHead.x: highest.x = bone.restHead.x
if highest.y < bone.restHead.y: highest.y = bone.restHead.y
if highest.z < bone.restHead.z: highest.z = bone.restHead.z
if highest.x < bone.restTail.x: highest.x = bone.restTail.x
if highest.y < bone.restTail.y: highest.y = bone.restTail.y
if highest.z < bone.restTail.z: highest.z = bone.restTail.z
if lowest .x > bone.restHead.x: lowest .x = bone.restHead.x
if lowest .y > bone.restHead.y: lowest .y = bone.restHead.y
if lowest .z > bone.restHead.z: lowest .z = bone.restHead.z
if lowest .x > bone.restTail.x: lowest .x = bone.restTail.x
if lowest .y > bone.restTail.y: lowest .y = bone.restTail.y
if lowest .z > bone.restTail.z: lowest .z = bone.restTail.z
return Vector((highest.x - lowest.x, highest.y - lowest.y, highest.z - lowest.z))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter; assume skeleton is the active object
def getPose(self, idx):
# ensure pose mode, select all bones, clear transforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
bpy.ops.poselib.apply_pose(pose_index = idx)
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getRestAsPose(self):
# ensure pose mode, select all bones, clear transforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getBoneLengths(self):
ret = []
for bone in self.bones:
ret.append([bone.name, bone.length])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def isIkName(boneName):
return '.ik' in boneName.lower() or 'ik.' in boneName.lower()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| self.animation = Animation(ANIMATIONTYPE_MATRIX, ANIMATIONLOOPMODE_CYCLE, 'anim', '_matrix')
self.previousBoneMatrix = None | conditional_block |
|
armature.py | .frames.append(frame)
self.animation.values.append(currentBoneMatrix)
self.previousBoneMatrix = currentBoneMatrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_rest_pose(self, editBone):
self.rest = Bone.get_matrix(editBone, self.matrix_world, True)
# used to calc skeleton restDimensions
self.restHead = editBone.head
self.restTail = editBone.tail
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_bone_matrix(self, doParentMult = True):
return Bone.get_matrix(self.posedBone, self.matrix_world, doParentMult)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def | (bpyBone, matrix_world, doParentMult):
SystemMatrix = Matrix.Scale(-1, 4, Vector((0, 0, 1))) * Matrix.Rotation(radians(-90), 4, 'X')
if (bpyBone.parent and doParentMult):
return (SystemMatrix * matrix_world * bpyBone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bpyBone.matrix)
else:
return SystemMatrix * matrix_world * bpyBone.matrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: skeleton, bone, animation
def to_script_file(self, file_handler, indent):
parentBone = 'skeleton.bones[' + format_int(self.parentBoneIndex) + ']' if self.parentBone else 'null'
file_handler.write(indent + 'bone = new QI.Bone("' + self.name + '", skeleton,' + parentBone + ', _M(' + format_matrix4(self.matrix) + ')' + ', _M(' + format_matrix4(self.rest) + '));\n')
file_handler.write(indent + 'bone.length = ' + format_f(self.length) + ';\n')
if hasattr(self, 'animation'):
self.animation.to_script_file(file_handler, indent) # declares and set the variable animation
file_handler.write(indent + 'bone.animations.push(animation);\n\n')
#===============================================================================
class Skeleton:
# skipAnimations argument only used when exporting QI.SkeletonPoseLibrary
def __init__(self, bpySkeleton, scene, id, ignoreIKBones, skipAnimations = False):
if not skipAnimations:
Logger.log('processing begun of skeleton: ' + bpySkeleton.name + ', id: '+ str(id))
self.name = bpySkeleton.name
self.id = id
self.bones = []
if bpySkeleton.data.LibraryWithScene:
self.libraryName = bpySkeleton.data.libraryName
self.bpySkeleton = bpySkeleton # needed for call to build library
for bone in bpySkeleton.pose.bones:
if ignoreIKBones and Skeleton.isIkName(bone.name):
if not skipAnimations: Logger.log('Ignoring IK bone: ' + bone.name, 2)
continue
self.bones.append(Bone(bone, bpySkeleton, self.bones, skipAnimations))
if (bpySkeleton.animation_data and not skipAnimations):
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(bpySkeleton, action, FRAME_BASED_ANIMATION, frameOffset)
if animationRange is None:
continue
Logger.log('processing action ' + animationRange.to_string(), 2)
self.ranges.append(animationRange)
nFrames = len(animationRange.frames_in)
for idx in range(nFrames):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
firstOrLast = idx == 0 or idx == nFrames - 1
for bone in self.bones:
bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
frameOffset = animationRange.frame_end
# mode_set only works when there is an active object; switch to edit mode so the bones are in their rest position
scene.objects.active = bpySkeleton
bpy.ops.object.mode_set(mode='EDIT')
# you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
for editBone in bpySkeleton.data.edit_bones:
for myBoneObj in self.bones:
if editBone.name == myBoneObj.name:
myBoneObj.set_rest_pose(editBone)
break
self.dimensions = self.getDimensions()
bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# do not use .dimensions from blender, it might be including IK bones
def getDimensions(self):
highest = Vector((-10000, -10000, -10000))
lowest = Vector(( 10000, 10000, 10000))
for bone in self.bones:
if highest.x < bone.restHead.x: highest.x = bone.restHead.x
if highest.y < bone.restHead.y: highest.y = bone.restHead.y
if highest.z < bone.restHead.z: highest.z = bone.restHead.z
if highest.x < bone.restTail.x: highest.x = bone.restTail.x
if highest.y < bone.restTail.y: highest.y = bone.restTail.y
if highest.z < bone.restTail.z: highest.z = bone.restTail.z
if lowest .x > bone.restHead.x: lowest .x = bone.restHead.x
if lowest .y > bone.restHead.y: lowest .y = bone.restHead.y
if lowest .z > bone.restHead.z: lowest .z = bone.restHead.z
if lowest .x > bone.restTail.x: lowest .x = bone.restTail.x
if lowest .y > bone.restTail.y: lowest .y = bone.restTail.y
if lowest .z > bone.restTail.z: lowest .z = bone.restTail.z
return Vector((highest.x - lowest.x, highest.y - lowest.y, highest.z - lowest.z))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter; assume skeleton is the active object
def getPose(self, idx):
# ensure pose mode, select all bones, clear transforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
bpy.ops.poselib.apply_pose(pose_index = idx)
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getRestAsPose(self):
# ensure pose mode, select all bones, clear transforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getBoneLengths(self):
ret = []
for bone in self.bones:
ret.append([bone.name, bone.length])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def isIkName(boneName):
return '.ik' in boneName.lower() or 'ik.' in boneName.lower()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Since IK bones may have been skipped, the bone index must be looked up by name during the second pass over the mesh
def get_index_of_bone(self, boneName):
return Skeleton.get_bone(boneName, self.bones).index
@staticmethod
def get_bone(boneName, bones):
for bone in bones:
if boneName == bone.name:
return bone
# should not happen, but if it does clearly a bug, so terminate
raise Exception('bone name "' + boneName + '" not found in | get_matrix | identifier_name |
armature.py | if (bpyBone.parent and doParentMult):
return (SystemMatrix * matrix_world * bpyBone.parent.matrix).inverted() * (SystemMatrix * matrix_world * bpyBone.matrix)
else:
return SystemMatrix * matrix_world * bpyBone.matrix
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: skeleton, bone, animation
def to_script_file(self, file_handler, indent):
parentBone = 'skeleton.bones[' + format_int(self.parentBoneIndex) + ']' if self.parentBone else 'null'
file_handler.write(indent + 'bone = new QI.Bone("' + self.name + '", skeleton,' + parentBone + ', _M(' + format_matrix4(self.matrix) + ')' + ', _M(' + format_matrix4(self.rest) + '));\n')
file_handler.write(indent + 'bone.length = ' + format_f(self.length) + ';\n')
if hasattr(self, 'animation'):
self.animation.to_script_file(file_handler, indent) # declares and set the variable animation
file_handler.write(indent + 'bone.animations.push(animation);\n\n')
#===============================================================================
class Skeleton:
# skipAnimations argument only used when exporting QI.SkeletonPoseLibrary
def __init__(self, bpySkeleton, scene, id, ignoreIKBones, skipAnimations = False):
if not skipAnimations:
Logger.log('processing begun of skeleton: ' + bpySkeleton.name + ', id: '+ str(id))
self.name = bpySkeleton.name
self.id = id
self.bones = []
if bpySkeleton.data.LibraryWithScene:
self.libraryName = bpySkeleton.data.libraryName
self.bpySkeleton = bpySkeleton # needed for call to build library
for bone in bpySkeleton.pose.bones:
if ignoreIKBones and Skeleton.isIkName(bone.name):
if not skipAnimations: Logger.log('Ignoring IK bone: ' + bone.name, 2)
continue
self.bones.append(Bone(bone, bpySkeleton, self.bones, skipAnimations))
if (bpySkeleton.animation_data and not skipAnimations):
self.ranges = []
frameOffset = 0
for action in bpy.data.actions:
# get the range / assigning the action to the object
animationRange = AnimationRange.actionPrep(bpySkeleton, action, FRAME_BASED_ANIMATION, frameOffset)
if animationRange is None:
continue
Logger.log('processing action ' + animationRange.to_string(), 2)
self.ranges.append(animationRange)
nFrames = len(animationRange.frames_in)
for idx in range(nFrames):
bpy.context.scene.frame_set(animationRange.frames_in[idx])
firstOrLast = idx == 0 or idx == nFrames - 1
for bone in self.bones:
bone.append_animation_pose(animationRange.frames_out[idx], firstOrLast)
frameOffset = animationRange.frame_end
# mode_set only works when there is an active object; switch to edit mode so the bones are in their rest position
scene.objects.active = bpySkeleton
bpy.ops.object.mode_set(mode='EDIT')
# you need to access edit_bones from skeleton.data not skeleton.pose when in edit mode
for editBone in bpySkeleton.data.edit_bones:
for myBoneObj in self.bones:
if editBone.name == myBoneObj.name:
myBoneObj.set_rest_pose(editBone)
break
self.dimensions = self.getDimensions()
bpy.ops.object.mode_set(mode='OBJECT')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# do not use .dimensions from blender, it might be including IK bones
def getDimensions(self):
highest = Vector((-10000, -10000, -10000))
lowest = Vector(( 10000, 10000, 10000))
for bone in self.bones:
if highest.x < bone.restHead.x: highest.x = bone.restHead.x
if highest.y < bone.restHead.y: highest.y = bone.restHead.y
if highest.z < bone.restHead.z: highest.z = bone.restHead.z
if highest.x < bone.restTail.x: highest.x = bone.restTail.x
if highest.y < bone.restTail.y: highest.y = bone.restTail.y
if highest.z < bone.restTail.z: highest.z = bone.restTail.z
if lowest .x > bone.restHead.x: lowest .x = bone.restHead.x
if lowest .y > bone.restHead.y: lowest .y = bone.restHead.y
if lowest .z > bone.restHead.z: lowest .z = bone.restHead.z
if lowest .x > bone.restTail.x: lowest .x = bone.restTail.x
if lowest .y > bone.restTail.y: lowest .y = bone.restTail.y
if lowest .z > bone.restTail.z: lowest .z = bone.restTail.z
return Vector((highest.x - lowest.x, highest.y - lowest.y, highest.z - lowest.z))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter; assume skeleton is the active object
def getPose(self, idx):
# ensure pose mode, select all bones, clear transforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
bpy.ops.poselib.apply_pose(pose_index = idx)
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getRestAsPose(self):
# ensure pose mode, select all bones, clear transforms, apply pose
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
ret = []
for bone in self.bones:
ret.append([bone.name, bone.get_bone_matrix()])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# used in PoseLibExporter
def getBoneLengths(self):
ret = []
for bone in self.bones:
ret.append([bone.name, bone.length])
return ret
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def isIkName(boneName):
return '.ik' in boneName.lower() or 'ik.' in boneName.lower()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Since IK bones may have been skipped, the bone index must be looked up by name during the second pass over the mesh
def get_index_of_bone(self, boneName):
return Skeleton.get_bone(boneName, self.bones).index
@staticmethod
def get_bone(boneName, bones):
for bone in bones:
if boneName == bone.name:
return bone
# should not happen, but if it does clearly a bug, so terminate
raise Exception('bone name "' + boneName + '" not found in skeleton')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume the following JS variables have already been declared: scene, skeleton, bone, animation
def to_script_file(self, file_handler, indent, logInBrowserConsole):
# specifying scene gets skeleton added to scene in constructor
if logInBrowserConsole: file_handler.write(indent + "_B.Tools.Log('defining skeleton: " + self.name + "');\n")
file_handler.write(indent + 'skeleton = new QI.Skeleton("' + self.name + '", "' + format_int(self.id) + '", scene);\n') # MUST be String for inline
file_handler.write(indent + 'skeleton.dimensionsAtRest = new _V(' + format_vector(self.dimensions) + ');\n')
for bone in self.bones:
bone.to_script_file(file_handler, indent)
if hasattr(self, 'libraryName'):
file_handler.write(indent +'skeleton.assignPoseLibrary("' + self.libraryName + '");\n')
if hasattr(self, 'ranges'):
for range in self.ranges:
range.to_script_file(file_handler, indent, 'skeleton')
#===============================================================================
# determine all the meshes which are controlled by skeleton, called also by pose_lib | random_line_split |
||
poller.py | import requests
import key as k
import logging
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000
TEMPERATURENOMINAL = 25.0
SERIESRESISTOR = 10000
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
readings = {}
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
print 'exiting'
gpio.output(pumpPin,gpio.LOW)
gpio.cleanup()
uart.cleanup()
def do_sensor_read():
print 'sensor read'
global readings
readings = {}
# value = ADC.read("AIN1")
# adc returns value from 0 to 1.
# use read_raw(pin) to get V values
# tank = adc.read(tankPin)
tank = adc.read(tankPin) # have to read twice due to bbio bug
print 'tank is %s' % tank
time.sleep(1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
| # temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1)
time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thought to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
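# Editor's note (sketch, not part of the original script): convert_thermistor is
# the B-parameter form of the Steinhart-Hart equation,
#     1/T = 1/T0 + (1/B) * ln(R/R0)
# with R0 = THERMISTORNOMINAL (10 kOhm), T0 = 25 C = 298.15 K and B = 3950.
# Sanity check: when the measured resistance R equals R0, ln(R/R0) = 0, so
# 1/T = 1/298.15 and T = 298.15 K, i.e. 25.0 C, the nominal temperature.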
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# fuck me, a1 is only up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':bedTemp, # send the rounded values computed above
'tankTemp':tankTemp
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
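# Editor's note (illustrative, not part of the original script): the POST above
# is a plain form-encoded Phant request, roughly equivalent to
#     curl -X POST <phant_url> \
#          -H 'Phant-Private-Key: <private key>' \
#          -d photo=... -d tankLevel=... -d bedTemp=... -d tankTemp=...
# where <phant_url> and the private key come from the local key.py module
# imported as k at the top of the file.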
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
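# Editor's note (not part of the original script): 'R\r' is the one-shot read
# command of the Atlas Scientific pH circuit this code appears to target. Also
# note that ser.read() with no argument returns a single byte, so data holds only
# the first character of the reply; a caller would need to keep reading until the
# terminating '\r' to get the full pH string. The call to get_ph() in
# do_sensor_read is currently commented out.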
def do_state_display():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), str | random_line_split |
|
poller.py | (1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
# temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1)
time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thought to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# annoyingly, a1 is actually up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
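# Aside: the steinhart block above is the B-parameter form of the Steinhart-Hart equation,
#   1/T = 1/To + (1/B) * ln(R/Ro)   with T in kelvin, To = 25 C, Ro = 10k, B = 3950.
# A compact sanity-check version (assumed helper, not called anywhere in this script):
from math import log
def b_parameter_celsius(resistance_ohms, r_nominal=10000.0, t_nominal_c=25.0, b_coefficient=3950.0):
    inv_t = 1.0 / (t_nominal_c + 273.15) + log(resistance_ohms / r_nominal) / b_coefficient
    return 1.0 / inv_t - 273.15
# b_parameter_celsius(10000.0) comes out at ~25.0, matching the nominal point.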
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':bedTemp, # use the rounded values computed above
'tankTemp':tankTemp
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
def do_state_display():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), str(readings['bedTemp'])[:4], font=font, fill=255)
# Draw an ellipse.
# draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a rectangle.
# draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
# x += shape_width+padding
# Draw a triangle.
# draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
# x += shape_width+padding
# Draw an X.
# draw.line((x, bottom, x+shape_width, top), fill=255)
# draw.line((x, top, x+shape_width, bottom), fill=255)
# x += shape_width+padding
# Display image.
disp.image(image)
disp.display()
# so, what will state display be?
# I2C display of tank temp?
def do_pump_toggle():
print 'pump actuate'
'''
this should actually work like:
if currentMinute mod PUMP_INTERVAL < PUMP_DURATION:
activate pump
else:
turn off pump
'''
if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):
print 'within actuating timeframe'
# changed this to just pump for the first PUMP_DURATION minutes every hour
if(datetime.datetime.today().minute <= PUMP_DURATION):
| print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION
gpio.output(pumpPin,gpio.HIGH) | conditional_block |
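# Aside: the schedule sketched in the do_pump_toggle docstring reduces to a modulo window -
# run the pump for the first PUMP_DURATION minutes of every PUMP_INTERVAL-minute cycle.
# A tiny illustrative helper (assumed, not wired into the script):
def pump_should_run(minute, interval=60, duration=12):
    return (minute % interval) < duration
# pump_should_run(5) -> True, pump_should_run(30) -> False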
|
poller.py | import requests
import key as k
import logging
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000
TEMPERATURENOMINAL = 25.0
SERIESRESISTOR = 10000
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
readings = {}
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
print 'exiting'
gpio.output(pumpPin,gpio.LOW)
gpio.cleanup()
uart.cleanup()
def do_sensor_read():
| time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thought to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# annoyingly, a1 is actually up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':bedTemp, # use the rounded values computed above
'tankTemp':tankTemp
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
def do_state_display():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), str | print 'sensor read'
global readings
readings = {}
# value = ADC.read("AIN1")
# adc returns value from 0 to 1.
# use read_raw(pin) to get V values
# tank = adc.read(tankPin)
tank = adc.read(tankPin) # have to read twice due to bbio bug
print 'tank is %s' % tank
time.sleep(1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
# temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1) | identifier_body |
poller.py | import requests
import key as k
import logging
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000
TEMPERATURENOMINAL = 25.0
SERIESRESISTOR = 10000
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'
photoPin = 'P9_38'
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
RST = 'P8_10' # OLED screen reset pin, not always necessary
readings = {}
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
print 'exiting'
gpio.output(pumpPin,gpio.LOW)
gpio.cleanup()
uart.cleanup()
def do_sensor_read():
print 'sensor read'
global readings
readings = {}
# value = ADC.read("AIN1")
# adc returns value from 0 to 1.
# use read_raw(pin) to get V values
# tank = adc.read(tankPin)
tank = adc.read(tankPin) # have to read twice due to bbio bug
print 'tank is %s' % tank
time.sleep(1)
# photo = adc.read(photoPin) # have to read twice due to bbio bug
photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
print 'photo is %s' % photo
time.sleep(1)
# temp1 = adc.read_raw(thermistor1)
temp1 = adc.read_raw(thermistor1)
time.sleep(1)
print 'temp1 raw %s' % temp1
temp1 = convert_thermistor_special(temp1)
readings['bedTemp'] = temp1
print 'converted bed_temp is %s' % temp1
# # do conversion per
# # http://learn.adafruit.com/thermistor/using-a-thermistor
# temp2 = adc.read_raw(thermistor2)
temp2 = adc.read_raw(thermistor2)
time.sleep(1)
print 'temp2 raw %s' % temp2
print temp2
temp2 = convert_thermistor(temp2)
readings['tankTemp'] = temp2
print 'converted reservoir_temp is %s' % temp2
# do conversion per
# http://learn.adafruit.com/thermistor/using-a-thermistor
# tmp36reading = adc.read_raw(tmp36Pin)
# tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
# millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
# temp_c = (millivolts - 500) / 10
# print temp_c
# ph_val = get_ph()
# print 'ph_val was thought to be %s' % ph_val
readings['tankLevel'] = tank # tank level
readings['photocell'] = photo # photocell
def convert_thermistor(raw):
# convert the value to resistance
# print 'was given %s' % raw
raw = SERIESRESISTOR/((1800.0/raw) - 1.0)
# raw = float(SERIESRESISTOR / float(raw))
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def convert_thermistor_special(raw):
# convert the value to resistance
# print 'was given %s' % raw
# raw = (1800/raw) - 1
# annoyingly, a1 is actually up against 3.73kOhm - even though it's a properly-labeled resistor!
raw = 3730.0/((1800.0/raw) - 1.0)
print 'Thermistor resistance '
print raw
steinhart = raw/THERMISTORNOMINAL # (R/Ro)
steinhart = log(steinhart) # ln(R/Ro)
steinhart /= BCOEFFICIENT # 1/B * ln(R/Ro)
steinhart += float(1.0 / (TEMPERATURENOMINAL + 273.15)) # + (1/To)
steinhart = float(1.0 / steinhart) # Invert
steinhart -= 273.15 # convert to C
print 'we think converted temperature is %s' % steinhart
return steinhart
def do_db_update():
print 'db update'
global readings
# print readings
if len(readings) != 0:
# data.sparkfun.com is expecting:
# bedTemp, photo, tankLevel, tankTemp
bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
payload = {
'photo':readings['photocell'],
'tankLevel':readings['tankLevel'],
'bedTemp':bedTemp, # use the rounded values computed above
'tankTemp':tankTemp
}
h = {'Phant-Private-Key':k.key['phant_private']}
r = requests.post(k.key['phant_url'], data=payload, headers=h)
print 'wrote a result set to the DB'
else:
print 'NULL readings, nothing written to DB'
def get_ph():
print 'we are in get_ph'
uart.setup('UART2')
ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
print 'opened serial port'
ser.open()
ser.write('R\r')
data = ser.read()
print 'ph received raw as %s' % data
ser.close()
uart.cleanup()
return data
def | ():
print 'state_display'
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
# font = ImageFont.load_default()
# Alternatively load a TTF font.
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('Vdj.ttf', 8)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
draw.text((x, top), 'photo: ', font=font, fill=255)
draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
draw.text((x+64, top+48), | do_state_display | identifier_name |
ddpg.py | , activation='softmax', weights_init=w_init)
return in_states, out_actions
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict={
self.onnet_in_states: inputs,
self.action_gradient: a_gradient
})
def predict(self, inp_states):
out_actions = self.sess.run(self.out, feed_dict={
self.onnet_in_states: inp_states
})
out_actions = out_actions[0]
#print("actor output actions", out_actions)
return out_actions
def predict_target(self, in_states):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: in_states
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
def get_num_trainable_vars(self):
return self.num_trainable_vars
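# Aside: update_target_network_params above is a soft ("Polyak") update - each target
# variable is nudged toward its online counterpart by a factor tau rather than copied
# outright. The same idea in plain numpy (illustrative only):
import numpy as np
def soft_update(online_params, target_params, tau=0.05):
    return [tau * w + (1.0 - tau) * wt for w, wt in zip(online_params, target_params)]
# tau = 1.0 would degenerate to a hard copy of the online weights.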
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, num_actor_vars):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.learning_rate = CRITIC_LEARNING_RATE
self.tau = TAU
# Create the critic network
self.in_states, self.in_actions, self.onnet_out_reward = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(
tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_values = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_values, self.onnet_out_reward)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(self.onnet_out_reward, self.in_actions)
def create_critic_network(self):
inp_state = tflearn.input_data(shape=[None, self.s_dim])
inp_action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inp_state, 400, activation='relu')
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(inp_action, 300)
net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
#out_rewards = tflearn.layers.core.single_unit(net, activation='linear', name='output_rewards')
out_reward = tflearn.fully_connected(net, 1, weights_init=w_init) # FIXME predicts single reward, need string of rewards
return inp_state, inp_action, out_reward
def train(self, observed_states, observed_action, mixed_rewards): # note: replaced predicted_q_value with sum of mixed rewards
return self.sess.run([self.onnet_out_reward, self.optimize], feed_dict={
self.in_states: observed_states,
self.in_actions: observed_action,
self.predicted_q_values: mixed_rewards
})
def predict(self, inputs, action):
return self.sess.run(self.onnet_out_reward, feed_dict={
self.in_states: inputs,
self.in_actions: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.in_states: inputs,
self.in_actions: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(MAX_EP_STEPS):
if RENDER_ENV:
env.render()
action_probabilities = actor.predict(np.reshape(s, (1, STATE_DIM)))
#print("action probs", action_probabilities)
action = choose_action(action_probabilities)
#print("action", action)
s2, r, done, info = env.step(action)
replay_buffer.add(np.reshape(s, (actor.s_dim,)), action, r, \
done, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, done_batch, s2_batch = \
replay_buffer.sample_batch(MINIBATCH_SIZE)
# action probs to actions # TODO how to deal with non-determinate policies
# convert actor.predict_target(s2_batch) to actions
# the problem is that critic expects actions to always be determinate, when in fact they are probabilistic
# Calculate targets
# todo can we just feed real a and s batch here, no s2?
# fixme critic predict expects 1D actions not 2D probabilities
a_batch = np.reshape(a_batch, (len(a_batch), 1))
#print("sbshape", np.shape(s_batch), "\n a shape", np.shape(a_batch))
targnet_predicted_reward = critic.predict_target(s_batch, a_batch)
#targnet_predicted_reward = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
# print("targnet prediction", targnet_predicted_reward) # this is a whole reward tensor!!
# actually, we mix observations with predictions by factor gamma
# fixme I think we need to get rid of this block. targ reward is single value?
obs_plus_predicted_rewards = []
for k in range(MINIBATCH_SIZE):
if done_batch[k]:
obs_plus_predicted_rewards.append(r_batch[k]) # final timestep is just the reward
else:
obs_plus_predicted_rewards.append(r_batch[k] + GAMMA * targnet_predicted_reward[k])
obs_plus_predicted_rewards = np.reshape(obs_plus_predicted_rewards, (len(obs_plus_predicted_rewards), 1))
# Update the critic given the targets
predicted_q_value, _ = critic.train(s_batch, a_batch, obs_plus_predicted_rewards)
#predicted_q_value, _ = critic.train(s_batch, a_batch, np.reshape(observed_rewards, (MINIBATCH_SIZE, 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
#a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_batch)
#grads = critic.action_gradients(s_batch, a_outs) # we aren't deterministic
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if done:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
# TODO check which ep reward is being printed
print( # TODO replace maxq with something more interesting
'| Reward: %.2i' % int(ep_reward), " | Episode", i, \
'| Qmax: %.4f' % (ep_ave_max_q / float(j)))
break
def | choose_action | identifier_name |
|
ddpg.py | _num_trainable_vars(self):
return self.num_trainable_vars
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, num_actor_vars):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.learning_rate = CRITIC_LEARNING_RATE
self.tau = TAU
# Create the critic network
self.in_states, self.in_actions, self.onnet_out_reward = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(
tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_values = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_values, self.onnet_out_reward)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(self.onnet_out_reward, self.in_actions)
def create_critic_network(self):
inp_state = tflearn.input_data(shape=[None, self.s_dim])
inp_action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inp_state, 400, activation='relu')
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(inp_action, 300)
net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
#out_rewards = tflearn.layers.core.single_unit(net, activation='linear', name='output_rewards')
out_reward = tflearn.fully_connected(net, 1, weights_init=w_init) # FIXME predicts single reward, need string of rewards
return inp_state, inp_action, out_reward
def train(self, observed_states, observed_action, mixed_rewards): # note: replaced predicted_q_value with sum of mixed rewards
return self.sess.run([self.onnet_out_reward, self.optimize], feed_dict={
self.in_states: observed_states,
self.in_actions: observed_action,
self.predicted_q_values: mixed_rewards
})
def predict(self, inputs, action):
return self.sess.run(self.onnet_out_reward, feed_dict={
self.in_states: inputs,
self.in_actions: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.in_states: inputs,
self.in_actions: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(MAX_EP_STEPS):
if RENDER_ENV:
env.render()
action_probabilities = actor.predict(np.reshape(s, (1, STATE_DIM)))
#print("action probs", action_probabilities)
action = choose_action(action_probabilities)
#print("action", action)
s2, r, done, info = env.step(action)
replay_buffer.add(np.reshape(s, (actor.s_dim,)), action, r, \
done, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, done_batch, s2_batch = \
replay_buffer.sample_batch(MINIBATCH_SIZE)
# action probs to actions # TODO how to deal with non-determinate policies
# convert actor.predict_target(s2_batch) to actions
# the problem is that critic expects actions to always be determinate, when in fact they are probabilistic
# Calculate targets
# todo can we just feed real a and s batch here, no s2?
# fixme critic predict expects 1D actions not 2D probabilities
a_batch = np.reshape(a_batch, (len(a_batch), 1))
#print("sbshape", np.shape(s_batch), "\n a shape", np.shape(a_batch))
targnet_predicted_reward = critic.predict_target(s_batch, a_batch)
#targnet_predicted_reward = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
# print("targnet prediction", targnet_predicted_reward) # this is a whole reward tensor!!
# actually, we mix observations with predictions by factor gamma
# fixme I think we need to get rid of this block. targ reward is single value?
obs_plus_predicted_rewards = []
for k in range(MINIBATCH_SIZE):
if done_batch[k]:
obs_plus_predicted_rewards.append(r_batch[k]) # final timestep is just the reward
else:
obs_plus_predicted_rewards.append(r_batch[k] + GAMMA * targnet_predicted_reward[k])
obs_plus_predicted_rewards = np.reshape(obs_plus_predicted_rewards, (len(obs_plus_predicted_rewards), 1))
# Update the critic given the targets
predicted_q_value, _ = critic.train(s_batch, a_batch, obs_plus_predicted_rewards)
#predicted_q_value, _ = critic.train(s_batch, a_batch, np.reshape(observed_rewards, (MINIBATCH_SIZE, 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
#a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_batch)
#grads = critic.action_gradients(s_batch, a_outs) # we aren't deterministic
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if done:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
# TODO check which ep reward is being printed
print( # TODO replace maxq with something more interesting
'| Reward: %.2i' % int(ep_reward), " | Episode", i, \
'| Qmax: %.4f' % (ep_ave_max_q / float(j)))
break
def choose_action(probabilities):
choice = int(np.random.choice(ACTION_SPACE, 1, p=probabilities))
return choice
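# Aside: choose_action samples a discrete action index from the actor's softmax output,
# so exploration comes from the stochastic policy itself rather than from additive action
# noise as in vanilla DDPG. A self-contained version (illustrative only):
import numpy as np
def sample_action(probabilities, action_space=(0, 1)):
    return int(np.random.choice(action_space, 1, p=probabilities))
# sample_action([0.7, 0.3]) returns 0 roughly 70% of the time.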
def main(_):
| with tf.Session() as sess:
# TODO: reduce network sizes. keep all states stop editing this ver, add dropout in successor
env = gym.make(ENV_NAME)
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
# Ensure action bound is symmetric
# assert (env.action_space.high == -env.action_space.low)
actor = ActorNetwork(sess)
critic = CriticNetwork(sess, actor.get_num_trainable_vars())
env = gym.wrappers.Monitor(env, MONITOR_DIR, force=True)
train(sess, env, actor, critic) | identifier_body |
|
ddpg.py | ACTION_PROB_DIMS = 2
ACTION_BOUND = 1
ACTION_SPACE = [0, 1]
# ===========================
# Utility Parameters
# ===========================
# Render gym env during training
RENDER_ENV = True
# Use Gym Monitor
GYM_MONITOR_EN = True
# Gym environment
ENV_NAME = 'CartPole-v0'
# Directory for storing gym results
MONITOR_DIR = './results/gym_ddpg'
# Directory for storing tensorboard summary results
SUMMARY_DIR = './results/tf_ddpg'
RANDOM_SEED = 1234
# Size of replay buffer
BUFFER_SIZE = 10000
MINIBATCH_SIZE = 64
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
"""
Input to the network is the state; the output is a softmax distribution
over the discrete actions.
"""
def __init__(self, sess):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.a_prob_dim = ACTION_PROB_DIMS
self.action_bound = ACTION_BOUND
self.learning_rate = ACTOR_LEARNING_RATE
self.tau = TAU
# Actor Network
self.onnet_in_states, self.out = self.create_actor_network()
self.network_params = tf.trainable_variables()
# Target Network
self.target_inputs, self.target_out = self.create_actor_network()
self.target_network_params = tf.trainable_variables()[len(self.network_params):]
# Op for periodically updating target network with online network weights
self.update_target_network_params = \
[self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) + \
tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# This gradient will be provided by the critic network
self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
# Combine the gradients here
self.actor_gradients = tf.gradients(self.out, self.network_params, -self.action_gradient)
# Optimization Op
self.optimize = tf.train.AdamOptimizer(self.learning_rate). \
apply_gradients(zip(self.actor_gradients, self.network_params))
self.num_trainable_vars = len(self.network_params) + len(self.target_network_params)
def create_actor_network(self):
in_states = tflearn.input_data(shape=[None, self.s_dim])
net = tflearn.fully_connected(in_states, 400, activation='relu')
net = tflearn.fully_connected(net, 300, activation='relu')
# Final layer weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
out_actions = tflearn.fully_connected(net, ACTION_PROB_DIMS, activation='softmax', weights_init=w_init)
return in_states, out_actions
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict={
self.onnet_in_states: inputs,
self.action_gradient: a_gradient
})
def predict(self, inp_states):
out_actions = self.sess.run(self.out, feed_dict={
self.onnet_in_states: inp_states
})
out_actions = out_actions[0]
#print("actor output actions", out_actions)
return out_actions
def predict_target(self, in_states):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: in_states
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
def get_num_trainable_vars(self):
return self.num_trainable_vars
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, num_actor_vars):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.learning_rate = CRITIC_LEARNING_RATE
self.tau = TAU
# Create the critic network
self.in_states, self.in_actions, self.onnet_out_reward = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(
tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_values = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_values, self.onnet_out_reward)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(self.onnet_out_reward, self.in_actions)
def create_critic_network(self):
inp_state = tflearn.input_data(shape=[None, self.s_dim])
inp_action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inp_state, 400, activation='relu')
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(inp_action, 300)
net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
#out_rewards = tflearn.layers.core.single_unit(net, activation='linear', name='output_rewards')
out_reward = tflearn.fully_connected(net, 1, weights_init=w_init) # FIXME predicts single reward, need string of rewards
return inp_state, inp_action, out_reward
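# Aside: the tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b trick above is just a
# single dense layer applied to the concatenation of state features and action - stacking the
# two weight matrices recovers the same affine map. A small numpy check of that identity:
import numpy as np
_s_feat = np.random.randn(5, 400)
_act = np.random.randn(5, 1)
_W1, _W2, _b = np.random.randn(400, 300), np.random.randn(1, 300), np.random.randn(300)
_split = _s_feat.dot(_W1) + _act.dot(_W2) + _b
_concat = np.concatenate([_s_feat, _act], axis=1).dot(np.concatenate([_W1, _W2], axis=0)) + _b
assert np.allclose(_split, _concat)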
def train(self, observed_states, observed_action, mixed_rewards): # note: replaced predicted_q_value with sum of mixed rewards
return self.sess.run([self.onnet_out_reward, self.optimize], feed_dict={
self.in_states: observed_states,
self.in_actions: observed_action,
self.predicted_q_values: mixed_rewards
})
def predict(self, inputs, action):
return self.sess.run(self.onnet_out_reward, feed_dict={
self.in_states: inputs,
self.in_actions: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.in_states: inputs,
self.in_actions: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(MAX_EP_STEPS):
if RENDER_ENV:
env.render()
action_probabilities = actor.predict(np.reshape(s, (1, STATE_DIM)))
#print("action probs", action_probabilities)
action = choose_action(action_probabilities)
#print("action", action)
s2, r, done, info = env.step(action)
replay_buffer.add(np.reshape(s, (actor.s_dim,)), action, r, \
done, np.reshape(s2, | # Soft target update param
TAU = 0.05
STATE_DIM = 4
ACTION_DIM = 1 | random_line_split |
|
ddpg.py |
class CriticNetwork(object):
"""
Input to the network is the state and action, output is Q(s,a).
The action must be obtained from the output of the Actor network.
"""
def __init__(self, sess, num_actor_vars):
self.sess = sess
self.s_dim = STATE_DIM
self.a_dim = ACTION_DIM
self.learning_rate = CRITIC_LEARNING_RATE
self.tau = TAU
# Create the critic network
self.in_states, self.in_actions, self.onnet_out_reward = self.create_critic_network()
self.network_params = tf.trainable_variables()[num_actor_vars:]
# Target Network
self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
# Op for periodically updating target network with online network weights with regularization
self.update_target_network_params = \
[self.target_network_params[i].assign(
tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# Network target (y_i)
self.predicted_q_values = tf.placeholder(tf.float32, [None, 1])
# Define loss and optimization Op
self.loss = tflearn.mean_square(self.predicted_q_values, self.onnet_out_reward)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Get the gradient of the net w.r.t. the action
self.action_grads = tf.gradients(self.onnet_out_reward, self.in_actions)
def create_critic_network(self):
inp_state = tflearn.input_data(shape=[None, self.s_dim])
inp_action = tflearn.input_data(shape=[None, self.a_dim])
net = tflearn.fully_connected(inp_state, 400, activation='relu')
# Add the action tensor in the 2nd hidden layer
# Use two temp layers to get the corresponding weights and biases
t1 = tflearn.fully_connected(net, 300)
t2 = tflearn.fully_connected(inp_action, 300)
net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(inp_action, t2.W) + t2.b, activation='relu')
# linear layer connected to 1 output representing Q(s,a)
# Weights are init to Uniform[-3e-3, 3e-3]
w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
#out_rewards = tflearn.layers.core.single_unit(net, activation='linear', name='output_rewards')
out_reward = tflearn.fully_connected(net, 1, weights_init=w_init) # FIXME predicts single reward, need string of rewards
return inp_state, inp_action, out_reward
def train(self, observed_states, observed_action, mixed_rewards): # note: replaced predicted_q_value with sum of mixed rewards
return self.sess.run([self.onnet_out_reward, self.optimize], feed_dict={
self.in_states: observed_states,
self.in_actions: observed_action,
self.predicted_q_values: mixed_rewards
})
def predict(self, inputs, action):
return self.sess.run(self.onnet_out_reward, feed_dict={
self.in_states: inputs,
self.in_actions: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_out, feed_dict={
self.target_inputs: inputs,
self.target_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict={
self.in_states: inputs,
self.in_actions: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
# Initialize replay memory
replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(MAX_EP_STEPS):
if RENDER_ENV:
env.render()
action_probabilities = actor.predict(np.reshape(s, (1, STATE_DIM)))
#print("action probs", action_probabilities)
action = choose_action(action_probabilities)
#print("action", action)
s2, r, done, info = env.step(action)
replay_buffer.add(np.reshape(s, (actor.s_dim,)), action, r, \
done, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > MINIBATCH_SIZE:
s_batch, a_batch, r_batch, done_batch, s2_batch = \
replay_buffer.sample_batch(MINIBATCH_SIZE)
# action probs to actions # TODO how to deal with non-determinate policies
# convert actor.predict_target(s2_batch) to actions
# the problem is that critic expects actions to always be determinate, when in fact they are probabilistic
# Calculate targets
# todo can we just feed real a and s batch here, no s2?
# fixme critic predict expects 1D actions not 2D probabilities
a_batch = np.reshape(a_batch, (len(a_batch), 1))
#print("sbshape", np.shape(s_batch), "\n a shape", np.shape(a_batch))
targnet_predicted_reward = critic.predict_target(s_batch, a_batch)
#targnet_predicted_reward = critic.predict_target(s2_batch, actor.predict_target(s2_batch))
# print("targnet prediction", targnet_predicted_reward) # this is a whole reward tensor!!
# actually, we mix observations with predictions by factor gamma
# fixme I think we need to get rid of this block. targ reward is single value?
obs_plus_predicted_rewards = []
for k in range(MINIBATCH_SIZE):
if done_batch[k]:
obs_plus_predicted_rewards.append(r_batch[k]) # final timestep is just the reward
else:
obs_plus_predicted_rewards.append(r_batch[k] + GAMMA * targnet_predicted_reward[k])
obs_plus_predicted_rewards = np.reshape(obs_plus_predicted_rewards, (len(obs_plus_predicted_rewards), 1))
# Update the critic given the targets
predicted_q_value, _ = critic.train(s_batch, a_batch, obs_plus_predicted_rewards)
#predicted_q_value, _ = critic.train(s_batch, a_batch, np.reshape(observed_rewards, (MINIBATCH_SIZE, 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
#a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_batch)
#grads = critic.action_gradients(s_batch, a_outs) # we aren't deterministic
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if done:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
# TODO check which ep reward is being printed
print( # TODO replace maxq with something more interesting
'| Reward: %.2i' % int(ep_reward), " | Episode", i, \
'| Qmax: %.4f' % (ep_ave_max_q / float(j)))
break
def choose_action(probabilities):
choice = int(np.random.choice(ACTION_SPACE, 1, p=probabilities))
return choice
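# Aside: the obs_plus_predicted_rewards loop in train() builds the usual one-step TD target
#   y_k = r_k                                   if the episode ended at step k
#   y_k = r_k + GAMMA * Q_target(s2_k, a2_k)    otherwise.
# A vectorised numpy sketch of the same computation (gamma=0.99 is a placeholder here;
# the script's own GAMMA constant is defined elsewhere):
import numpy as np
def td_targets(rewards, target_q, done_flags, gamma=0.99):
    rewards = np.asarray(rewards, dtype=np.float32)
    target_q = np.asarray(target_q, dtype=np.float32).reshape(-1)
    continue_mask = 1.0 - np.asarray(done_flags, dtype=np.float32)
    return (rewards + gamma * continue_mask * target_q).reshape(-1, 1)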
def main(_):
with tf.Session() as sess:
# TODO: reduce network sizes. keep all states stop editing this ver, add dropout in successor
env = gym.make(ENV_NAME)
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
# Ensure action bound is symmetric
# assert (env.action_space.high == -env.action_space.low)
actor = ActorNetwork(sess)
critic = CriticNetwork(sess, actor.get_num_trainable_vars())
env = gym.wrappers.Monitor(env, MONITOR_DIR, force=True)
train(sess, env, actor, critic)
if __name__ == '__main__':
| tf.app.run() | conditional_block |
|
lab3.py |
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S*) (\S* *)" (\d{3}) (\S+)'
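# Aside: a quick check of how the capture groups line up against a made-up line in the
# same Common Log Format (the sample host/endpoint below are invented for illustration):
import re
_sample_line = 'in24.inetnebr.com - - [01/Aug/1995:00:00:01 -0400] "GET /ksc.html HTTP/1.0" 200 7074'
_m = re.search(APACHE_ACCESS_LOG_PATTERN, _sample_line)
# _m.group(1) -> 'in24.inetnebr.com' (host), _m.group(4) -> '01/Aug/1995:00:00:01 -0400' (timestamp),
# _m.group(5) -> 'GET' (method), _m.group(6) -> '/ksc.html' (endpoint),
# _m.group(8) -> '200' (response code), _m.group(9) -> '7074' (content size)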
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: {}'.format(failed_logs.count())
for line in failed_logs.take(20):
print 'Invalid logline: {}'.format(line)
print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format(parsed_logs.count(), access_logs.count(), failed_logs.count())
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: {}, Min: {}, Max: {}'.format(
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max())
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
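# Aside: the map/reduceByKey pair above is the Spark analogue of building a frequency table;
# on a plain Python list of response codes the same counts fall out of collections.Counter:
from collections import Counter
_example_codes = [200, 200, 304, 404, 200]      # hypothetical values, not lab data
_example_counts = Counter(_example_codes)       # Counter({200: 3, 304: 1, 404: 1})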
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '{}'.format(value)
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more than 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
day | return (logline, 0) | conditional_block |
|
lab3.py | print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format(parsed_logs.count(), access_logs.count(), failed_logs.count())
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: {}, Min: {}, Max: {}'.format(
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max())
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '{}'.format(value)
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more than 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda x: (x[0],set(x[1])))
dailyHosts = (dayHostCount.map(lambda x: (x[0],len(x[1]))))
dailyHostsList = dailyHosts.takeOrdered(30)
print 'Unique hosts per day: {}'.format(dailyHostsList)
dailyHosts.cache()
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, | """ Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid loglines: {}'.format(failed_logs_count)
for line in failed_logs.take(20):
print 'Invalid logline: {}'.format(line)
| identifier_body |
|
lab3.py | /winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif', 44), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
errHostsCountPairTuple = badRecords.map(lambda x: (x.host,1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a,b: a+b)
errHostsTop25 = errHostsSum.takeOrdered(25, key= lambda x: -x[1])
print 'Top 25 hosts that generated errors: {}'.format(errHostsTop25)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
errDateCountPairTuple = badRecords.map(lambda x: (x.date_time.day,1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b: a+b)
#print '{}'.format(errDateSum.take(10))
errDateSorted = (errDateSum)
#print errDateSorted
errByDate = errDateSorted.takeOrdered(30)
print '404 Errors by day: {}'.format(errByDate)
errDateSorted.cache()
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
daysWithErrors404 = errDateSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByDay = [x[1] for x in errDateSorted.takeOrdered(30, key= lambda x: x[0])]
print errors404ByDay
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
topErrDate = errDateSorted.takeOrdered(5, key= lambda x: -x[1])
print 'Top Five dates for 404 requests: {}'.format(topErrDate)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
hourCountPairTuple = badRecords.map(lambda x: (x.date_time.hour,1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a+b)
hourRecordsSorted = hourRecordsSum
errHourList = hourRecordsSorted.takeOrdered(30)
print 'Top hours for 404 requests: {}'.format(errHourList)
hourRecordsSorted.cache()
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList') | Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
hoursWithErrors404 = hourRecordsSorted.map(lambda x: x[0]).takeOrdered(30) | random_line_split |
|
lab3.py | ():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid loglines: {}'.format(failed_logs_count)
for line in failed_logs.take(20):
print 'Invalid logline: {}'.format(line)
print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format(parsed_logs.count(), access_logs.count(), failed_logs.count())
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: {}, Min: {}, Max: {}'.format(
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max())
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formatted string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '{}'.format(value)
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is too small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more than 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda x: (x[0],set(x[1])))
dailyHosts = (dayHostCount.map(lambda x: (x[0],len(x[1]))))
dailyHostsList = dailyHosts.takeOrdered(30)
print 'Unique hosts per day: {}'.format(dailyHostsList)
dailyHosts.cache()
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), ( | parseLogs | identifier_name |
|
lib.rs | , doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
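// Sketch only, not from the original crate: one plausible hand-written `poll_all`/`poll_all_mut`
// override for a node that keeps its children in a Vec. It assumes the usual `header()` /
// `header_mut()` accessors are the only required items on `Node`, that `Header` is `Clone`,
// and the struct/field names below are made up for illustration.
#[derive(Clone)]
struct Folder {
    header: Header,
    entries: Vec<Child<Folder>>,
}
impl Node for Folder {
    fn header(&self) -> &Header { &self.header }
    fn header_mut(&mut self) -> &mut Header { &mut self.header }
    // Poll each Child exactly once, without recursing into grandchildren.
    fn poll_all(&self) {
        for entry in &self.entries { entry.poll(); }
    }
    fn poll_all_mut(&mut self) {
        for entry in &mut self.entries { entry.poll_mut(); }
    }
    fn implements_poll_all(&self) -> bool { true }
}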
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
fn dyn_clone(&self) -> Rc<dyn NodeClone> |
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
pub fn get_ref(&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
// method that takes a new parent. similarly, there should be a helper
// for moving Child instances to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
}
impl<T: NodeClone> Drop for Child<T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn | {
Rc::new(self.clone())
} | identifier_body |
lib.rs | , doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
fn dyn_clone(&self) -> Rc<dyn NodeClone> {
Rc::new(self.clone())
}
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
pub fn | (&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
// method that takes a new parent. similarly, there should be a helper
// for moving Child instances to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
}
impl<T: NodeClone> Drop for Child<T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn Node | get_ref | identifier_name |
lib.rs | , doing so will **`panic!`**.
///
/// If you do implement this method, also make sure to implement `implements_poll_child`
/// such that it returns true if you want it to be used on `self` specifically.
fn poll_child_mut(&mut self, _id: ID) { }
fn implements_poll_child(&self) -> bool { false }
/// Optional: You may implement this method for your struct if it does something special.
/// This includes:
///
/// - `.clone()` does not actually clone all the `Child` instances in the struct.
/// (also implement `poll_child` in this case)
/// - The struct contains a lot of fields which are expensive to copy and drop.
/// - The struct does not safely fit on the stack. (TODO: there are likely other issues with this)
///
/// Cod will use this when removing nodes from the tree, to find the children of this
/// node. If the implementation is not specialized, Cod will instead clone and then
/// immediately drop the struct to determine the children.
///
/// The implementation should call `.poll()` on each `Child` instance it contains
/// (not recursively!).
///
/// If you do implement this method, also make sure to implement `implements_poll_all`
/// such that it returns true if you want it to be used on `self` specifically.
/// In addition, you should implement `poll_all_mut`.
fn poll_all(&self) { }
/// Optional: See [`poll_all`]. This is the mutable version. The implementation should
/// call `.poll_mut()` on all `Child` instances associated with this node.
fn poll_all_mut(&mut self) { }
fn implements_poll_all(&self) -> bool { false }
}
/// This is a wrapper trait for `Node` which enables cloning through dynamic dispatch and RTTI.
/// It will be automatically implemented for any struct that is `Node + Clone`.
pub trait NodeClone: Node + Any {
fn dyn_clone(&self) -> Rc<dyn NodeClone>;
/// clone, then immediately drop. used for reflection
fn cod(&self);
}
impl<T: Node + Clone> NodeClone for T {
fn dyn_clone(&self) -> Rc<dyn NodeClone> {
Rc::new(self.clone())
}
fn cod(&self) {
let _ = self.clone();
}
}
pub struct Child<T: NodeClone> {
inner_ref: Rc<T>,
}
pub struct ParentID(ID);
impl From<ID> for ParentID {
fn from(id: ID) -> Self { ParentID(id) }
}
impl From<&Header> for ParentID {
fn from(header: &Header) -> Self { ParentID(header.id) }
}
impl<P: Node> From<&P> for ParentID {
fn from(parent: &P) -> Self { ParentID(parent.header().id) }
}
impl<T: NodeClone + Clone> Child<T> {
pub fn with_parent(parent: impl Into<ParentID>, node: T) -> Self {
Self::with_parent_id(parent.into().0, node)
}
fn with_parent_id(parent_id: ID, mut node: T) -> Self {
node.header_mut().parent_id = Some(parent_id);
let rc = Rc::new(node);
let child = Self {
inner_ref: rc.clone()
};
CONTEXT.with(|c| {
Context::poll(c, PollReason::Construct, rc);
});
child
}
/// TODO. avoid new clone if child has already been accessed during this mutation session.
pub fn make_mut(&mut self) -> MakeMutRef<'_, T> {
CONTEXT.with(|c| {
if Context::mutation_session_active(c) {
// let the context handle cloning (special stuff needs to happen)
if let Some(new_ref) =
Context::poll_mut(c, PollReason::MakeMutPre, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
} else {
Rc::make_mut(&mut self.inner_ref);
}
});
MakeMutRef {
child: self
}
}
pub fn get_ref(&self) -> Rc<T> {
Rc::clone(&self.inner_ref)
}
pub fn get_id(&self) -> ID {
self.inner_ref.header().id
}
pub fn set_parent(&mut self, parent: impl Into<ParentID>) {
self.make_mut().header_mut().parent_id = Some(parent.into().0);
}
/// Deep clone and set new parent. If you do not need to change the parent,
/// you may also use `.clone()` directly.
pub fn deep_clone_to_parent(&self, parent: impl Into<ParentID>) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::DeepCopy(parent.into().0), Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
}
pub fn poll(&self) {
CONTEXT.with(|c| {
Context::poll(c, PollReason::Manual, Rc::clone(&self.inner_ref));
});
}
pub fn poll_mut(&mut self) {
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::ManualMut, Rc::clone(&self.inner_ref)) {
self.inner_ref = new_ref;
}
});
}
}
impl<T: NodeClone> Deref for Child<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner_ref
}
}
impl<T: NodeClone> Clone for Child<T> {
// TODO: for user-facing cloning, there should (instead) be a separate deep_clone
// method that takes a new parent. similarly, there should be a helper
// for moving Child instances to a different parent.
fn clone(&self) -> Self {
let mut child = Self {
inner_ref: Rc::clone(&self.inner_ref),
};
CONTEXT.with(|c| {
if let Some(new_ref) =
Context::poll_mut(c, PollReason::Clone, Rc::clone(&child.inner_ref)) {
child.inner_ref = new_ref;
}
});
child
} | // so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::Drop, Rc::clone(&self.inner_ref));
});
}
}
pub struct MakeMutRef<'a, T: NodeClone> {
child: &'a mut Child<T>
}
impl<'a, T: NodeClone> Deref for MakeMutRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.child.inner_ref
}
}
impl<'a, T: NodeClone> DerefMut for MakeMutRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// Will not panic because the Child is mutably borrowed and
// the Rc was made unique upon creation of self
Rc::get_mut(&mut self.child.inner_ref).unwrap()
}
}
impl<'a, T: NodeClone> Drop for MakeMutRef<'a, T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data,
// so going to an old state after catching an unwind _should_ be fine.
if std::thread::panicking() { return }
CONTEXT.with(|c| {
Context::poll(c, PollReason::MakeMutPost, Rc::clone(&self.child.inner_ref));
});
}
}
impl<T: NodeClone + Debug> Debug for Child<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(self.inner_ref), f)
}
}
/// One state of the application.
/// States can be cloned freely and cloning is persistent, so it is very cheap.
///
/// R is the type of the root node.
#[derive(Clone)]
pub struct State<R: NodeClone + Clone> {
root: Rc<R>,
id_lookup: im::HashMap<ID, Weak<dyn NodeClone>>,
}
impl<R: NodeClone + Clone> State<R> {
/// Calls a closure that constructs the tree. No existing nodes can be moved in,
/// they all have to be created during the execution of this closure and on the same
/// thread.
pub fn construct<F: FnOnce() -> R>(construct: F) -> Self {
CONTEXT.with(|c| {
Context::begin_mutate(c);
});
let root = Rc::new(construct());
let mut state = Self {
root: Rc::clone(&root),
id_lookup: im::HashMap::new(),
};
CONTEXT.with(|c| {
state.apply_updates(Context::end_mutate(c));
});
state.id_lookup.insert(root.header().id, Rc::downgrade(&root) as Weak<dyn NodeClone | }
impl<T: NodeClone> Drop for Child<T> {
fn drop(&mut self) {
// XXX: This should only create inconsistencies in the newest version of the data, | random_line_split |
bundle.py | key_network(self) -> str:
return "network"
def key_network_def(self) -> str:
return "network_def"
def key_train_trainer_max_epochs(self) -> str:
return "train#trainer#max_epochs"
def key_train_dataset_data(self) -> str:
return "train#dataset#data"
def key_train_handlers(self) -> str:
return "train#handlers"
def key_validate_dataset_data(self) -> str:
return "validate#dataset#data"
def key_tracking(self) -> str:
return "tracking"
def key_tracking_uri(self) -> str:
return "tracking_uri"
def | (self) -> str:
return "experiment_name"
def key_run_name(self) -> str:
return "run_name"
def key_displayable_configs(self) -> Sequence[str]:
return ["displayable_configs"]
class BundleTrainTask(TrainTask):
def __init__(
self,
path: str,
conf: Dict[str, str],
const: Optional[BundleConstants] = None,
enable_tracking=False,
model_dict_key="model",
load_strict=False,
):
self.valid: bool = False
self.conf = conf
self.const = const if const else BundleConstants()
self.enable_tracking = enable_tracking
self.model_dict_key = model_dict_key
self.load_strict = load_strict
config_paths = [c for c in self.const.configs() if os.path.exists(os.path.join(path, "configs", c))]
if not config_paths:
logger.warning(f"Ignore {path} as there is no train config {self.const.configs()} exists")
return
self.bundle_path = path
self.bundle_config_path = os.path.join(path, "configs", config_paths[0])
self.bundle_config = self._load_bundle_config(self.bundle_path, self.bundle_config_path)
# https://docs.monai.io/en/latest/mb_specification.html#metadata-json-file
self.bundle_metadata_path = os.path.join(path, "configs", "metadata.json")
with open(os.path.join(path, "configs", self.const.metadata_json())) as fp:
metadata = json.load(fp)
super().__init__(metadata.get("description", ""))
self.valid = True
self.version = metadata.get("version")
def is_valid(self):
return self.valid
def info(self):
i = super().info()
i["version"] = self.version
return i
def config(self):
# Add models and param options to the train option panel
pytorch_models = [os.path.basename(p) for p in glob.glob(os.path.join(self.bundle_path, "models", "*.pt"))]
pytorch_models.sort(key=len)
config_options = {
"device": device_list(), # DEVICE
"pretrained": True, # USE EXISTING CHECKPOINT/PRETRAINED MODEL
"max_epochs": 50, # TOTAL EPOCHS TO RUN
"val_split": 0.2, # VALIDATION SPLIT; -1 TO USE DEFAULT FROM BUNDLE
"multi_gpu": True, # USE MULTI-GPU
"gpus": "all", # COMMA SEPARATE DEVICE INDEX
"tracking": ["mlflow", "None"]
if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED
else ["None", "mlflow"],
"tracking_uri": settings.MONAI_LABEL_TRACKING_URI,
"tracking_experiment_name": "",
"run_id": "", # bundle run id, if different from default
"model_filename": pytorch_models,
}
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
config_options.update(self.bundle_config.get_parsed_content(k, instantiate=True)) # type: ignore
return config_options
def _fetch_datalist(self, request, datastore: Datastore):
datalist = datastore.datalist()
# only use image and label attributes; skip for other meta info from datastore for now
datalist = [{"image": d["image"], "label": d["label"]} for d in datalist if d]
if "detection" in request.get("model"):
# Generate datalist for detection task, box and label keys are used by default.
# Future: either use box and label keys for all detection models, or set these keys by config.
for idx, d in enumerate(datalist):
with open(d["label"]) as fp:
json_object = json.loads(fp.read()) # load box coordinates from subject JSON
bboxes = [bdict["center"] + bdict["size"] for bdict in json_object["markups"]]
# Only detection is supported here; classification labels are not supported in bundles yet,
# so 0 is used for all positive boxes for now.
datalist[idx] = {"image": d["image"], "box": bboxes, "label": [0] * len(bboxes)}
return datalist
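# Illustrative only: given a hypothetical markups JSON like
# {"markups": [{"center": [10.0, 20.0, 30.0], "size": [4.0, 4.0, 4.0]}]}
# the loop above would emit a datalist entry of the form
# {"image": "<image path>", "box": [[10.0, 20.0, 30.0, 4.0, 4.0, 4.0]], "label": [0]}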
def _partition_datalist(self, datalist, request, shuffle=False):
val_split = request.get("val_split", 0.2)
logger.info(f"Total Records in Dataset: {len(datalist)}; Validation Split: {val_split}")
if val_split > 0.0:
train_datalist, val_datalist = partition_dataset(
datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle
)
else:
train_datalist = datalist
val_datalist = None if val_split < 0 else []
logger.info(f"Total Records for Training: {len(train_datalist)}")
logger.info(f"Total Records for Validation: {len(val_datalist) if val_datalist else ''}")
return train_datalist, val_datalist
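# Quick sketch of the split behaviour (toy data, assumes MONAI's partition_dataset as called above):
# partition_dataset(list(range(10)), ratios=[0.8, 0.2], shuffle=False)
# should return roughly an 80/20 split, e.g. [0, 1, 2, 3, 4, 5, 6, 7] and [8, 9].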
def _load_checkpoint(self, model_pytorch, pretrained, train_handlers):
load_path = model_pytorch if pretrained else None
if load_path and os.path.exists(load_path):
logger.info(f"Add Checkpoint Loader for Path: {load_path}")
load_dict = {self.model_dict_key: f"$@{self.const.key_network()}"}
if not [t for t in train_handlers if t.get("_target_") == CheckpointLoader.__name__]:
loader = {
"_target_": CheckpointLoader.__name__,
"load_path": load_path,
"load_dict": load_dict,
"strict": self.load_strict,
}
train_handlers.insert(0, loader)
def __call__(self, request, datastore: Datastore):
logger.info(f"Train Request: {request}")
ds = self._fetch_datalist(request, datastore)
train_ds, val_ds = self._partition_datalist(ds, request)
max_epochs = request.get("max_epochs", 50)
pretrained = request.get("pretrained", True)
multi_gpu = request.get("multi_gpu", True)
force_multi_gpu = request.get("force_multi_gpu", False)
run_id = request.get("run_id", "run")
multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False
gpus = request.get("gpus", "all")
gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")]
multi_gpu = True if force_multi_gpu or multi_gpu and len(gpus) > 1 else False
logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}")
logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
device = name_to_device(request.get("device", "cuda"))
logger.info(f"Using device: {device}; Type: {type(device)}")
tracking = request.get(
"tracking", "mlflow" if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED else ""
)
tracking = tracking[0] if isinstance(tracking, list) else tracking
tracking_uri = request.get("tracking_uri")
tracking_uri = tracking_uri if tracking_uri else settings.MONAI_LABEL_TRACKING_URI
tracking_experiment_name = request.get("tracking_experiment_name")
tracking_experiment_name = tracking_experiment_name if tracking_experiment_name else request.get("model")
tracking_run_name = request.get("tracking_run_name")
logger.info(f"(Experiment Management) Tracking: {tracking}")
logger.info(f"(Experiment Management) Tracking URI: {tracking_uri}")
logger.info(f"(Experiment Management) Experiment Name: {tracking_experiment_name}")
logger.info(f"(Experiment Management) Run Name: {tracking_run_name}")
train_handlers = self.bundle_config.get(self.const.key_train_handlers(), [])
model_filename = request.get("model_filename", "model.pt")
model_filename = model_filename if isinstance(model_filename, str) else model_filename[0]
model_pytorch = os.path.join(self.bundle_path, "models", model_filename)
self._load_checkpoint(model_pytorch, pretrained, train_handlers)
overrides = {
self.const.key_bundle_root(): self.bundle_path,
self.const.key_train_trainer_max_epochs(): max_epochs,
self.const.key_train_dataset_data(): train_ds,
self.const.key_device(): device,
self.const.key_train_handlers(): train_handlers,
}
# update config options from user
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
displayable_configs = self.bundle_config.get_parsed_content(k, | key_experiment_name | identifier_name |
bundle.py | -> str:
return "train#handlers"
def key_validate_dataset_data(self) -> str:
return "validate#dataset#data"
def key_tracking(self) -> str:
return "tracking"
def key_tracking_uri(self) -> str:
return "tracking_uri"
def key_experiment_name(self) -> str:
return "experiment_name"
def key_run_name(self) -> str:
return "run_name"
def key_displayable_configs(self) -> Sequence[str]:
return ["displayable_configs"]
class BundleTrainTask(TrainTask):
def __init__(
self,
path: str,
conf: Dict[str, str],
const: Optional[BundleConstants] = None,
enable_tracking=False,
model_dict_key="model",
load_strict=False,
):
self.valid: bool = False
self.conf = conf
self.const = const if const else BundleConstants()
self.enable_tracking = enable_tracking
self.model_dict_key = model_dict_key
self.load_strict = load_strict
config_paths = [c for c in self.const.configs() if os.path.exists(os.path.join(path, "configs", c))]
if not config_paths:
logger.warning(f"Ignore {path} as there is no train config {self.const.configs()} exists")
return
self.bundle_path = path
self.bundle_config_path = os.path.join(path, "configs", config_paths[0])
self.bundle_config = self._load_bundle_config(self.bundle_path, self.bundle_config_path)
# https://docs.monai.io/en/latest/mb_specification.html#metadata-json-file
self.bundle_metadata_path = os.path.join(path, "configs", "metadata.json")
with open(os.path.join(path, "configs", self.const.metadata_json())) as fp:
metadata = json.load(fp)
super().__init__(metadata.get("description", ""))
self.valid = True
self.version = metadata.get("version")
def is_valid(self):
return self.valid
def info(self):
i = super().info()
i["version"] = self.version
return i
def config(self):
# Add models and param options to the train option panel
pytorch_models = [os.path.basename(p) for p in glob.glob(os.path.join(self.bundle_path, "models", "*.pt"))]
pytorch_models.sort(key=len)
config_options = {
"device": device_list(), # DEVICE
"pretrained": True, # USE EXISTING CHECKPOINT/PRETRAINED MODEL
"max_epochs": 50, # TOTAL EPOCHS TO RUN
"val_split": 0.2, # VALIDATION SPLIT; -1 TO USE DEFAULT FROM BUNDLE
"multi_gpu": True, # USE MULTI-GPU
"gpus": "all", # COMMA SEPARATE DEVICE INDEX
"tracking": ["mlflow", "None"]
if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED
else ["None", "mlflow"],
"tracking_uri": settings.MONAI_LABEL_TRACKING_URI,
"tracking_experiment_name": "",
"run_id": "", # bundle run id, if different from default
"model_filename": pytorch_models,
}
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
config_options.update(self.bundle_config.get_parsed_content(k, instantiate=True)) # type: ignore
return config_options
def _fetch_datalist(self, request, datastore: Datastore):
datalist = datastore.datalist()
# only use image and label attributes; skip for other meta info from datastore for now
datalist = [{"image": d["image"], "label": d["label"]} for d in datalist if d]
if "detection" in request.get("model"):
# Generate datalist for detection task, box and label keys are used by default.
# Future: either use box and label keys for all detection models, or set these keys by config.
for idx, d in enumerate(datalist):
with open(d["label"]) as fp:
json_object = json.loads(fp.read()) # load box coordinates from subject JSON
bboxes = [bdict["center"] + bdict["size"] for bdict in json_object["markups"]]
# Only detection is supported here; classification labels are not supported in bundles yet,
# so 0 is used for all positive boxes for now.
datalist[idx] = {"image": d["image"], "box": bboxes, "label": [0] * len(bboxes)}
return datalist
def _partition_datalist(self, datalist, request, shuffle=False):
val_split = request.get("val_split", 0.2)
logger.info(f"Total Records in Dataset: {len(datalist)}; Validation Split: {val_split}")
if val_split > 0.0:
train_datalist, val_datalist = partition_dataset(
datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle
)
else:
train_datalist = datalist
val_datalist = None if val_split < 0 else []
logger.info(f"Total Records for Training: {len(train_datalist)}")
logger.info(f"Total Records for Validation: {len(val_datalist) if val_datalist else ''}")
return train_datalist, val_datalist
def _load_checkpoint(self, model_pytorch, pretrained, train_handlers):
load_path = model_pytorch if pretrained else None
if load_path and os.path.exists(load_path):
logger.info(f"Add Checkpoint Loader for Path: {load_path}")
load_dict = {self.model_dict_key: f"$@{self.const.key_network()}"}
if not [t for t in train_handlers if t.get("_target_") == CheckpointLoader.__name__]:
loader = {
"_target_": CheckpointLoader.__name__,
"load_path": load_path,
"load_dict": load_dict,
"strict": self.load_strict,
}
train_handlers.insert(0, loader)
def __call__(self, request, datastore: Datastore):
logger.info(f"Train Request: {request}")
ds = self._fetch_datalist(request, datastore)
train_ds, val_ds = self._partition_datalist(ds, request)
max_epochs = request.get("max_epochs", 50)
pretrained = request.get("pretrained", True)
multi_gpu = request.get("multi_gpu", True)
force_multi_gpu = request.get("force_multi_gpu", False)
run_id = request.get("run_id", "run")
multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False
gpus = request.get("gpus", "all")
gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")]
multi_gpu = True if force_multi_gpu or multi_gpu and len(gpus) > 1 else False
logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}")
logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
device = name_to_device(request.get("device", "cuda"))
logger.info(f"Using device: {device}; Type: {type(device)}")
tracking = request.get(
"tracking", "mlflow" if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED else ""
)
tracking = tracking[0] if isinstance(tracking, list) else tracking
tracking_uri = request.get("tracking_uri")
tracking_uri = tracking_uri if tracking_uri else settings.MONAI_LABEL_TRACKING_URI
tracking_experiment_name = request.get("tracking_experiment_name")
tracking_experiment_name = tracking_experiment_name if tracking_experiment_name else request.get("model")
tracking_run_name = request.get("tracking_run_name")
logger.info(f"(Experiment Management) Tracking: {tracking}")
logger.info(f"(Experiment Management) Tracking URI: {tracking_uri}")
logger.info(f"(Experiment Management) Experiment Name: {tracking_experiment_name}")
logger.info(f"(Experiment Management) Run Name: {tracking_run_name}")
train_handlers = self.bundle_config.get(self.const.key_train_handlers(), [])
model_filename = request.get("model_filename", "model.pt")
model_filename = model_filename if isinstance(model_filename, str) else model_filename[0]
model_pytorch = os.path.join(self.bundle_path, "models", model_filename)
self._load_checkpoint(model_pytorch, pretrained, train_handlers)
overrides = {
self.const.key_bundle_root(): self.bundle_path,
self.const.key_train_trainer_max_epochs(): max_epochs,
self.const.key_train_dataset_data(): train_ds,
self.const.key_device(): device,
self.const.key_train_handlers(): train_handlers,
}
# update config options from user
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
displayable_configs = self.bundle_config.get_parsed_content(k, instantiate=True)
overrides[k] = {c: request[c] for c in displayable_configs.keys()}
if tracking and tracking.lower() != "none":
overrides[self.const.key_tracking()] = tracking
if tracking_uri:
overrides[self.const.key_tracking_uri()] = tracking_uri
if tracking_experiment_name:
| overrides[self.const.key_experiment_name()] = tracking_experiment_name | conditional_block |
|
bundle.py | model_dict_key="model",
load_strict=False,
):
self.valid: bool = False
self.conf = conf
self.const = const if const else BundleConstants()
self.enable_tracking = enable_tracking
self.model_dict_key = model_dict_key
self.load_strict = load_strict
config_paths = [c for c in self.const.configs() if os.path.exists(os.path.join(path, "configs", c))]
if not config_paths:
logger.warning(f"Ignore {path} as there is no train config {self.const.configs()} exists")
return
self.bundle_path = path
self.bundle_config_path = os.path.join(path, "configs", config_paths[0])
self.bundle_config = self._load_bundle_config(self.bundle_path, self.bundle_config_path)
# https://docs.monai.io/en/latest/mb_specification.html#metadata-json-file
self.bundle_metadata_path = os.path.join(path, "configs", "metadata.json")
with open(os.path.join(path, "configs", self.const.metadata_json())) as fp:
metadata = json.load(fp)
super().__init__(metadata.get("description", ""))
self.valid = True
self.version = metadata.get("version")
def is_valid(self):
return self.valid
def info(self):
i = super().info()
i["version"] = self.version
return i
def config(self):
# Add models and param options to the train option panel
pytorch_models = [os.path.basename(p) for p in glob.glob(os.path.join(self.bundle_path, "models", "*.pt"))]
pytorch_models.sort(key=len)
config_options = {
"device": device_list(), # DEVICE
"pretrained": True, # USE EXISTING CHECKPOINT/PRETRAINED MODEL
"max_epochs": 50, # TOTAL EPOCHS TO RUN
"val_split": 0.2, # VALIDATION SPLIT; -1 TO USE DEFAULT FROM BUNDLE
"multi_gpu": True, # USE MULTI-GPU
"gpus": "all", # COMMA SEPARATE DEVICE INDEX
"tracking": ["mlflow", "None"]
if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED
else ["None", "mlflow"],
"tracking_uri": settings.MONAI_LABEL_TRACKING_URI,
"tracking_experiment_name": "",
"run_id": "", # bundle run id, if different from default
"model_filename": pytorch_models,
}
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
config_options.update(self.bundle_config.get_parsed_content(k, instantiate=True)) # type: ignore
return config_options
def _fetch_datalist(self, request, datastore: Datastore):
datalist = datastore.datalist()
# only use image and label attributes; skip for other meta info from datastore for now
datalist = [{"image": d["image"], "label": d["label"]} for d in datalist if d]
if "detection" in request.get("model"):
# Generate datalist for detection task, box and label keys are used by default.
# Future: either use box and label keys for all detection models, or set these keys by config.
for idx, d in enumerate(datalist):
with open(d["label"]) as fp:
json_object = json.loads(fp.read()) # load box coordinates from subject JSON
bboxes = [bdict["center"] + bdict["size"] for bdict in json_object["markups"]]
# Only detection is supported here; classification labels are not supported in bundles yet,
# so 0 is used for all positive boxes for now.
datalist[idx] = {"image": d["image"], "box": bboxes, "label": [0] * len(bboxes)}
return datalist
def _partition_datalist(self, datalist, request, shuffle=False):
val_split = request.get("val_split", 0.2)
logger.info(f"Total Records in Dataset: {len(datalist)}; Validation Split: {val_split}")
if val_split > 0.0:
train_datalist, val_datalist = partition_dataset(
datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle
)
else:
train_datalist = datalist
val_datalist = None if val_split < 0 else []
logger.info(f"Total Records for Training: {len(train_datalist)}")
logger.info(f"Total Records for Validation: {len(val_datalist) if val_datalist else ''}")
return train_datalist, val_datalist
def _load_checkpoint(self, model_pytorch, pretrained, train_handlers):
load_path = model_pytorch if pretrained else None
if load_path and os.path.exists(load_path):
logger.info(f"Add Checkpoint Loader for Path: {load_path}")
load_dict = {self.model_dict_key: f"$@{self.const.key_network()}"}
if not [t for t in train_handlers if t.get("_target_") == CheckpointLoader.__name__]:
loader = {
"_target_": CheckpointLoader.__name__,
"load_path": load_path,
"load_dict": load_dict,
"strict": self.load_strict,
}
train_handlers.insert(0, loader)
def __call__(self, request, datastore: Datastore):
logger.info(f"Train Request: {request}")
ds = self._fetch_datalist(request, datastore)
train_ds, val_ds = self._partition_datalist(ds, request)
max_epochs = request.get("max_epochs", 50)
pretrained = request.get("pretrained", True)
multi_gpu = request.get("multi_gpu", True)
force_multi_gpu = request.get("force_multi_gpu", False)
run_id = request.get("run_id", "run")
multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False
gpus = request.get("gpus", "all")
gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")]
multi_gpu = True if force_multi_gpu or multi_gpu and len(gpus) > 1 else False
logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}")
logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
device = name_to_device(request.get("device", "cuda"))
logger.info(f"Using device: {device}; Type: {type(device)}")
tracking = request.get(
"tracking", "mlflow" if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED else ""
)
tracking = tracking[0] if isinstance(tracking, list) else tracking
tracking_uri = request.get("tracking_uri")
tracking_uri = tracking_uri if tracking_uri else settings.MONAI_LABEL_TRACKING_URI
tracking_experiment_name = request.get("tracking_experiment_name")
tracking_experiment_name = tracking_experiment_name if tracking_experiment_name else request.get("model")
tracking_run_name = request.get("tracking_run_name")
logger.info(f"(Experiment Management) Tracking: {tracking}")
logger.info(f"(Experiment Management) Tracking URI: {tracking_uri}")
logger.info(f"(Experiment Management) Experiment Name: {tracking_experiment_name}")
logger.info(f"(Experiment Management) Run Name: {tracking_run_name}")
train_handlers = self.bundle_config.get(self.const.key_train_handlers(), [])
model_filename = request.get("model_filename", "model.pt")
model_filename = model_filename if isinstance(model_filename, str) else model_filename[0]
model_pytorch = os.path.join(self.bundle_path, "models", model_filename)
self._load_checkpoint(model_pytorch, pretrained, train_handlers)
overrides = {
self.const.key_bundle_root(): self.bundle_path,
self.const.key_train_trainer_max_epochs(): max_epochs,
self.const.key_train_dataset_data(): train_ds,
self.const.key_device(): device,
self.const.key_train_handlers(): train_handlers,
}
# update config options from user
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
displayable_configs = self.bundle_config.get_parsed_content(k, instantiate=True)
overrides[k] = {c: request[c] for c in displayable_configs.keys()}
if tracking and tracking.lower() != "none":
overrides[self.const.key_tracking()] = tracking
if tracking_uri:
overrides[self.const.key_tracking_uri()] = tracking_uri
if tracking_experiment_name:
overrides[self.const.key_experiment_name()] = tracking_experiment_name
if tracking_run_name:
overrides[self.const.key_run_name()] = tracking_run_name
# external validation datalist supported through bundle itself (pass -1 in the request to use the same)
if val_ds is not None:
overrides[self.const.key_validate_dataset_data()] = val_ds
# allow derived class to update further overrides
self._update_overrides(overrides)
if multi_gpu:
config_paths = [
c
for c in self.const.multi_gpu_configs()
if os.path.exists(os.path.join(self.bundle_path, "configs", c))
] | if not config_paths:
logger.warning(
f"Ignore Multi-GPU Training; No multi-gpu train config {self.const.multi_gpu_configs()} exists" | random_line_split |
|
bundle.py | key_network(self) -> str:
return "network"
def key_network_def(self) -> str:
return "network_def"
def key_train_trainer_max_epochs(self) -> str:
return "train#trainer#max_epochs"
def key_train_dataset_data(self) -> str:
return "train#dataset#data"
def key_train_handlers(self) -> str:
return "train#handlers"
def key_validate_dataset_data(self) -> str:
|
def key_tracking(self) -> str:
return "tracking"
def key_tracking_uri(self) -> str:
return "tracking_uri"
def key_experiment_name(self) -> str:
return "experiment_name"
def key_run_name(self) -> str:
return "run_name"
def key_displayable_configs(self) -> Sequence[str]:
return ["displayable_configs"]
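# Illustrative note (added for clarity, not part of the original constants class):
# the "#"-separated strings above follow MONAI's bundle config id convention, so a
# key like "train#trainer#max_epochs" addresses config["train"]["trainer"]["max_epochs"].
# A hypothetical override dict built only from keys shown here might look like:
#
#   overrides = {
#       "train#trainer#max_epochs": 50,
#       "train#dataset#data": [{"image": "img.nii.gz", "label": "lbl.nii.gz"}],  # placeholder paths
#       "validate#dataset#data": [],
#   }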
class BundleTrainTask(TrainTask):
def __init__(
self,
path: str,
conf: Dict[str, str],
const: Optional[BundleConstants] = None,
enable_tracking=False,
model_dict_key="model",
load_strict=False,
):
self.valid: bool = False
self.conf = conf
self.const = const if const else BundleConstants()
self.enable_tracking = enable_tracking
self.model_dict_key = model_dict_key
self.load_strict = load_strict
config_paths = [c for c in self.const.configs() if os.path.exists(os.path.join(path, "configs", c))]
if not config_paths:
logger.warning(f"Ignore {path} as there is no train config {self.const.configs()} exists")
return
self.bundle_path = path
self.bundle_config_path = os.path.join(path, "configs", config_paths[0])
self.bundle_config = self._load_bundle_config(self.bundle_path, self.bundle_config_path)
# https://docs.monai.io/en/latest/mb_specification.html#metadata-json-file
self.bundle_metadata_path = os.path.join(path, "configs", "metadata.json")
with open(os.path.join(path, "configs", self.const.metadata_json())) as fp:
metadata = json.load(fp)
super().__init__(metadata.get("description", ""))
self.valid = True
self.version = metadata.get("version")
def is_valid(self):
return self.valid
def info(self):
i = super().info()
i["version"] = self.version
return i
def config(self):
# Add models and param options to the train option panel
pytorch_models = [os.path.basename(p) for p in glob.glob(os.path.join(self.bundle_path, "models", "*.pt"))]
pytorch_models.sort(key=len)
config_options = {
"device": device_list(), # DEVICE
"pretrained": True, # USE EXISTING CHECKPOINT/PRETRAINED MODEL
"max_epochs": 50, # TOTAL EPOCHS TO RUN
"val_split": 0.2, # VALIDATION SPLIT; -1 TO USE DEFAULT FROM BUNDLE
"multi_gpu": True, # USE MULTI-GPU
"gpus": "all", # COMMA SEPARATE DEVICE INDEX
"tracking": ["mlflow", "None"]
if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED
else ["None", "mlflow"],
"tracking_uri": settings.MONAI_LABEL_TRACKING_URI,
"tracking_experiment_name": "",
"run_id": "", # bundle run id, if different from default
"model_filename": pytorch_models,
}
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
config_options.update(self.bundle_config.get_parsed_content(k, instantiate=True)) # type: ignore
return config_options
def _fetch_datalist(self, request, datastore: Datastore):
datalist = datastore.datalist()
# only use image and label attributes; skip for other meta info from datastore for now
datalist = [{"image": d["image"], "label": d["label"]} for d in datalist if d]
if "detection" in request.get("model"):
# Generate datalist for detection task, box and label keys are used by default.
# Future: either use box and label keys for all detection models, or set these keys by config.
for idx, d in enumerate(datalist):
with open(d["label"]) as fp:
json_object = json.loads(fp.read()) # load box coordinates from subject JSON
bboxes = [bdict["center"] + bdict["size"] for bdict in json_object["markups"]]
# Only detection is supported; classification labels are not supported in the bundle yet.
# 0 is used for all positive boxes; waiting for sync.
datalist[idx] = {"image": d["image"], "box": bboxes, "label": [0] * len(bboxes)}
return datalist
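# Illustrative sketch (assumed label-file layout, inferred from the loop above and
# not guaranteed by this class): each detection label JSON is expected to provide a
# "markups" list whose entries carry "center" and "size" triplets, e.g.
#
#   {"markups": [{"center": [10.0, 20.0, 30.0], "size": [8.0, 8.0, 8.0]}]}
#
# which becomes a datalist entry of the form (image path is a placeholder):
#
#   {"image": "image.nii.gz", "box": [[10.0, 20.0, 30.0, 8.0, 8.0, 8.0]], "label": [0]}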
def _partition_datalist(self, datalist, request, shuffle=False):
val_split = request.get("val_split", 0.2)
logger.info(f"Total Records in Dataset: {len(datalist)}; Validation Split: {val_split}")
if val_split > 0.0:
train_datalist, val_datalist = partition_dataset(
datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle
)
else:
train_datalist = datalist
val_datalist = None if val_split < 0 else []
logger.info(f"Total Records for Training: {len(train_datalist)}")
logger.info(f"Total Records for Validation: {len(val_datalist) if val_datalist else ''}")
return train_datalist, val_datalist
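# Worked example (numbers chosen for illustration): with 10 records and the default
# val_split of 0.2, partition_dataset is called with ratios [0.8, 0.2] and returns
# roughly 8 training and 2 validation records. val_split == 0 keeps everything for
# training with an empty validation list, while a negative val_split returns None so
# the bundle's own validation datalist (if any) is used instead.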
def _load_checkpoint(self, model_pytorch, pretrained, train_handlers):
load_path = model_pytorch if pretrained else None
if load_path and os.path.exists(load_path):
logger.info(f"Add Checkpoint Loader for Path: {load_path}")
load_dict = {self.model_dict_key: f"$@{self.const.key_network()}"}
if not [t for t in train_handlers if t.get("_target_") == CheckpointLoader.__name__]:
loader = {
"_target_": CheckpointLoader.__name__,
"load_path": load_path,
"load_dict": load_dict,
"strict": self.load_strict,
}
train_handlers.insert(0, loader)
def __call__(self, request, datastore: Datastore):
logger.info(f"Train Request: {request}")
ds = self._fetch_datalist(request, datastore)
train_ds, val_ds = self._partition_datalist(ds, request)
max_epochs = request.get("max_epochs", 50)
pretrained = request.get("pretrained", True)
multi_gpu = request.get("multi_gpu", True)
force_multi_gpu = request.get("force_multi_gpu", False)
run_id = request.get("run_id", "run")
multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False
gpus = request.get("gpus", "all")
gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")]
multi_gpu = True if force_multi_gpu or multi_gpu and len(gpus) > 1 else False
logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}")
logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
device = name_to_device(request.get("device", "cuda"))
logger.info(f"Using device: {device}; Type: {type(device)}")
tracking = request.get(
"tracking", "mlflow" if self.enable_tracking and settings.MONAI_LABEL_TRACKING_ENABLED else ""
)
tracking = tracking[0] if isinstance(tracking, list) else tracking
tracking_uri = request.get("tracking_uri")
tracking_uri = tracking_uri if tracking_uri else settings.MONAI_LABEL_TRACKING_URI
tracking_experiment_name = request.get("tracking_experiment_name")
tracking_experiment_name = tracking_experiment_name if tracking_experiment_name else request.get("model")
tracking_run_name = request.get("tracking_run_name")
logger.info(f"(Experiment Management) Tracking: {tracking}")
logger.info(f"(Experiment Management) Tracking URI: {tracking_uri}")
logger.info(f"(Experiment Management) Experiment Name: {tracking_experiment_name}")
logger.info(f"(Experiment Management) Run Name: {tracking_run_name}")
train_handlers = self.bundle_config.get(self.const.key_train_handlers(), [])
model_filename = request.get("model_filename", "model.pt")
model_filename = model_filename if isinstance(model_filename, str) else model_filename[0]
model_pytorch = os.path.join(self.bundle_path, "models", model_filename)
self._load_checkpoint(model_pytorch, pretrained, train_handlers)
overrides = {
self.const.key_bundle_root(): self.bundle_path,
self.const.key_train_trainer_max_epochs(): max_epochs,
self.const.key_train_dataset_data(): train_ds,
self.const.key_device(): device,
self.const.key_train_handlers(): train_handlers,
}
# update config options from user
for k in self.const.key_displayable_configs():
if self.bundle_config.get(k):
displayable_configs = self.bundle_config.get_parsed_content(k, | return "validate#dataset#data" | identifier_body |
__init__.py | (0, self.height)
if not self.bomb_map[x][y] and not self._count_adjacent_bombs(x, y):
self.click(x, y)
break
tries += 1
def create_image(self, cheat=False) -> PIL.Image.Image:
w = self.width * self.cell_size
h = self.height * self.cell_size
im = Image.new("RGBA", (w, h), "white")
draw = ImageDraw.Draw(im)
for y in range(self.height):
for x in range(self.width):
cx = (x + 0.5) * self.cell_size
cy = (y + 0.5) * self.cell_size
play = self.play[x][y]
# draw background
tile = TILE_GRAPHICS[play].copy().resize((self.cell_size, self.cell_size), PIL.Image.BICUBIC)
im.paste(tile, (x * self.cell_size, y * self.cell_size), mask=tile)
# location text
if play in UNKNOWN_OR_FLAGGED:
draw.text((x * self.cell_size + 2, y * self.cell_size + 2), cell_name(x, y), (68, 68, 150),
font=self.cell_font)
if play == Play.CLEAR:
count = self._count_adjacent_bombs(x, y)
if count:
draw_centered_text(draw, cx, cy - 2, str(count), (217, 50, 50), font=self.count_font)
if cheat and self.bomb_map[x][y]:
draw_centered_text(draw, cx, cy - 2, "XX", (217, 50, 50), font=self.count_font)
return im
async def create_image_async(self, *args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None,
functools.partial(self.create_image, *args, **kwargs))
def parse_pos(self, str):
m = POS_RE.match(str)
if not m:
raise CommandError("Invalid position '{}' (expected something like C2)".format(str))
x = string.ascii_uppercase.find(m.group(1).upper())
y = int(m.group(2)) - 1
if self._in_bounds(x, y):
return x, y
else:
raise CommandError("Your position '{}' isn't in the grid!".format(str))
def toggle_flag(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
self.play[x][y] = Play.FLAGGED if self.play[x][y] == Play.UNKNOWN else Play.UNKNOWN
else:
raise CommandError("You can't flag that cell!")
def click(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
if self.bomb_map[x][y]: # bomb
self._mutate_cell(x, y, Play.EXPLODED)
self.state = State.LOST
else:
self._clear_cell(x, y, set())
if self.remaining_unknown == 0:
self.state = State.WON
else:
raise CommandError("You can't click that cell!")
def _in_bounds(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
def _is_bomb(self, x, y):
return self._in_bounds(x, y) and self.bomb_map[x][y]
def _count_adjacent_bombs(self, x, y):
return sum([
self._is_bomb(x - 1, y),
self._is_bomb(x, y - 1),
self._is_bomb(x + 1, y),
self._is_bomb(x, y + 1),
self._is_bomb(x - 1, y - 1),
self._is_bomb(x - 1, y + 1),
self._is_bomb(x + 1, y - 1),
self._is_bomb(x + 1, y + 1),
])
def _clear_cell(self, x, y, visited):
if not self._in_bounds(x, y):
return
if (x, y) in visited:
return
visited.add((x, y))
if self.play[x][y] in UNKNOWN_OR_FLAGGED and not self.bomb_map[x][y]:
self._mutate_cell(x, y, Play.CLEAR)
if not self._count_adjacent_bombs(x, y):
self._clear_cell(x - 1, y, visited)
self._clear_cell(x, y - 1, visited)
self._clear_cell(x + 1, y, visited)
self._clear_cell(x, y + 1, visited)
self._clear_cell(x - 1, y - 1, visited)
self._clear_cell(x - 1, y + 1, visited)
self._clear_cell(x + 1, y - 1, visited)
self._clear_cell(x + 1, y + 1, visited)
def _mutate_cell(self, x, y, new_play: Play):
if self.play[x][y] in UNKNOWN_OR_FLAGGED and new_play != Play.UNKNOWN:
self.remaining_unknown -= 1
self.play[x][y] = new_play
else:
raise AssertionError("this shouldn't happen (is {}, wants to be {})".format(self.play[x][y], new_play))
cache = cachetools.LRUCache(maxsize=1000)
@commands.create("minesweeper start", "mine start", "m start", category="Games", params=[])
@channel_only
@games_allowed_only
async def start(message):
"""
Starts a game of minesweeper.
Example::
mine start
"""
key = (message.transport.id, message.server.id, message.channel.id)
if key in cache:
game = cache[key]
else:
game = Game(12, 12, scoped_config.get(bomb_chance, message.channel) / 100, random.Random())
cache[key] = game
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper", "mine", "m", category="Games")
@channel_only
@games_allowed_only
async def click(message):
"""
Click one or more cells on minesweeper.
Start a game with::
mine start
Then choose one or more cells::
mine b5 g7 a7 a1
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.click(*game.parse_pos(position))
if game.state == State.WON:
del cache[key]
return Response("\N{TROPHY} \N{TROPHY} YOU ARE WINNER! \N{TROPHY} \N{TROPHY}", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
elif game.state == State.LOST:
del cache[key]
return Response("\N{BOMB} \N{COLLISION SYMBOL} \N{COLLISION SYMBOL} BOOOOM!!!", attachments=[
ImageAttachment(await game.create_image_async(), "minesweeper.png")
])
else:
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper flag", "mine flag", "m flag", category="Games")
@channel_only
@games_allowed_only
async def flag(message):
"""
Toggle flags on one or more cells on minesweeper.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
positions = parse_list(message.content)
for position in positions:
if game.state != State.IN_PLAY:
break
game.toggle_flag(*game.parse_pos(position))
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper cheat", "mine cheat", category="Games", params=[])
@channel_only
@owners_only
async def cheat(message):
"""
Bot administrator command to show where bombs are for testing.
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # type: Game
except KeyError:
raise CommandError("Say 'start' to start a game first.")
return Response("", attachments=[ImageAttachment(await game.create_image_async(cheat=True), "minesweeper.png")])
def setup():
| config.add(bomb_chance)
commands.add(start)
commands.add(click)
commands.add(flag)
commands.add(cheat) | identifier_body |
|
__init__.py | _text(draw, x, y, text, *args, font, **kwargs):
w, h = draw.textsize(text, font=font)
draw.text((x - w / 2, y - h / 2), text, *args, font=font, **kwargs)
def load_tile_graphics():
images = {}
for play in Play:
with pkg_resources.resource_stream(__name__, "assets/{}.png".format(play.name.lower())) as f:
images[play] = Image.open(f).convert('RGBA')
return images
class Play(Enum):
UNKNOWN = 'unknown'
CLEAR = 'clear'
FLAGGED = 'flagged'
EXPLODED = 'exploded'
class State(Enum):
IN_PLAY = 'in_play'
WON = 'won'
LOST = 'lost'
UNKNOWN_OR_FLAGGED = {Play.UNKNOWN, Play.FLAGGED}
TILE_GRAPHICS = load_tile_graphics()
class Game:
def __init__(self, w, h, mine_fraction, r=None):
r = r or random.Random()
self.width = w
self.height = h
self.bomb_map = list(map(lambda y: list(map(lambda x: r.random() <= mine_fraction, range(w))), range(h)))
self.play = list(map(lambda y: list(map(lambda x: Play.UNKNOWN, range(w))), range(h)))
self.state = State.IN_PLAY
self.remaining_unknown = w * h
self.bomb_count = 0
self.cell_size = 25
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.cell_font = ImageFont.truetype(f, 10)
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.count_font = ImageFont.truetype(f, 15)
# count bombs
for x in range(w):
for y in range(h):
if self.bomb_map[x][y]:
self.bomb_count += 1
self.remaining_unknown -= 1
if self.bomb_count == 0:
raise CommandError("No bombs found in created game! Make sure the bomb "
"`minesweeper/bomb_chance` setting is not near 0%.")
# start it off
tries = 0
while self.remaining_unknown > 0 and tries < 20:
x = r.randrange(0, self.width)
y = r.randrange(0, self.height)
if not self.bomb_map[x][y] and not self._count_adjacent_bombs(x, y):
self.click(x, y)
break
tries += 1
def create_image(self, cheat=False) -> PIL.Image.Image:
w = self.width * self.cell_size
h = self.height * self.cell_size
im = Image.new("RGBA", (w, h), "white")
draw = ImageDraw.Draw(im)
for y in range(self.height):
for x in range(self.width):
cx = (x + 0.5) * self.cell_size
cy = (y + 0.5) * self.cell_size
play = self.play[x][y]
# draw background
tile = TILE_GRAPHICS[play].copy().resize((self.cell_size, self.cell_size), PIL.Image.BICUBIC)
im.paste(tile, (x * self.cell_size, y * self.cell_size), mask=tile)
# location text
if play in UNKNOWN_OR_FLAGGED:
draw.text((x * self.cell_size + 2, y * self.cell_size + 2), cell_name(x, y), (68, 68, 150),
font=self.cell_font)
if play == Play.CLEAR:
count = self._count_adjacent_bombs(x, y)
if count:
draw_centered_text(draw, cx, cy - 2, str(count), (217, 50, 50), font=self.count_font)
if cheat and self.bomb_map[x][y]:
draw_centered_text(draw, cx, cy - 2, "XX", (217, 50, 50), font=self.count_font)
return im
async def create_image_async(self, *args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None,
functools.partial(self.create_image, *args, **kwargs))
def parse_pos(self, str):
m = POS_RE.match(str)
if not m:
raise CommandError("Invalid position '{}' (expected something like C2)".format(str))
x = string.ascii_uppercase.find(m.group(1).upper())
y = int(m.group(2)) - 1
if self._in_bounds(x, y):
return x, y
else:
raise CommandError("Your position '{}' isn't in the grid!".format(str))
def toggle_flag(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
self.play[x][y] = Play.FLAGGED if self.play[x][y] == Play.UNKNOWN else Play.UNKNOWN
else:
raise CommandError("You can't flag that cell!")
def click(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
if self.bomb_map[x][y]: # bomb
self._mutate_cell(x, y, Play.EXPLODED)
self.state = State.LOST
else:
self._clear_cell(x, y, set())
if self.remaining_unknown == 0:
|
else:
raise CommandError("You can't click that cell!")
def _in_bounds(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
def _is_bomb(self, x, y):
return self._in_bounds(x, y) and self.bomb_map[x][y]
def _count_adjacent_bombs(self, x, y):
return sum([
self._is_bomb(x - 1, y),
self._is_bomb(x, y - 1),
self._is_bomb(x + 1, y),
self._is_bomb(x, y + 1),
self._is_bomb(x - 1, y - 1),
self._is_bomb(x - 1, y + 1),
self._is_bomb(x + 1, y - 1),
self._is_bomb(x + 1, y + 1),
])
def _clear_cell(self, x, y, visited):
if not self._in_bounds(x, y):
return
if (x, y) in visited:
return
visited.add((x, y))
if self.play[x][y] in UNKNOWN_OR_FLAGGED and not self.bomb_map[x][y]:
self._mutate_cell(x, y, Play.CLEAR)
if not self._count_adjacent_bombs(x, y):
self._clear_cell(x - 1, y, visited)
self._clear_cell(x, y - 1, visited)
self._clear_cell(x + 1, y, visited)
self._clear_cell(x, y + 1, visited)
self._clear_cell(x - 1, y - 1, visited)
self._clear_cell(x - 1, y + 1, visited)
self._clear_cell(x + 1, y - 1, visited)
self._clear_cell(x + 1, y + 1, visited)
def _mutate_cell(self, x, y, new_play: Play):
if self.play[x][y] in UNKNOWN_OR_FLAGGED and new_play != Play.UNKNOWN:
self.remaining_unknown -= 1
self.play[x][y] = new_play
else:
raise AssertionError("this shouldn't happen (is {}, wants to be {})".format(self.play[x][y], new_play))
cache = cachetools.LRUCache(maxsize=1000)
@commands.create("minesweeper start", "mine start", "m start", category="Games", params=[])
@channel_only
@games_allowed_only
async def start(message):
"""
Starts a game of minesweeper.
Example::
mine start
"""
key = (message.transport.id, message.server.id, message.channel.id)
if key in cache:
game = cache[key]
else:
game = Game(12, 12, scoped_config.get(bomb_chance, message.channel) / 100, random.Random())
cache[key] = game
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper", "mine", "m", category="Games")
@channel_only
@games_allowed_only
async def click(message):
"""
Click one or more cells on minesweeper.
Start a game with::
mine start
Then choose one or more cells::
mine b5 g7 a7 a1
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] # | self.state = State.WON | conditional_block |
__init__.py | _text(draw, x, y, text, *args, font, **kwargs):
w, h = draw.textsize(text, font=font)
draw.text((x - w / 2, y - h / 2), text, *args, font=font, **kwargs)
def load_tile_graphics():
images = {}
for play in Play:
with pkg_resources.resource_stream(__name__, "assets/{}.png".format(play.name.lower())) as f:
images[play] = Image.open(f).convert('RGBA')
return images
class Play(Enum):
UNKNOWN = 'unknown'
CLEAR = 'clear'
FLAGGED = 'flagged'
EXPLODED = 'exploded'
class State(Enum):
IN_PLAY = 'in_play'
WON = 'won'
LOST = 'lost'
UNKNOWN_OR_FLAGGED = {Play.UNKNOWN, Play.FLAGGED}
TILE_GRAPHICS = load_tile_graphics()
class Game:
def __init__(self, w, h, mine_fraction, r=None):
r = r or random.Random()
self.width = w
self.height = h
self.bomb_map = list(map(lambda y: list(map(lambda x: r.random() <= mine_fraction, range(w))), range(h)))
self.play = list(map(lambda y: list(map(lambda x: Play.UNKNOWN, range(w))), range(h)))
self.state = State.IN_PLAY
self.remaining_unknown = w * h
self.bomb_count = 0
self.cell_size = 25
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.cell_font = ImageFont.truetype(f, 10)
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.count_font = ImageFont.truetype(f, 15)
# count bombs
for x in range(w):
for y in range(h):
if self.bomb_map[x][y]:
self.bomb_count += 1
self.remaining_unknown -= 1
if self.bomb_count == 0:
raise CommandError("No bombs found in created game! Make sure the bomb "
"`minesweeper/bomb_chance` setting is not near 0%.")
# start it off
tries = 0
while self.remaining_unknown > 0 and tries < 20:
x = r.randrange(0, self.width)
y = r.randrange(0, self.height)
if not self.bomb_map[x][y] and not self._count_adjacent_bombs(x, y):
self.click(x, y)
break
tries += 1
def create_image(self, cheat=False) -> PIL.Image.Image:
w = self.width * self.cell_size
h = self.height * self.cell_size
im = Image.new("RGBA", (w, h), "white")
draw = ImageDraw.Draw(im)
for y in range(self.height):
for x in range(self.width):
cx = (x + 0.5) * self.cell_size
cy = (y + 0.5) * self.cell_size
play = self.play[x][y]
# draw background
tile = TILE_GRAPHICS[play].copy().resize((self.cell_size, self.cell_size), PIL.Image.BICUBIC)
im.paste(tile, (x * self.cell_size, y * self.cell_size), mask=tile)
# location text
if play in UNKNOWN_OR_FLAGGED:
draw.text((x * self.cell_size + 2, y * self.cell_size + 2), cell_name(x, y), (68, 68, 150),
font=self.cell_font)
if play == Play.CLEAR:
count = self._count_adjacent_bombs(x, y)
if count:
draw_centered_text(draw, cx, cy - 2, str(count), (217, 50, 50), font=self.count_font)
if cheat and self.bomb_map[x][y]:
draw_centered_text(draw, cx, cy - 2, "XX", (217, 50, 50), font=self.count_font)
return im
async def create_image_async(self, *args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None,
functools.partial(self.create_image, *args, **kwargs))
def parse_pos(self, str):
m = POS_RE.match(str)
if not m:
raise CommandError("Invalid position '{}' (expected something like C2)".format(str))
x = string.ascii_uppercase.find(m.group(1).upper())
y = int(m.group(2)) - 1
if self._in_bounds(x, y):
return x, y
else:
raise CommandError("Your position '{}' isn't in the grid!".format(str))
def toggle_flag(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
self.play[x][y] = Play.FLAGGED if self.play[x][y] == Play.UNKNOWN else Play.UNKNOWN
else:
raise CommandError("You can't flag that cell!")
def click(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
if self.bomb_map[x][y]: # bomb
self._mutate_cell(x, y, Play.EXPLODED)
self.state = State.LOST
else:
self._clear_cell(x, y, set())
if self.remaining_unknown == 0:
self.state = State.WON
else:
raise CommandError("You can't click that cell!")
def _in_bounds(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
def _is_bomb(self, x, y):
return self._in_bounds(x, y) and self.bomb_map[x][y]
def _count_adjacent_bombs(self, x, y):
return sum([
self._is_bomb(x - 1, y),
self._is_bomb(x, y - 1),
self._is_bomb(x + 1, y),
self._is_bomb(x, y + 1),
self._is_bomb(x - 1, y - 1),
self._is_bomb(x - 1, y + 1),
self._is_bomb(x + 1, y - 1),
self._is_bomb(x + 1, y + 1),
])
def _clear_cell(self, x, y, visited):
if not self._in_bounds(x, y):
return
if (x, y) in visited:
return
visited.add((x, y))
if self.play[x][y] in UNKNOWN_OR_FLAGGED and not self.bomb_map[x][y]:
self._mutate_cell(x, y, Play.CLEAR)
if not self._count_adjacent_bombs(x, y):
self._clear_cell(x - 1, y, visited)
self._clear_cell(x, y - 1, visited)
self._clear_cell(x + 1, y, visited)
self._clear_cell(x, y + 1, visited)
self._clear_cell(x - 1, y - 1, visited)
self._clear_cell(x - 1, y + 1, visited)
self._clear_cell(x + 1, y - 1, visited)
self._clear_cell(x + 1, y + 1, visited)
def _mutate_cell(self, x, y, new_play: Play):
if self.play[x][y] in UNKNOWN_OR_FLAGGED and new_play != Play.UNKNOWN:
self.remaining_unknown -= 1
self.play[x][y] = new_play
else:
raise AssertionError("this shouldn't happen (is {}, wants to be {})".format(self.play[x][y], new_play))
cache = cachetools.LRUCache(maxsize=1000)
@commands.create("minesweeper start", "mine start", "m start", category="Games", params=[])
@channel_only
@games_allowed_only
async def | (message):
"""
Starts a game of minesweeper.
Example::
mine start
"""
key = (message.transport.id, message.server.id, message.channel.id)
if key in cache:
game = cache[key]
else:
game = Game(12, 12, scoped_config.get(bomb_chance, message.channel) / 100, random.Random())
cache[key] = game
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper", "mine", "m", category="Games")
@channel_only
@games_allowed_only
async def click(message):
"""
Click one or more cells on minesweeper.
Start a game with::
mine start
Then choose one or more cells::
mine b5 g7 a7 a1
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] | start | identifier_name |
__init__.py | _text(draw, x, y, text, *args, font, **kwargs):
w, h = draw.textsize(text, font=font)
draw.text((x - w / 2, y - h / 2), text, *args, font=font, **kwargs)
def load_tile_graphics():
images = {}
for play in Play:
with pkg_resources.resource_stream(__name__, "assets/{}.png".format(play.name.lower())) as f:
images[play] = Image.open(f).convert('RGBA')
return images
class Play(Enum):
UNKNOWN = 'unknown'
CLEAR = 'clear'
FLAGGED = 'flagged'
EXPLODED = 'exploded'
class State(Enum):
IN_PLAY = 'in_play'
WON = 'won'
LOST = 'lost'
UNKNOWN_OR_FLAGGED = {Play.UNKNOWN, Play.FLAGGED}
TILE_GRAPHICS = load_tile_graphics()
class Game:
def __init__(self, w, h, mine_fraction, r=None):
r = r or random.Random()
self.width = w
self.height = h
self.bomb_map = list(map(lambda y: list(map(lambda x: r.random() <= mine_fraction, range(w))), range(h)))
self.play = list(map(lambda y: list(map(lambda x: Play.UNKNOWN, range(w))), range(h)))
self.state = State.IN_PLAY
self.remaining_unknown = w * h
self.bomb_count = 0
self.cell_size = 25
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.cell_font = ImageFont.truetype(f, 10)
with pkg_resources.resource_stream("plumeria", 'fonts/FiraSans-Regular.ttf') as f:
self.count_font = ImageFont.truetype(f, 15)
# count bombs
for x in range(w):
for y in range(h):
if self.bomb_map[x][y]:
self.bomb_count += 1
self.remaining_unknown -= 1
if self.bomb_count == 0:
raise CommandError("No bombs found in created game! Make sure the bomb "
"`minesweeper/bomb_chance` setting is not near 0%.")
# start it off
tries = 0
while self.remaining_unknown > 0 and tries < 20:
x = r.randrange(0, self.width)
y = r.randrange(0, self.height)
if not self.bomb_map[x][y] and not self._count_adjacent_bombs(x, y):
self.click(x, y)
break
tries += 1
def create_image(self, cheat=False) -> PIL.Image.Image:
w = self.width * self.cell_size
h = self.height * self.cell_size
im = Image.new("RGBA", (w, h), "white")
draw = ImageDraw.Draw(im)
for y in range(self.height):
for x in range(self.width):
cx = (x + 0.5) * self.cell_size
cy = (y + 0.5) * self.cell_size
play = self.play[x][y]
# draw background
tile = TILE_GRAPHICS[play].copy().resize((self.cell_size, self.cell_size), PIL.Image.BICUBIC)
im.paste(tile, (x * self.cell_size, y * self.cell_size), mask=tile)
# location text
if play in UNKNOWN_OR_FLAGGED:
draw.text((x * self.cell_size + 2, y * self.cell_size + 2), cell_name(x, y), (68, 68, 150),
font=self.cell_font)
if play == Play.CLEAR:
count = self._count_adjacent_bombs(x, y)
if count:
draw_centered_text(draw, cx, cy - 2, str(count), (217, 50, 50), font=self.count_font)
if cheat and self.bomb_map[x][y]:
draw_centered_text(draw, cx, cy - 2, "XX", (217, 50, 50), font=self.count_font)
return im
async def create_image_async(self, *args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None,
functools.partial(self.create_image, *args, **kwargs))
def parse_pos(self, str):
m = POS_RE.match(str)
if not m:
raise CommandError("Invalid position '{}' (expected something like C2)".format(str))
x = string.ascii_uppercase.find(m.group(1).upper())
y = int(m.group(2)) - 1
if self._in_bounds(x, y):
return x, y
else:
raise CommandError("Your position '{}' isn't in the grid!".format(str))
def toggle_flag(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
self.play[x][y] = Play.FLAGGED if self.play[x][y] == Play.UNKNOWN else Play.UNKNOWN
else:
raise CommandError("You can't flag that cell!")
def click(self, x, y):
if self.state != State.IN_PLAY:
raise AssertionError("invalid state")
if self.play[x][y] in (Play.UNKNOWN, Play.FLAGGED):
if self.bomb_map[x][y]: # bomb
self._mutate_cell(x, y, Play.EXPLODED)
self.state = State.LOST
else: |
def _in_bounds(self, x, y):
return 0 <= x < self.width and 0 <= y < self.height
def _is_bomb(self, x, y):
return self._in_bounds(x, y) and self.bomb_map[x][y]
def _count_adjacent_bombs(self, x, y):
return sum([
self._is_bomb(x - 1, y),
self._is_bomb(x, y - 1),
self._is_bomb(x + 1, y),
self._is_bomb(x, y + 1),
self._is_bomb(x - 1, y - 1),
self._is_bomb(x - 1, y + 1),
self._is_bomb(x + 1, y - 1),
self._is_bomb(x + 1, y + 1),
])
def _clear_cell(self, x, y, visited):
if not self._in_bounds(x, y):
return
if (x, y) in visited:
return
visited.add((x, y))
if self.play[x][y] in UNKNOWN_OR_FLAGGED and not self.bomb_map[x][y]:
self._mutate_cell(x, y, Play.CLEAR)
if not self._count_adjacent_bombs(x, y):
self._clear_cell(x - 1, y, visited)
self._clear_cell(x, y - 1, visited)
self._clear_cell(x + 1, y, visited)
self._clear_cell(x, y + 1, visited)
self._clear_cell(x - 1, y - 1, visited)
self._clear_cell(x - 1, y + 1, visited)
self._clear_cell(x + 1, y - 1, visited)
self._clear_cell(x + 1, y + 1, visited)
def _mutate_cell(self, x, y, new_play: Play):
if self.play[x][y] in UNKNOWN_OR_FLAGGED and new_play != Play.UNKNOWN:
self.remaining_unknown -= 1
self.play[x][y] = new_play
else:
raise AssertionError("this shouldn't happen (is {}, wants to be {})".format(self.play[x][y], new_play))
cache = cachetools.LRUCache(maxsize=1000)
@commands.create("minesweeper start", "mine start", "m start", category="Games", params=[])
@channel_only
@games_allowed_only
async def start(message):
"""
Starts a game of minesweeper.
Example::
mine start
"""
key = (message.transport.id, message.server.id, message.channel.id)
if key in cache:
game = cache[key]
else:
game = Game(12, 12, scoped_config.get(bomb_chance, message.channel) / 100, random.Random())
cache[key] = game
return Response("", attachments=[ImageAttachment(await game.create_image_async(), "minesweeper.png")])
@commands.create("minesweeper", "mine", "m", category="Games")
@channel_only
@games_allowed_only
async def click(message):
"""
Click one or more cells on minesweeper.
Start a game with::
mine start
Then choose one or more cells::
mine b5 g7 a7 a1
"""
key = (message.transport.id, message.server.id, message.channel.id)
try:
game = cache[key] | self._clear_cell(x, y, set())
if self.remaining_unknown == 0:
self.state = State.WON
else:
raise CommandError("You can't click that cell!") | random_line_split |
lsh.py |
def jaccard_sim(X, Y):
"""Jaccard similarity between two sets"""
x = set(X)
y = set(Y)
return float(len(x & y)) / len(x | y)
def jaccard_dist(X, Y):
"""Jaccard distance between two sets"""
return 1 - jaccard_sim(X, Y)
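# Worked example (illustrative only): for X = {1, 2, 3} and Y = {2, 3, 4} the
# intersection has 2 elements and the union has 4, so jaccard_sim(X, Y) == 0.5
# and jaccard_dist(X, Y) == 0.5.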
class Signature(object):
"""Signature Base class."""
def __init__(self, dim):
self.dim = dim
self.hashes = self.hash_functions()
def hash_functions(self):
"""Returns dim different hash functions"""
pass
def sign(self, object):
"""Return the signature for object s"""
pass
class MinHashSignature(Signature):
"""Creates signatures for sets/tuples using minhash."""
def hash_functions(self):
"""Return dim different hash functions"""
def hash_factory(n):
return lambda x: hash("salt" + str(n) + str(x) + "salt")
return [ hash_factory(_) for _ in range(self.dim) ]
def sign(self, s):
"""Returns minhash signature for set s"""
sig = [ float("inf") ] * self.dim
for hash_ix, hash_fn in enumerate(self.hashes):
sig[hash_ix] = min(hash_fn(value) for value in s)
return sig
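# Sketch of the property this relies on (standard minhash reasoning, stated here
# for clarity): for one hash function the probability that two sets share the same
# minimum hash value is (approximately) their Jaccard similarity, so the fraction
# of matching slots between two signatures estimates jaccard_sim. For example:
#
#   signer = MinHashSignature(10)
#   a = signer.sign({1, 2, 3})
#   b = signer.sign({2, 3, 4})
#   sum(x == y for x, y in zip(a, b))  # about 5 of 10 slots agree in expectation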
class LSH(object):
"""Locality sensitive hashing. Uses a banding approach to hash
similar signatures to the same buckets."""
def __init__(self, length, threshold):
self.length = length
self.threshold = threshold
self.bandwidth = self.get_bandwidth(length, threshold)
def hash(self, sig, band_idx=None):
"""Generate hashvals for this signature"""
for band in zip(*(iter(sig),) * self.bandwidth):
yield hash("salt" + str(band) + "tlas")
def get_bandwidth(self, n, t):
"""Approximates the bandwidth (number of rows in each band)
needed to get threshold.
Threshold t = (1/b) ** (1/r) where
b = #bands
r = #rows per band
n = b * r = #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (t ** r)
except ZeroDivisionError: # Divide by zero, your signature is huge
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
def get_threshold(self):
r = self.bandwidth
b = self.length / r
return (1. / b) ** (1. / r)
def get_n_bands(self):
return int(self.length / self.bandwidth)
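# Worked example (numbers follow from the code above): LSH(length=10, threshold=0.5)
# picks bandwidth r = 2, because |10 - (1 / 0.5**r) * r| is 8 for r=1, 2 for r=2 and
# 14 for r=3. That yields get_n_bands() == 5 and an effective get_threshold() of
# (1/5) ** (1/2), approximately 0.447, slightly below the requested 0.5.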
class Cluster(object):
"""Clusters sets with Jaccard similarity above threshold with high
probability.
Algorithm based on Rajaraman, "Mining of Massive Datasets":
1. Generate set signature
2. Use LSH to map similar signatures to same buckets
3. Use UnionFind to merge buckets containing same values
"""
def __init__(self, width=10, threshold=0.5):
self.width = width
self.unionfind = UnionFind()
self.signer = MinHashSignature(width)
self.hasher = LSH(width, threshold)
self.hashmaps = [defaultdict(list)
for _ in range(self.hasher.get_n_bands())]
def add_set(self, s, label=None):
# A label for this set
if not label:
label = s
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
self.hashmaps[band_idx][hshval].append(label)
self.unionfind.union(label, self.hashmaps[band_idx][hshval][0])
def get_sets(self):
return self.unionfind.sets()
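# Minimal usage sketch (assumes the shingle() helper defined earlier in this module;
# the strings and parameters are made up for illustration):
#
#   cluster = Cluster(width=10, threshold=0.5)
#   for doc in ("locality sensitive hashing", "locality-sensitive hashing", "something else"):
#       cluster.add_set(set(shingle(doc, 4)), label=doc)
#   cluster.get_sets()  # the two similar strings land in the same set with high probability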
class ConstrainedCluster(Cluster):
"""To fight the problem of big clusters created by the aggregation of a
large number of false positives (i.e. two items found to be a candidate
pair, but that really shouldn't belong to the same cluster), this class
introduces an extra constraint which must be met for two items to be
clustered. This mechanism imposes that we keep track of extra items, that
are encapsulated in the LabelObj namedtuple. The constraint, by default, is
that the Jaccard Similarity must be as high as the hasher threshold, which
is defined with this anonymous function:
lambda lo1, lo2: jaccard_sim(lo1.obj, lo2.obj)
where the lo's are objects of type LabelObj. However, this could be easily
redefined to a function possibly more useful in some context, like the
Levenshtein Ratio for instance (or any other similarity function to be
maximized):
lambda lo1, lo2: Levenshtein.ratio(lo1.obj, lo2.obj)
which will work, provided that an "obj" argument has been previously passed
to add_set. In this case "obj" is a string, but it could be of whatever
type, as long as the "constraint_fn" function properly handles it.
"""
# Structure to be stored in the ConstrainedCluster.hashmaps band/hash cell
# cluster lists.
LabelObj = namedtuple('LabelObj', 'label obj')
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj)):
super(ConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
def add_set(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
# if obj is not defined, s is used
lo = ConstrainedCluster.LabelObj(label, obj if obj else s)
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
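# Illustrative sketch (not part of the original API): constraint_fn may be any
# callable over two LabelObj values. difflib.SequenceMatcher stands in below for the
# Levenshtein.ratio mentioned in the docstring so the example only needs the standard
# library; labels, strings and the shingle length are made up.
#
#   import difflib
#   cc = ConstrainedCluster(
#       width=10, threshold=0.5,
#       constraint_fn=lambda lo1, lo2: difflib.SequenceMatcher(None, lo1.obj, lo2.obj).ratio())
#   cc.add_set(set(shingle("acetaminophen", 3)), label=1, obj="acetaminophen")
#   cc.add_set(set(shingle("acetaminophene", 3)), label=2, obj="acetaminophene")
#   cc.get_sets()  # 1 and 2 are merged only if the ratio clears constraint_min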
class SemiParallellizableConstrainedCluster(Cluster):
"""This is a semi-parallel version of ConstrainedCluster, to be used with
multiprocessing; explanations and documentation soon to come..
"""
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj),
sigmaps_to_merge=None):
super(SemiParallellizableConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy | yield hash(s) | conditional_block |
|
lsh.py | (s, k):
"""Generate k-length shingles of string s"""
k = min(len(s), k)
for i in range(len(s) - k + 1):
yield s[i:i+k]
def hshingle(s, k):
"""Generate k-length shingles then hash"""
for s in shingle(s, k):
yield hash(s)
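# Worked example (illustrative): shingle("abcde", 3) yields "abc", "bcd", "cde", and
# hshingle hashes each so later stages can work with integer sets instead of
# substrings. Note that str hashes are randomized per interpreter run in Python 3
# unless PYTHONHASHSEED is fixed, so hashed shingles only compare within one process.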
def jaccard_sim(X, Y):
"""Jaccard similarity between two sets"""
x = set(X)
y = set(Y)
return float(len(x & y)) / len(x | y)
def jaccard_dist(X, Y):
"""Jaccard distance between two sets"""
return 1 - jaccard_sim(X, Y)
class Signature(object):
"""Signature Base class."""
def __init__(self, dim):
self.dim = dim
self.hashes = self.hash_functions()
def hash_functions(self):
"""Returns dim different hash functions"""
pass
def sign(self, object):
"""Return the signature for object s"""
pass
class MinHashSignature(Signature):
"""Creates signatures for sets/tuples using minhash."""
def hash_functions(self):
"""Return dim different hash functions"""
def hash_factory(n):
return lambda x: hash("salt" + str(n) + str(x) + "salt")
return [ hash_factory(_) for _ in range(self.dim) ]
def sign(self, s):
"""Returns minhash signature for set s"""
sig = [ float("inf") ] * self.dim
for hash_ix, hash_fn in enumerate(self.hashes):
sig[hash_ix] = min(hash_fn(value) for value in s)
return sig
class LSH(object):
"""Locality sensitive hashing. Uses a banding approach to hash
similar signatures to the same buckets."""
def __init__(self, length, threshold):
self.length = length
self.threshold = threshold
self.bandwidth = self.get_bandwidth(length, threshold)
def hash(self, sig, band_idx=None):
"""Generate hashvals for this signature"""
for band in zip(*(iter(sig),) * self.bandwidth):
yield hash("salt" + str(band) + "tlas")
def get_bandwidth(self, n, t):
"""Approximates the bandwidth (number of rows in each band)
needed to get threshold.
Threshold t = (1/b) ** (1/r) where
b = #bands
r = #rows per band
n = b * r = #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (t ** r)
except ZeroDivisionError: # Divide by zero, your signature is huge
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
def get_threshold(self):
r = self.bandwidth
b = self.length / r
return (1. / b) ** (1. / r)
def get_n_bands(self):
return int(self.length / self.bandwidth)
class Cluster(object):
"""Clusters sets with Jaccard similarity above threshold with high
probability.
Algorithm based on Rajaraman, "Mining of Massive Datasets":
1. Generate set signature
2. Use LSH to map similar signatures to same buckets
3. Use UnionFind to merge buckets containing same values
"""
def __init__(self, width=10, threshold=0.5):
self.width = width
self.unionfind = UnionFind()
self.signer = MinHashSignature(width)
self.hasher = LSH(width, threshold)
self.hashmaps = [defaultdict(list)
for _ in range(self.hasher.get_n_bands())]
def add_set(self, s, label=None):
# A label for this set
if not label:
label = s
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
self.hashmaps[band_idx][hshval].append(label)
self.unionfind.union(label, self.hashmaps[band_idx][hshval][0])
def get_sets(self):
return self.unionfind.sets()
class ConstrainedCluster(Cluster):
"""To fight the problem of big clusters created by the aggregation of a
large number of false positives (i.e. two items found to be a candidate
pair, but that really shouldn't belong to the same cluster), this class
introduces an extra constraint which must be met for two items to be
clustered. This mechanism imposes that we keep track of extra items, that
are encapsulated in the LabelObj namedtuple. The constraint, by default, is
that the Jaccard Similarity must be as high as the hasher threshold, which
is defined with this anonymous function:
lambda lo1, lo2: jaccard_sim(lo1.obj, lo2.obj)
where the lo's are objects of type LabelObj. However, this could be easily
redefined to a function possibly more useful in some context, like the
Levenshtein Ratio for instance (or any other similarity function to be
maximized):
lambda lo1, lo2: Levenshtein.ratio(lo1.obj, lo2.obj)
which will work, provided that an "obj" argument has been previously passed
to add_set. In this case "obj" is a string, but it could be of whatever
type, as long as the "constraint_fn" function properly handles it.
"""
# Structure to be stored in the ConstrainedCluster.hashmaps band/hash cell
# cluster lists.
LabelObj = namedtuple('LabelObj', 'label obj')
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj)):
super(ConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
def add_set(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
# if obj is not defined, s is used
lo = ConstrainedCluster.LabelObj(label, obj if obj else s)
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
class SemiParallellizableConstrainedCluster(Cluster):
"""This is a semi-parallel version of ConstrainedCluster, to be used with
multiprocessing; explanations and documentation soon to come..
"""
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj),
sigmaps_to_merge=None):
super(SemiParallellizableConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). | shingle | identifier_name |
|
lsh.py | 1 - jaccard_sim(X, Y) | class Signature(object):
"""Signature Base class."""
def __init__(self, dim):
self.dim = dim
self.hashes = self.hash_functions()
def hash_functions(self):
"""Returns dim different hash functions"""
pass
def sign(self, object):
"""Return the signature for object s"""
pass
class MinHashSignature(Signature):
"""Creates signatures for sets/tuples using minhash."""
def hash_functions(self):
"""Return dim different hash functions"""
def hash_factory(n):
return lambda x: hash("salt" + str(n) + str(x) + "salt")
return [ hash_factory(_) for _ in range(self.dim) ]
def sign(self, s):
"""Returns minhash signature for set s"""
sig = [ float("inf") ] * self.dim
for hash_ix, hash_fn in enumerate(self.hashes):
sig[hash_ix] = min(hash_fn(value) for value in s)
return sig
class LSH(object):
"""Locality sensitive hashing. Uses a banding approach to hash
similar signatures to the same buckets."""
def __init__(self, length, threshold):
self.length = length
self.threshold = threshold
self.bandwidth = self.get_bandwidth(length, threshold)
def hash(self, sig, band_idx=None):
"""Generate hashvals for this signature"""
for band in zip(*(iter(sig),) * self.bandwidth):
yield hash("salt" + str(band) + "tlas")
def get_bandwidth(self, n, t):
"""Approximates the bandwidth (number of rows in each band)
needed to get threshold.
Threshold t = (1/b) ** (1/r) where
b = #bands
r = #rows per band
n = b * r = #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (t ** r)
except ZeroDivisionError: # Divide by zero, your signature is huge
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
def get_threshold(self):
r = self.bandwidth
b = self.length / r
return (1. / b) ** (1. / r)
def get_n_bands(self):
return int(self.length / self.bandwidth)
class Cluster(object):
"""Clusters sets with Jaccard similarity above threshold with high
probability.
Algorithm based on Rajaraman, "Mining of Massive Datasets":
1. Generate set signature
2. Use LSH to map similar signatures to same buckets
3. Use UnionFind to merge buckets containing same values
"""
def __init__(self, width=10, threshold=0.5):
self.width = width
self.unionfind = UnionFind()
self.signer = MinHashSignature(width)
self.hasher = LSH(width, threshold)
self.hashmaps = [defaultdict(list)
for _ in range(self.hasher.get_n_bands())]
def add_set(self, s, label=None):
# A label for this set
if not label:
label = s
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
self.hashmaps[band_idx][hshval].append(label)
self.unionfind.union(label, self.hashmaps[band_idx][hshval][0])
def get_sets(self):
return self.unionfind.sets()
class ConstrainedCluster(Cluster):
"""To fight the problem of big clusters created by the aggregation of a
large number of false positives (i.e. two items found to be a candidate
pair, but that really shouldn't belong to the same cluster), this class
introduces an extra constraint which must be met for two items to be
clustered. This mechanism imposes that we keep track of extra items, that
are encapsulated in the LabelObj namedtuple. The constraint, by default, is
that the Jaccard Similarity must be as high as the hasher threshold, which
is defined with this anonymous function:
lambda lo1, lo2: jaccard_sim(lo1.obj, lo2.obj)
where the lo's are objects of type LabelObj. However, this could be easily
redefined to a function possibly more useful in some context, like the
Levenshtein Ratio for instance (or any other similarity function to be
maximized):
lambda lo1, lo2: Levenshtein.ratio(lo1.obj, lo2.obj)
which will work, provided that an "obj" argument has been previously passed
to add_set. In this case "obj" is a string, but it could be of whatever
type, as long as the "constraint_fn" function properly handles it.
"""
# Structure to be stored in the ConstrainedCluster.hashmaps band/hash cell
# cluster lists.
LabelObj = namedtuple('LabelObj', 'label obj')
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj)):
super(ConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
def add_set(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
# if obj is not defined, s is used
lo = ConstrainedCluster.LabelObj(label, obj if obj else s)
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
# to every first element of every candidate clusters
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
class SemiParallellizableConstrainedCluster(Cluster):
"""This is a semi-parallel version of ConstrainedCluster, to be used with
multiprocessing; explanations and documentation soon to come..
"""
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj),
sigmaps_to_merge=None):
super(SemiParallellizableConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
# contains at least one LabelObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
if sigmaps_to_merge is None:
self.sigmap = {}
else:
self.sigmap = dict(reduce(operator.__add__,
[sm.items() for sm in sigmaps_to_merge]))
def sign(self, s | random_line_split |
|
lsh.py | 1 - jaccard_sim(X, Y)
class Signature(object):
"""Signature Base class."""
def __init__(self, dim):
self.dim = dim
self.hashes = self.hash_functions()
def hash_functions(self):
"""Returns dim different hash functions"""
pass
def sign(self, object):
"""Return the signature for object s"""
pass
class MinHashSignature(Signature):
"""Creates signatures for sets/tuples using minhash."""
def hash_functions(self):
"""Return dim different hash functions"""
def hash_factory(n):
return lambda x: hash("salt" + str(n) + str(x) + "salt")
return [ hash_factory(_) for _ in range(self.dim) ]
def sign(self, s):
"""Returns minhash signature for set s"""
sig = [ float("inf") ] * self.dim
for hash_ix, hash_fn in enumerate(self.hashes):
sig[hash_ix] = min(hash_fn(value) for value in s)
return sig
class LSH(object):
"""Locality sensitive hashing. Uses a banding approach to hash
similar signatures to the same buckets."""
def __init__(self, length, threshold):
self.length = length
self.threshold = threshold
self.bandwidth = self.get_bandwidth(length, threshold)
def hash(self, sig, band_idx=None):
"""Generate hashvals for this signature"""
for band in zip(*(iter(sig),) * self.bandwidth):
yield hash("salt" + str(band) + "tlas")
def get_bandwidth(self, n, t):
"""Approximates the bandwidth (number of rows in each band)
needed to get threshold.
Threshold t = (1/b) ** (1/r) where
b = #bands
r = #rows per band
n = b * r = #elements in signature
"""
best = n, 1
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (t ** r)
except ZeroDivisionError: # Divide by zero, your signature is huge
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
def get_threshold(self):
r = self.bandwidth
b = self.length / r
return (1. / b) ** (1. / r)
def get_n_bands(self):
return int(self.length / self.bandwidth)
class Cluster(object):
|
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
self.hashmaps[band_idx][hshval].append(label)
self.unionfind.union(label, self.hashmaps[band_idx][hshval][0])
def get_sets(self):
return self.unionfind.sets()
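# Minimal usage sketch (labels and shingle sets are made-up values): sets
# whose Jaccard similarity exceeds the threshold should end up in the same
# group returned by get_sets() with high probability.
def _cluster_usage_sketch():
    cluster = Cluster(width=20, threshold=0.5)
    cluster.add_set(frozenset(["a", "b", "c", "d"]), label="doc1")
    cluster.add_set(frozenset(["a", "b", "c", "e"]), label="doc2")
    cluster.add_set(frozenset(["x", "y", "z"]), label="doc3")
    return cluster.get_sets()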
class ConstrainedCluster(Cluster):
"""To fight the problem of big clusters created by the aggregation of a
large number of false positives (i.e. two items found to be a candidate
pair, but that really shouldn't belong to the same cluster), this class
introduces an extra constraint which must be met for two items to be
clustered. This mechanism imposes that we keep track of extra items, that
are encapsulated in the LabelObj namedtuple. The constraint, by default, is
that the Jaccard Similarity must be as high as the hasher threshold, which
is defined with this anonymous function:
lambda lo1, lo2: jaccard_sim(lo1.obj, lo2.obj)
where the lo's are object of type LabelObj. However, this could be easily
redefined to a function possibly more useful in some context, like the
Levenshtein Ratio for instance (or any other similarity function to be
maximized):
lambda lo1, lo2: Levenshtein.ratio(lo1.obj, lo2.obj)
which will work, provided that an "obj" argument has been previously passed
to add_set. In this case "obj" is a string, but it could be of whatever
type, as long as the "contraint_fn" function properly handles it.
"""
# Structure to be stored in the ConstrainedCluster.hashmaps band/hash cell
# cluster lists.
LabelObj = namedtuple('LabelObj', 'label obj')
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj)):
super(ConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
        # contains at least one LabelObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
def add_set(self, s, label=None, obj=None):
# A label for this set
if not label:
label = s
# if obj is not defined, s is used
lo = ConstrainedCluster.LabelObj(label, obj if obj else s)
# Add to unionfind structure
self.unionfind[label]
# Get signature
sig = self.signer.sign(s)
# Union labels with same LSH key in same band that satisfy constraint
for band_idx, hshval in enumerate(self.hasher.hash(sig)):
# apply the constraint function to compare the current element
            # to every first element of every candidate cluster
jsc = [(self.constraint_fn(lo, cluster[0]), cluster)
for cluster in self.hashmaps[band_idx][hshval]]
# retain the best (if it exists) of those over the threshold
jsc = sorted([(js, cluster) for js, cluster in jsc
if js >= self.constraint_min], reverse=True)
if jsc:
cluster = jsc[0][1]
cluster.append(deepcopy(lo))
# the candidate pair is now clustered
self.unionfind.union(lo.label, cluster[0].label)
else:
# no clustering is performed
self.hashmaps[band_idx][hshval].append([deepcopy(lo)])
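# Sketch of swapping in a different constraint, as the docstring above
# suggests. difflib stands in for the third-party Levenshtein module; the
# strings and settings are illustrative only.
def _constrained_cluster_sketch():
    import difflib
    cc = ConstrainedCluster(
        width=20, threshold=0.5,
        constraint_fn=lambda lo1, lo2:
            difflib.SequenceMatcher(None, lo1.obj, lo2.obj).ratio())
    for text in ("the quick brown fox", "the quick brown fax"):
        cc.add_set(frozenset(text.split()), label=text, obj=text)
    return cc.get_sets()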
class SemiParallellizableConstrainedCluster(Cluster):
"""This is a semi-parallel version of ConstrainedCluster, to be used with
    multiprocessing; explanations and documentation are forthcoming.
"""
def __init__(self, width=10, threshold=0.5,
constraint_min=None,
constraint_fn=lambda lo1, lo2:
jaccard_sim(lo1.obj, lo2.obj),
sigmaps_to_merge=None):
super(SemiParallellizableConstrainedCluster, self).__init__(width, threshold)
if constraint_min is None:
self.constraint_min = threshold
else:
self.constraint_min = constraint_min
self.constraint_fn = constraint_fn
# Note that self.hashmaps, although having the same structure as in the
# parent class, is used quite differently here: each band/hash cell now
# corresponds to a list of lists (instead of a single list). Each list
        # contains at least one LabelObj instance, and will possibly grow
# when hash collisions occur. However, to be fused within a certain
# list, an item must be similar enough to its first item (i.e. the
# constraint must be satisfied). If no list is found with an item to
# satisfy the constraint, a new list with the element is simply appended
# to the band/hash cell.
if sigmaps_to_merge is None:
self.sigmap = {}
else:
self.sigmap = dict(reduce(operator.__add__,
[sm.items() for sm in sigmaps_to_merge]))
def sign(self, s, | """Clusters sets with Jaccard similarity above threshold with high
probability.
Algorithm based on Rajaraman, "Mining of Massive Datasets":
1. Generate set signature
2. Use LSH to map similar signatures to same buckets
3. Use UnionFind to merge buckets containing same values
"""
def __init__(self, width=10, threshold=0.5):
self.width = width
self.unionfind = UnionFind()
self.signer = MinHashSignature(width)
self.hasher = LSH(width, threshold)
self.hashmaps = [defaultdict(list)
for _ in range(self.hasher.get_n_bands())]
def add_set(self, s, label=None):
# A label for this set
if not label:
label = s | identifier_body |
denoising_autoencoder.py | , corruption=0.3):
"""
input_dimension: The dimension of the input vectors.
hidden_dimension: How many hidden nodes to map to.
input_batch: Optional. Input data.
output_batch: Optional. A vector of labels corresponding to each input vector.
output_dimension: How many labels there are.
symbolic_input: Optional. A symbolic input value.
rng: Optional. A NumPy RandomState.
theano_rng: Optional. A Theano RandomStream.
learning_rate: Optional. How large gradient descent jumps are.
corruption: Optional. How much to corrupt the input when learning.
"""
self.input_dimension = input_dimension
self.hidden_dimension = hidden_dimension
self.output_batch = output_batch
self.output_dimension = output_dimension
if symbolic_input is None:
self.initialise_symbolic_input()
else:
self.symbolic_input = symbolic_input
self.initialise_symbolic_output()
if rng is None:
self.initialise_rng()
else:
self.rng = rng
if theano_rng is None:
self.initialise_theano_rng()
else:
self.theano_rng = theano_rng
self.corruption = corruption
self.input_batch = input_batch
self.activation = theano.tensor.nnet.sigmoid
self.learning_rate = learning_rate
self.initialise_corrupted_input()
self.initialise_parameters()
self.initialise_theano_functions()
def initialise_corrupted_input(self):
self.symbolic_corrupted_input = self.theano_rng.binomial(
size=self.symbolic_input.shape,
n=1,
p=1 - self.corruption,
dtype=theano.config.floatX) * self.symbolic_input
def initialise_theano_rng(self):
"""
Initialise and store a Theano RandomStream.
"""
self.theano_rng = RandomStreams(self.rng.randint(2**30))
def initialise_parameters(self):
"""
Initialises and subsequently stores a weight matrix, bias vector,
reverse bias vector, label weight matrix, and label bias vector.
"""
low = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
high = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
if self.activation is theano.tensor.nnet.sigmoid:
# We know the optimum distribution for tanh and sigmoid, so we
# assume that we're using tanh unless we're using sigmoid.
low *= 4
high *= 4
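        # (Sketch of the rule applied above: Glorot/Xavier initialisation
        # samples W uniformly from +/- sqrt(6 / (fan_in + fan_out)), and the
        # recommendation for the logistic sigmoid is to widen that interval
        # by a factor of 4.)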
self.weights = theano.shared(
value=numpy.asarray(
self.rng.uniform( # This distribution is apparently optimal for tanh.
low=low,
high=high,
size=(self.input_dimension, self.hidden_dimension)),
dtype=theano.config.floatX),
name="W",
borrow=True)
self.bias = theano.shared(
value=numpy.zeros((self.hidden_dimension,),
dtype=theano.config.floatX),
name="b",
borrow=True)
self.reverse_bias = theano.shared(
value=numpy.zeros((self.input_dimension,),
dtype=theano.config.floatX),
name="b'",
borrow=True)
self.reverse_weights = self.weights.T # Tied weights, so the reverse weight
# matrix is just the transpose.
self.label_weights = theano.shared(
value=numpy.zeros((self.hidden_dimension, self.output_dimension),
dtype=theano.config.floatX),
name="lW",
borrow=True)
self.label_bias = theano.shared(
value=numpy.zeros((self.output_dimension,),
dtype=theano.config.floatX),
name="lb",
borrow=True)
def initialise_rng(self):
"""
Initialises and subsequently stores a NumPy RandomState.
"""
self.rng = numpy.random.RandomState()
def initialise_symbolic_input(self):
"""
Initialises and subsequently stores a symbolic input value.
"""
self.symbolic_input = theano.tensor.dmatrix("x")
def initialise_symbolic_output(self):
"""
Initialises and subsequently stores a symbolic output value.
"""
self.symbolic_output = theano.tensor.ivector("y")
def get_hidden_output(self):
"""
Get the values output by the hidden layer.
"""
return self.activation(
theano.tensor.dot(self.symbolic_corrupted_input, self.weights) +
self.bias)
def get_reconstructed_input(self):
"""
Get the reconstructed input.
"""
return self.activation(
theano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +
self.reverse_bias)
def error_rate(self):
"""
Get the rate of incorrect prediction.
"""
return theano.tensor.mean(theano.tensor.neq(
self.get_symbolic_predicted_labels(),
self.symbolic_output))
def get_cost(self):
"""
Get the symbolic cost for the weight matrix and bias vectors.
"""
x = self.symbolic_input
y = self.get_reconstructed_input()
negative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +
(1-x)*theano.tensor.log(1-y), axis=1)
mean_loss = theano.tensor.mean(negative_log_loss)
return mean_loss
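    # The quantity above is the mean binary cross-entropy between the input
    # x and its reconstruction y, i.e. (per example)
    #     -sum_j [ x_j * log(y_j) + (1 - x_j) * log(1 - y_j) ]
    # averaged over the minibatch.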
def get_lr_cost(self):
"""
Get the symbolic cost for the logistic regression matrix and bias vector.
"""
labels = self.get_symbolic_expected_rewards()
return -theano.tensor.mean(
theano.tensor.log(labels)[
theano.tensor.arange(self.symbolic_output.shape[0]),
self.symbolic_output])
def get_symbolic_predicted_labels(self):
"""
Predict labels of a minibatch.
"""
return theano.tensor.argmax(self.get_symbolic_expected_rewards(), axis=1)
def get_symbolic_expected_rewards(self):
"""
Get probabilities of the input values being each label.
"""
prob_matrix = theano.tensor.nnet.softmax(
theano.tensor.dot(self.get_hidden_output(),
self.label_weights) + self.label_bias)
return prob_matrix
def get_updates(self):
"""
Get a list of updates to make when the model is trained.
"""
da_cost = self.get_cost()
weight_gradient = theano.tensor.grad(da_cost, self.weights)
bias_gradient = theano.tensor.grad(da_cost, self.bias)
reverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)
lr_cost = self.get_lr_cost()
lr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)
lr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)
updates = [
(self.weights, self.weights - self.learning_rate*weight_gradient),
(self.bias, self.bias - self.learning_rate*bias_gradient),
(self.reverse_bias, self.reverse_bias -
self.learning_rate*reverse_bias_gradient),
(self.label_weights, self.label_weights -
self.learning_rate*lr_weight_gradient),
(self.label_bias, self.label_bias -
self.learning_rate*lr_bias_gradient)]
return updates
def initialise_theano_functions(self):
"""
Compile Theano functions for symbolic variables.
"""
index = theano.tensor.lscalar("i")
batch_size = theano.tensor.lscalar("b")
validation_images = theano.tensor.matrix("vx")
validation_labels = theano.tensor.ivector("vy")
input_matrix = theano.tensor.matrix("ix")
if (self.input_batch is not None and
self.output_batch is not None):
self.train_model_once = theano.function([index, batch_size],
outputs=self.get_cost(),
updates=self.get_updates(),
givens={
self.symbolic_input: self.input_batch[index*batch_size:
(index+1)*batch_size],
self.symbolic_output: self.output_batch[index*batch_size:
(index+1)*batch_size]})
self.validate_model = theano.function(inputs=[validation_images, validation_labels],
outputs=self.error_rate(),
givens={
self.symbolic_input: validation_images,
self.symbolic_output: validation_labels},
allow_input_downcast=True)
self.get_expected_rewards = theano.function([input_matrix],
outputs=self.get_symbolic_expected_rewards(),
givens={self.symbolic_input: input_matrix})
|
return self.weights.get_value(borrow=True)
def train_model(self
, epochs=100
, minibatch_size=20
, yield_every_iteration=False):
"""
Train the model against the given data.
epochs: How long to train for.
minibatch_size: How large each minibatch is.
yield_every_iteration: When to yield.
"""
if self.input_batch is None:
raise ValueError("Denoising autoencoder must be initialised with "
"input data to train model independently.")
if self.output_batch is None:
raise ValueError("RMI denoising autoencoder must be initialised "
"with output data to train model independently.")
batch_count = self.input_batch.get_value(
borrow=True).shape[0]//minibatch_size
for epoch in xrange(epochs):
costs = []
for index in xrange(batch_count):
cost = self.train_model_once(index, minibatch_size)
costs.append(cost)
if yield_every_iteration:
| def get_weight_matrix(self):
"""
Get the weight matrix.
""" | random_line_split |
denoising_autoencoder.py | , corruption=0.3):
"""
input_dimension: The dimension of the input vectors.
hidden_dimension: How many hidden nodes to map to.
input_batch: Optional. Input data.
output_batch: Optional. A vector of labels corresponding to each input vector.
output_dimension: How many labels there are.
symbolic_input: Optional. A symbolic input value.
rng: Optional. A NumPy RandomState.
theano_rng: Optional. A Theano RandomStream.
learning_rate: Optional. How large gradient descent jumps are.
corruption: Optional. How much to corrupt the input when learning.
"""
self.input_dimension = input_dimension
self.hidden_dimension = hidden_dimension
self.output_batch = output_batch
self.output_dimension = output_dimension
if symbolic_input is None:
self.initialise_symbolic_input()
else:
self.symbolic_input = symbolic_input
self.initialise_symbolic_output()
if rng is None:
self.initialise_rng()
else:
self.rng = rng
if theano_rng is None:
self.initialise_theano_rng()
else:
self.theano_rng = theano_rng
self.corruption = corruption
self.input_batch = input_batch
self.activation = theano.tensor.nnet.sigmoid
self.learning_rate = learning_rate
self.initialise_corrupted_input()
self.initialise_parameters()
self.initialise_theano_functions()
def initialise_corrupted_input(self):
self.symbolic_corrupted_input = self.theano_rng.binomial(
size=self.symbolic_input.shape,
n=1,
p=1 - self.corruption,
dtype=theano.config.floatX) * self.symbolic_input
def initialise_theano_rng(self):
"""
Initialise and store a Theano RandomStream.
"""
self.theano_rng = RandomStreams(self.rng.randint(2**30))
def initialise_parameters(self):
"""
Initialises and subsequently stores a weight matrix, bias vector,
reverse bias vector, label weight matrix, and label bias vector.
"""
low = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
high = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
if self.activation is theano.tensor.nnet.sigmoid:
# We know the optimum distribution for tanh and sigmoid, so we
# assume that we're using tanh unless we're using sigmoid.
low *= 4
high *= 4
self.weights = theano.shared(
value=numpy.asarray(
self.rng.uniform( # This distribution is apparently optimal for tanh.
low=low,
high=high,
size=(self.input_dimension, self.hidden_dimension)),
dtype=theano.config.floatX),
name="W",
borrow=True)
self.bias = theano.shared(
value=numpy.zeros((self.hidden_dimension,),
dtype=theano.config.floatX),
name="b",
borrow=True)
self.reverse_bias = theano.shared(
value=numpy.zeros((self.input_dimension,),
dtype=theano.config.floatX),
name="b'",
borrow=True)
self.reverse_weights = self.weights.T # Tied weights, so the reverse weight
# matrix is just the transpose.
self.label_weights = theano.shared(
value=numpy.zeros((self.hidden_dimension, self.output_dimension),
dtype=theano.config.floatX),
name="lW",
borrow=True)
self.label_bias = theano.shared(
value=numpy.zeros((self.output_dimension,),
dtype=theano.config.floatX),
name="lb",
borrow=True)
def initialise_rng(self):
"""
Initialises and subsequently stores a NumPy RandomState.
"""
self.rng = numpy.random.RandomState()
def initialise_symbolic_input(self):
"""
Initialises and subsequently stores a symbolic input value.
"""
self.symbolic_input = theano.tensor.dmatrix("x")
def initialise_symbolic_output(self):
"""
Initialises and subsequently stores a symbolic output value.
"""
self.symbolic_output = theano.tensor.ivector("y")
def get_hidden_output(self):
"""
Get the values output by the hidden layer.
"""
return self.activation(
theano.tensor.dot(self.symbolic_corrupted_input, self.weights) +
self.bias)
def get_reconstructed_input(self):
"""
Get the reconstructed input.
"""
return self.activation(
theano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +
self.reverse_bias)
def error_rate(self):
"""
Get the rate of incorrect prediction.
"""
return theano.tensor.mean(theano.tensor.neq(
self.get_symbolic_predicted_labels(),
self.symbolic_output))
def get_cost(self):
"""
Get the symbolic cost for the weight matrix and bias vectors.
"""
x = self.symbolic_input
y = self.get_reconstructed_input()
negative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +
(1-x)*theano.tensor.log(1-y), axis=1)
mean_loss = theano.tensor.mean(negative_log_loss)
return mean_loss
def get_lr_cost(self):
"""
Get the symbolic cost for the logistic regression matrix and bias vector.
"""
labels = self.get_symbolic_expected_rewards()
return -theano.tensor.mean(
theano.tensor.log(labels)[
theano.tensor.arange(self.symbolic_output.shape[0]),
self.symbolic_output])
def get_symbolic_predicted_labels(self):
"""
Predict labels of a minibatch.
"""
return theano.tensor.argmax(self.get_symbolic_expected_rewards(), axis=1)
def get_symbolic_expected_rewards(self):
"""
Get probabilities of the input values being each label.
"""
prob_matrix = theano.tensor.nnet.softmax(
theano.tensor.dot(self.get_hidden_output(),
self.label_weights) + self.label_bias)
return prob_matrix
def get_updates(self):
"""
Get a list of updates to make when the model is trained.
"""
da_cost = self.get_cost()
weight_gradient = theano.tensor.grad(da_cost, self.weights)
bias_gradient = theano.tensor.grad(da_cost, self.bias)
reverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)
lr_cost = self.get_lr_cost()
lr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)
lr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)
updates = [
(self.weights, self.weights - self.learning_rate*weight_gradient),
(self.bias, self.bias - self.learning_rate*bias_gradient),
(self.reverse_bias, self.reverse_bias -
self.learning_rate*reverse_bias_gradient),
(self.label_weights, self.label_weights -
self.learning_rate*lr_weight_gradient),
(self.label_bias, self.label_bias -
self.learning_rate*lr_bias_gradient)]
return updates
def initialise_theano_functions(self):
"""
Compile Theano functions for symbolic variables.
"""
index = theano.tensor.lscalar("i")
batch_size = theano.tensor.lscalar("b")
validation_images = theano.tensor.matrix("vx")
validation_labels = theano.tensor.ivector("vy")
input_matrix = theano.tensor.matrix("ix")
if (self.input_batch is not None and
self.output_batch is not None):
self.train_model_once = theano.function([index, batch_size],
outputs=self.get_cost(),
updates=self.get_updates(),
givens={
self.symbolic_input: self.input_batch[index*batch_size:
(index+1)*batch_size],
self.symbolic_output: self.output_batch[index*batch_size:
(index+1)*batch_size]})
self.validate_model = theano.function(inputs=[validation_images, validation_labels],
outputs=self.error_rate(),
givens={
self.symbolic_input: validation_images,
self.symbolic_output: validation_labels},
allow_input_downcast=True)
self.get_expected_rewards = theano.function([input_matrix],
outputs=self.get_symbolic_expected_rewards(),
givens={self.symbolic_input: input_matrix})
def get_weight_matrix(self):
"""
Get the weight matrix.
"""
return self.weights.get_value(borrow=True)
def | (self
, epochs=100
, minibatch_size=20
, yield_every_iteration=False):
"""
Train the model against the given data.
epochs: How long to train for.
minibatch_size: How large each minibatch is.
yield_every_iteration: When to yield.
"""
if self.input_batch is None:
raise ValueError("Denoising autoencoder must be initialised with "
"input data to train model independently.")
if self.output_batch is None:
raise ValueError("RMI denoising autoencoder must be initialised "
"with output data to train model independently.")
batch_count = self.input_batch.get_value(
borrow=True).shape[0]//minibatch_size
for epoch in xrange(epochs):
costs = []
for index in xrange(batch_count):
cost = self.train_model_once(index, minibatch_size)
costs.append(cost)
if yield_every_iteration:
| train_model | identifier_name |
denoising_autoencoder.py | , corruption=0.3):
"""
input_dimension: The dimension of the input vectors.
hidden_dimension: How many hidden nodes to map to.
input_batch: Optional. Input data.
output_batch: Optional. A vector of labels corresponding to each input vector.
output_dimension: How many labels there are.
symbolic_input: Optional. A symbolic input value.
rng: Optional. A NumPy RandomState.
theano_rng: Optional. A Theano RandomStream.
learning_rate: Optional. How large gradient descent jumps are.
corruption: Optional. How much to corrupt the input when learning.
"""
self.input_dimension = input_dimension
self.hidden_dimension = hidden_dimension
self.output_batch = output_batch
self.output_dimension = output_dimension
if symbolic_input is None:
self.initialise_symbolic_input()
else:
self.symbolic_input = symbolic_input
self.initialise_symbolic_output()
if rng is None:
self.initialise_rng()
else:
self.rng = rng
if theano_rng is None:
self.initialise_theano_rng()
else:
self.theano_rng = theano_rng
self.corruption = corruption
self.input_batch = input_batch
self.activation = theano.tensor.nnet.sigmoid
self.learning_rate = learning_rate
self.initialise_corrupted_input()
self.initialise_parameters()
self.initialise_theano_functions()
def initialise_corrupted_input(self):
self.symbolic_corrupted_input = self.theano_rng.binomial(
size=self.symbolic_input.shape,
n=1,
p=1 - self.corruption,
dtype=theano.config.floatX) * self.symbolic_input
def initialise_theano_rng(self):
"""
Initialise and store a Theano RandomStream.
"""
self.theano_rng = RandomStreams(self.rng.randint(2**30))
def initialise_parameters(self):
"""
Initialises and subsequently stores a weight matrix, bias vector,
reverse bias vector, label weight matrix, and label bias vector.
"""
low = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
high = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
if self.activation is theano.tensor.nnet.sigmoid:
# We know the optimum distribution for tanh and sigmoid, so we
# assume that we're using tanh unless we're using sigmoid.
low *= 4
high *= 4
self.weights = theano.shared(
value=numpy.asarray(
self.rng.uniform( # This distribution is apparently optimal for tanh.
low=low,
high=high,
size=(self.input_dimension, self.hidden_dimension)),
dtype=theano.config.floatX),
name="W",
borrow=True)
self.bias = theano.shared(
value=numpy.zeros((self.hidden_dimension,),
dtype=theano.config.floatX),
name="b",
borrow=True)
self.reverse_bias = theano.shared(
value=numpy.zeros((self.input_dimension,),
dtype=theano.config.floatX),
name="b'",
borrow=True)
self.reverse_weights = self.weights.T # Tied weights, so the reverse weight
# matrix is just the transpose.
self.label_weights = theano.shared(
value=numpy.zeros((self.hidden_dimension, self.output_dimension),
dtype=theano.config.floatX),
name="lW",
borrow=True)
self.label_bias = theano.shared(
value=numpy.zeros((self.output_dimension,),
dtype=theano.config.floatX),
name="lb",
borrow=True)
def initialise_rng(self):
"""
Initialises and subsequently stores a NumPy RandomState.
"""
self.rng = numpy.random.RandomState()
def initialise_symbolic_input(self):
"""
Initialises and subsequently stores a symbolic input value.
"""
self.symbolic_input = theano.tensor.dmatrix("x")
def initialise_symbolic_output(self):
"""
Initialises and subsequently stores a symbolic output value.
"""
self.symbolic_output = theano.tensor.ivector("y")
def get_hidden_output(self):
"""
Get the values output by the hidden layer.
"""
return self.activation(
theano.tensor.dot(self.symbolic_corrupted_input, self.weights) +
self.bias)
def get_reconstructed_input(self):
"""
Get the reconstructed input.
"""
return self.activation(
theano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +
self.reverse_bias)
def error_rate(self):
"""
Get the rate of incorrect prediction.
"""
return theano.tensor.mean(theano.tensor.neq(
self.get_symbolic_predicted_labels(),
self.symbolic_output))
def get_cost(self):
"""
Get the symbolic cost for the weight matrix and bias vectors.
"""
x = self.symbolic_input
y = self.get_reconstructed_input()
negative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +
(1-x)*theano.tensor.log(1-y), axis=1)
mean_loss = theano.tensor.mean(negative_log_loss)
return mean_loss
def get_lr_cost(self):
"""
Get the symbolic cost for the logistic regression matrix and bias vector.
"""
labels = self.get_symbolic_expected_rewards()
return -theano.tensor.mean(
theano.tensor.log(labels)[
theano.tensor.arange(self.symbolic_output.shape[0]),
self.symbolic_output])
def get_symbolic_predicted_labels(self):
"""
Predict labels of a minibatch.
"""
return theano.tensor.argmax(self.get_symbolic_expected_rewards(), axis=1)
def get_symbolic_expected_rewards(self):
"""
Get probabilities of the input values being each label.
"""
prob_matrix = theano.tensor.nnet.softmax(
theano.tensor.dot(self.get_hidden_output(),
self.label_weights) + self.label_bias)
return prob_matrix
def get_updates(self):
| self.learning_rate*lr_weight_gradient),
(self.label_bias, self.label_bias -
self.learning_rate*lr_bias_gradient)]
return updates
def initialise_theano_functions(self):
"""
Compile Theano functions for symbolic variables.
"""
index = theano.tensor.lscalar("i")
batch_size = theano.tensor.lscalar("b")
validation_images = theano.tensor.matrix("vx")
validation_labels = theano.tensor.ivector("vy")
input_matrix = theano.tensor.matrix("ix")
if (self.input_batch is not None and
self.output_batch is not None):
self.train_model_once = theano.function([index, batch_size],
outputs=self.get_cost(),
updates=self.get_updates(),
givens={
self.symbolic_input: self.input_batch[index*batch_size:
(index+1)*batch_size],
self.symbolic_output: self.output_batch[index*batch_size:
(index+1)*batch_size]})
self.validate_model = theano.function(inputs=[validation_images, validation_labels],
outputs=self.error_rate(),
givens={
self.symbolic_input: validation_images,
self.symbolic_output: validation_labels},
allow_input_downcast=True)
self.get_expected_rewards = theano.function([input_matrix],
outputs=self.get_symbolic_expected_rewards(),
givens={self.symbolic_input: input_matrix})
def get_weight_matrix(self):
"""
Get the weight matrix.
"""
return self.weights.get_value(borrow=True)
def train_model(self
, epochs=100
, minibatch_size=20
, yield_every_iteration=False):
"""
Train the model against the given data.
epochs: How long to train for.
minibatch_size: How large each minibatch is.
yield_every_iteration: When to yield.
"""
if self.input_batch is None:
raise ValueError("Denoising autoencoder must be initialised with "
"input data to train model independently.")
if self.output_batch is None:
raise ValueError("RMI denoising autoencoder must be initialised "
"with output data to train model independently.")
batch_count = self.input_batch.get_value(
borrow=True).shape[0]//minibatch_size
for epoch in xrange(epochs):
costs = []
for index in xrange(batch_count):
cost = self.train_model_once(index, minibatch_size)
costs.append(cost)
if yield_every_iteration:
yield | """
Get a list of updates to make when the model is trained.
"""
da_cost = self.get_cost()
weight_gradient = theano.tensor.grad(da_cost, self.weights)
bias_gradient = theano.tensor.grad(da_cost, self.bias)
reverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)
lr_cost = self.get_lr_cost()
lr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)
lr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)
updates = [
(self.weights, self.weights - self.learning_rate*weight_gradient),
(self.bias, self.bias - self.learning_rate*bias_gradient),
(self.reverse_bias, self.reverse_bias -
self.learning_rate*reverse_bias_gradient),
(self.label_weights, self.label_weights - | identifier_body |
denoising_autoencoder.py | self.corruption = corruption
self.input_batch = input_batch
self.activation = theano.tensor.nnet.sigmoid
self.learning_rate = learning_rate
self.initialise_corrupted_input()
self.initialise_parameters()
self.initialise_theano_functions()
def initialise_corrupted_input(self):
self.symbolic_corrupted_input = self.theano_rng.binomial(
size=self.symbolic_input.shape,
n=1,
p=1 - self.corruption,
dtype=theano.config.floatX) * self.symbolic_input
def initialise_theano_rng(self):
"""
Initialise and store a Theano RandomStream.
"""
self.theano_rng = RandomStreams(self.rng.randint(2**30))
def initialise_parameters(self):
"""
Initialises and subsequently stores a weight matrix, bias vector,
reverse bias vector, label weight matrix, and label bias vector.
"""
low = -numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
high = numpy.sqrt(6/(self.input_dimension + self.hidden_dimension))
if self.activation is theano.tensor.nnet.sigmoid:
# We know the optimum distribution for tanh and sigmoid, so we
# assume that we're using tanh unless we're using sigmoid.
low *= 4
high *= 4
self.weights = theano.shared(
value=numpy.asarray(
self.rng.uniform( # This distribution is apparently optimal for tanh.
low=low,
high=high,
size=(self.input_dimension, self.hidden_dimension)),
dtype=theano.config.floatX),
name="W",
borrow=True)
self.bias = theano.shared(
value=numpy.zeros((self.hidden_dimension,),
dtype=theano.config.floatX),
name="b",
borrow=True)
self.reverse_bias = theano.shared(
value=numpy.zeros((self.input_dimension,),
dtype=theano.config.floatX),
name="b'",
borrow=True)
self.reverse_weights = self.weights.T # Tied weights, so the reverse weight
# matrix is just the transpose.
self.label_weights = theano.shared(
value=numpy.zeros((self.hidden_dimension, self.output_dimension),
dtype=theano.config.floatX),
name="lW",
borrow=True)
self.label_bias = theano.shared(
value=numpy.zeros((self.output_dimension,),
dtype=theano.config.floatX),
name="lb",
borrow=True)
def initialise_rng(self):
"""
Initialises and subsequently stores a NumPy RandomState.
"""
self.rng = numpy.random.RandomState()
def initialise_symbolic_input(self):
"""
Initialises and subsequently stores a symbolic input value.
"""
self.symbolic_input = theano.tensor.dmatrix("x")
def initialise_symbolic_output(self):
"""
Initialises and subsequently stores a symbolic output value.
"""
self.symbolic_output = theano.tensor.ivector("y")
def get_hidden_output(self):
"""
Get the values output by the hidden layer.
"""
return self.activation(
theano.tensor.dot(self.symbolic_corrupted_input, self.weights) +
self.bias)
def get_reconstructed_input(self):
"""
Get the reconstructed input.
"""
return self.activation(
theano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +
self.reverse_bias)
def error_rate(self):
"""
Get the rate of incorrect prediction.
"""
return theano.tensor.mean(theano.tensor.neq(
self.get_symbolic_predicted_labels(),
self.symbolic_output))
def get_cost(self):
"""
Get the symbolic cost for the weight matrix and bias vectors.
"""
x = self.symbolic_input
y = self.get_reconstructed_input()
negative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +
(1-x)*theano.tensor.log(1-y), axis=1)
mean_loss = theano.tensor.mean(negative_log_loss)
return mean_loss
def get_lr_cost(self):
"""
Get the symbolic cost for the logistic regression matrix and bias vector.
"""
labels = self.get_symbolic_expected_rewards()
return -theano.tensor.mean(
theano.tensor.log(labels)[
theano.tensor.arange(self.symbolic_output.shape[0]),
self.symbolic_output])
def get_symbolic_predicted_labels(self):
"""
Predict labels of a minibatch.
"""
return theano.tensor.argmax(self.get_symbolic_expected_rewards(), axis=1)
def get_symbolic_expected_rewards(self):
"""
Get probabilities of the input values being each label.
"""
prob_matrix = theano.tensor.nnet.softmax(
theano.tensor.dot(self.get_hidden_output(),
self.label_weights) + self.label_bias)
return prob_matrix
def get_updates(self):
"""
Get a list of updates to make when the model is trained.
"""
da_cost = self.get_cost()
weight_gradient = theano.tensor.grad(da_cost, self.weights)
bias_gradient = theano.tensor.grad(da_cost, self.bias)
reverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)
lr_cost = self.get_lr_cost()
lr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)
lr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)
updates = [
(self.weights, self.weights - self.learning_rate*weight_gradient),
(self.bias, self.bias - self.learning_rate*bias_gradient),
(self.reverse_bias, self.reverse_bias -
self.learning_rate*reverse_bias_gradient),
(self.label_weights, self.label_weights -
self.learning_rate*lr_weight_gradient),
(self.label_bias, self.label_bias -
self.learning_rate*lr_bias_gradient)]
return updates
def initialise_theano_functions(self):
"""
Compile Theano functions for symbolic variables.
"""
index = theano.tensor.lscalar("i")
batch_size = theano.tensor.lscalar("b")
validation_images = theano.tensor.matrix("vx")
validation_labels = theano.tensor.ivector("vy")
input_matrix = theano.tensor.matrix("ix")
if (self.input_batch is not None and
self.output_batch is not None):
self.train_model_once = theano.function([index, batch_size],
outputs=self.get_cost(),
updates=self.get_updates(),
givens={
self.symbolic_input: self.input_batch[index*batch_size:
(index+1)*batch_size],
self.symbolic_output: self.output_batch[index*batch_size:
(index+1)*batch_size]})
self.validate_model = theano.function(inputs=[validation_images, validation_labels],
outputs=self.error_rate(),
givens={
self.symbolic_input: validation_images,
self.symbolic_output: validation_labels},
allow_input_downcast=True)
self.get_expected_rewards = theano.function([input_matrix],
outputs=self.get_symbolic_expected_rewards(),
givens={self.symbolic_input: input_matrix})
def get_weight_matrix(self):
"""
Get the weight matrix.
"""
return self.weights.get_value(borrow=True)
def train_model(self
, epochs=100
, minibatch_size=20
, yield_every_iteration=False):
"""
Train the model against the given data.
epochs: How long to train for.
minibatch_size: How large each minibatch is.
yield_every_iteration: When to yield.
"""
if self.input_batch is None:
raise ValueError("Denoising autoencoder must be initialised with "
"input data to train model independently.")
if self.output_batch is None:
raise ValueError("RMI denoising autoencoder must be initialised "
"with output data to train model independently.")
batch_count = self.input_batch.get_value(
borrow=True).shape[0]//minibatch_size
for epoch in xrange(epochs):
costs = []
for index in xrange(batch_count):
cost = self.train_model_once(index, minibatch_size)
costs.append(cost)
if yield_every_iteration:
yield (index, cost)
if not yield_every_iteration:
yield (epoch, numpy.mean(costs))
def test_DA(DA, epochs=15):
from sys import argv
import lib.mnist as mnist
print "loading training images"
images = mnist.load_training_images(format="theano", validation=False, div=256.0)
labels = mnist.load_training_labels(format="theano", validation=False)
print "loading test images"
validation_images = mnist.load_training_images(format="numpy", validation=True)
validation_labels = mnist.load_training_labels(format="numpy", validation=True)
print "instantiating denoising autoencoder"
corruption = 0.3
learning_rate = 0.1
hiddens = 500
da = DA(784, hiddens,
input_batch=images,
output_batch=labels,
output_dimension=10,
corruption=corruption,
learning_rate=learning_rate)
print "training..."
for epoch, cost in da.train_model(epochs):
| print epoch, cost
print "wrong {:.02%} of the time".format(
float(da.validate_model(validation_images, validation_labels))) | conditional_block |
|
webapp.py | dev"
:returns: A major.minor.patch[.sub] version string or "dev".
"""
# Note: if you install from a cloned git repository
# (e.g. pip install ./tk-core), the version number
# will be picked up from the most recently added tag.
try:
version_git = subprocess.check_output(
["git", "describe", "--abbrev=0"]
).rstrip()
return version_git
except Exception:
# Blindly ignore problems. Git might be not available, or the user may
# have installed via a zip archive, etc...
pass
return "dev"
class SgJiraBridgeBadRequestError(Exception):
"""
Custom exception so we can differentiate between errors we raise that
should return 4xx error codes and errors in the application which should
return 500 error codes.
"""
pass
class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""
Basic server with threading functionality mixed in. This will help the server
keep up with a high volume of throughput from ShotGrid and Jira.
"""
def __init__(self, settings, *args, **kwargs):
# Note: BaseHTTPServer.HTTPServer is not a new style class so we can't use
# super here.
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
self._sg_jira = sg_jira.Bridge.get_bridge(settings)
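    # Rough sketch of how this server is typically wired up (the settings
    # path, address and port here are assumptions, not values from this
    # file):
    #
    #   httpd = Server("settings.py", ("localhost", 9090), RequestHandler)
    #   httpd.serve_forever()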
def sync_in_jira(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_jira(*args, **kwargs)
def sync_in_shotgun(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Brige method.
"""
return self._sg_jira.sync_in_shotgun(*args, **kwargs)
def admin_reset(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.reset(*args, **kwargs)
@property
def sync_settings_names(self):
"""
Return the list of sync settings this server handles.
"""
return self._sg_jira.sync_settings_names
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# On Python3, in socketserver.StreamRequestHandler, if this is
# set it will use makefile() to produce the output stream. Otherwise,
# it will use socketserver._SocketWriter, and we won't be able to get
# to the data.
# taken from https://stackoverflow.com/a/53163148/4223964
wbufsize = 1
protocol_version = "HTTP/1.1"
# Inject the version of sg-jira-bridge into server_version for the headers.
server_version = "sg-jira-bridge/%s %s" % (
get_sg_jira_bridge_version(),
BaseHTTPServer.BaseHTTPRequestHandler.server_version,
)
# BaseHTTPServer Class variable that stores the HTML template for error
# pages. Override the default error page template with our own.
error_message_format = HTML_ERROR_TEMPLATE
def post_response(self, response_code, message, content=None):
"""
Convenience method for handling the response
Handles sending the response, setting headers, and writing any
content in the expected order. Sets appropriate headers including
content length which is required by HTTP/1.1.
:param int response_code: Standard HTTP response code sent in headers.
:param str message: Message to accompany response code in headers.
:param str content: Optional content to return as content in the
response. This is typically html displayed in a browser.
"""
# NOTE: All responses must:
# - send the response first.
# - then, if there is some data, call end_headers to add a blank line.
# - then write the data, if any, with self.wfile.write
self.send_response(response_code, message)
content_len = 0
if content:
content_len = len(content)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", content_len)
# TODO: Ideally we use the default functionality of HTTP/1.1 where
# keep-alive is True (no header needed). However, for some reason,
# this currently blocks new connections for 60 seconds (likely the
# default keep-alive timeout). So for now we explicitly close the
# connection with the header below to ensure things run smoothly.
# Once the issue has been resolved, we can remove this header.
self.send_header("Connection", "close")
self.end_headers()
if content:
self.wfile.write(content)
def do_GET(self):
"""
Handle a GET request.
"""
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
HMTL_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
HMTL_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload. | # /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type, if not set we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
"Invalid content-type %s, it must be 'application/json'" % content_type
)
content_len = int(self.headers.get("content-length", 0))
body = self.rfile.read(content_len)
payload = {}
if body:
payload = json.loads(body)
return payload | """ | random_line_split |
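        # Example of a body this method accepts (illustrative values only):
        #
        #   Content-Type: application/json
        #   {"entity_type": "Task", "entity_id": 123}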
webapp.py | pass
return "dev"
class SgJiraBridgeBadRequestError(Exception):
"""
Custom exception so we can differentiate between errors we raise that
should return 4xx error codes and errors in the application which should
return 500 error codes.
"""
pass
class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""
Basic server with threading functionality mixed in. This will help the server
keep up with a high volume of throughput from ShotGrid and Jira.
"""
def __init__(self, settings, *args, **kwargs):
# Note: BaseHTTPServer.HTTPServer is not a new style class so we can't use
# super here.
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
self._sg_jira = sg_jira.Bridge.get_bridge(settings)
def sync_in_jira(self, *args, **kwargs):
"""
        Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.sync_in_jira(*args, **kwargs)
def sync_in_shotgun(self, *args, **kwargs):
"""
        Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.sync_in_shotgun(*args, **kwargs)
def admin_reset(self, *args, **kwargs):
"""
Just pass the given parameters to the SG Jira Bridge method.
"""
return self._sg_jira.reset(*args, **kwargs)
@property
def sync_settings_names(self):
"""
Return the list of sync settings this server handles.
"""
return self._sg_jira.sync_settings_names
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# On Python3, in socketserver.StreamRequestHandler, if this is
# set it will use makefile() to produce the output stream. Otherwise,
# it will use socketserver._SocketWriter, and we won't be able to get
# to the data.
# taken from https://stackoverflow.com/a/53163148/4223964
wbufsize = 1
protocol_version = "HTTP/1.1"
# Inject the version of sg-jira-bridge into server_version for the headers.
server_version = "sg-jira-bridge/%s %s" % (
get_sg_jira_bridge_version(),
BaseHTTPServer.BaseHTTPRequestHandler.server_version,
)
# BaseHTTPServer Class variable that stores the HTML template for error
# pages. Override the default error page template with our own.
error_message_format = HTML_ERROR_TEMPLATE
def post_response(self, response_code, message, content=None):
"""
Convenience method for handling the response
Handles sending the response, setting headers, and writing any
content in the expected order. Sets appropriate headers including
content length which is required by HTTP/1.1.
:param int response_code: Standard HTTP response code sent in headers.
:param str message: Message to accompany response code in headers.
:param str content: Optional content to return as content in the
response. This is typically html displayed in a browser.
"""
# NOTE: All responses must:
# - send the response first.
# - then, if there is some data, call end_headers to add a blank line.
# - then write the data, if any, with self.wfile.write
self.send_response(response_code, message)
content_len = 0
if content:
content_len = len(content)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", content_len)
# TODO: Ideally we use the default functionality of HTTP/1.1 where
# keep-alive is True (no header needed). However, for some reason,
# this currently blocks new connections for 60 seconds (likely the
# default keep-alive timeout). So for now we explicitly close the
# connection with the header below to ensure things run smoothly.
# Once the issue has been resolved, we can remove this header.
self.send_header("Connection", "close")
self.end_headers()
if content:
self.wfile.write(content)
def do_GET(self):
"""
Handle a GET request.
"""
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
HMTL_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
HMTL_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload.
"""
# /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type, if not set we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
" | """
Helper to extract a version number for the sg-jira-bridge module.
    This will attempt to extract the version number from git if installed from
    a cloned repo. If a version is unable to be determined, or the process
    fails for any reason, we return "dev".
:returns: A major.minor.patch[.sub] version string or "dev".
"""
# Note: if you install from a cloned git repository
# (e.g. pip install ./tk-core), the version number
# will be picked up from the most recently added tag.
try:
version_git = subprocess.check_output(
["git", "describe", "--abbrev=0"]
).rstrip()
return version_git
except Exception:
# Blindly ignore problems. Git might be not available, or the user may
# have installed via a zip archive, etc... | identifier_body |
|
webapp.py |
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
HMTL_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
HMTL_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload.
"""
# /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type, if not set we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
"Invalid content-type %s, it must be 'application/json'" % content_type
)
content_len = int(self.headers.get("content-length", 0))
body = self.rfile.read(content_len)
payload = {}
if body:
payload = json.loads(body)
return payload
def _handle_sync_request(self, path_parts, parameters):
"""
Handle a request to sync between ShotGrid and Jira in either direction.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
        validated before we proceed. We expect the path for this request to
be one of the following:
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
If the SG Entity is not specified in the path, it must be present in
the loaded payload.
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["sg2jira", "default", "Task", "123"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
entity_type = None
entity_key = None
if len(path_parts) == 4:
direction, settings_name, entity_type, entity_key = path_parts
elif len(path_parts) == 2:
direction, settings_name = path_parts
else:
raise SgJiraBridgeBadRequestError("Invalid request path %s" % self.path)
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
raise SgJiraBridgeBadRequestError(
"Invalid settings name %s" % settings_name
)
payload = self._read_payload()
if direction == "sg2jira":
# Ensure we get a valid entity_type and entity_id
if not entity_type or not entity_key:
# We need to retrieve this from the payload.
entity_type = payload.get("entity_type")
entity_key = payload.get("entity_id")
if not entity_type or not entity_key:
raise SgJiraBridgeBadRequestError(
"Invalid request payload %s, unable to retrieve a Shotgun Entity type and its id."
% payload
)
# We could have a str or int here depending on how it was sent.
try:
entity_key = int(entity_key)
except ValueError as e:
# log the original exception before we obfuscate it
logger.debug(e, exc_info=True)
raise SgJiraBridgeBadRequestError(
"Invalid Shotgun %s id %s, it must be a number."
% (entity_type, entity_key,)
)
self.server.sync_in_jira(
settings_name, entity_type, int(entity_key), event=payload, **parameters
)
elif direction == "jira2sg":
if not entity_type or not entity_key:
# We can't retrieve this easily from the webhook payload without
# hard coding a list of supported resource types, so we require
# it to be specified in the path for the time being.
raise SgJiraBridgeBadRequestError(
"Invalid request path %s, it must include a Jira resource "
"type and its key" % self.path
)
self.server.sync_in_shotgun(
settings_name, entity_type, entity_key, event=payload, **parameters
)
def _handle_admin_request(self, path_parts, parameters):
"""
Handle admin request to the server.
Currently handles a single action, ``reset`` which resets the Bridge
in order to clear out the ShotGrid schema cache.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed.
admin/reset
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["admin", "reset"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
# The only function we respond to now is reset
if len(path_parts) != 2 or path_parts[1] != "reset":
raise SgJiraBridgeBadRequestError(
"Invalid admin path '%s'. Action is not set or unsupported." % self.path
)
self.server.admin_reset(**parameters)
def log_message(self, format, *args):
"""
Override :class:`BaseHTTPServer.BaseHTTPRequestHandler` method to use a
standard logger.
:param str format: A format string, e.g. '%s %s'.
:param args: Arbitrary list of arguments to use with the format string.
"""
message = "%s - %s - %s" % (self.client_address[0], self.path, format % args)
logger.info(message)
def | log_error | identifier_name |
|
webapp.py | the SG Jira Bridge method.
"""
return self._sg_jira.reset(*args, **kwargs)
@property
def sync_settings_names(self):
"""
Return the list of sync settings this server handles.
"""
return self._sg_jira.sync_settings_names
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# On Python3, in socketserver.StreamRequestHandler, if this is
# set it will use makefile() to produce the output stream. Otherwise,
# it will use socketserver._SocketWriter, and we won't be able to get
# to the data.
# taken from https://stackoverflow.com/a/53163148/4223964
wbufsize = 1
protocol_version = "HTTP/1.1"
# Inject the version of sg-jira-bridge into server_version for the headers.
server_version = "sg-jira-bridge/%s %s" % (
get_sg_jira_bridge_version(),
BaseHTTPServer.BaseHTTPRequestHandler.server_version,
)
# BaseHTTPServer Class variable that stores the HTML template for error
# pages. Override the default error page template with our own.
error_message_format = HTML_ERROR_TEMPLATE
def post_response(self, response_code, message, content=None):
"""
Convenience method for handling the response
Handles sending the response, setting headers, and writing any
content in the expected order. Sets appropriate headers including
content length which is required by HTTP/1.1.
:param int response_code: Standard HTTP response code sent in headers.
:param str message: Message to accompany response code in headers.
:param str content: Optional content to return as content in the
response. This is typically html displayed in a browser.
"""
# NOTE: All responses must:
# - send the response first.
# - then, if there is some data, call end_headers to add a blank line.
# - then write the data, if any, with self.wfile.write
self.send_response(response_code, message)
content_len = 0
if content:
content_len = len(content)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Content-Length", content_len)
# TODO: Ideally we use the default functionality of HTTP/1.1 where
# keep-alive is True (no header needed). However, for some reason,
# this currently blocks new connections for 60 seconds (likely the
# default keep-alive timeout). So for now we explicitly close the
# connection with the header below to ensure things run smoothly.
# Once the issue has been resolved, we can remove this header.
self.send_header("Connection", "close")
self.end_headers()
if content:
self.wfile.write(content)
def do_GET(self):
"""
Handle a GET request.
"""
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in self.path[1:].split("/") if x]
if not path_parts:
self.post_response(
200,
"The server is alive",
HMTL_TEMPLATE % ("The server is alive", "The server is alive", ""),
)
return
# Return a correct error for browser favicon requests in order to
# reduce confusing log messages that look bad but aren't.
if len(path_parts) == 1 and path_parts[0] == "favicon.ico":
self.send_error(404)
return
if path_parts[0] == "sg2jira":
title = "Shotgun to Jira"
elif path_parts[0] == "jira2sg":
title = "Jira to Shotgun"
else:
self.send_error(400, "Invalid request path %s" % self.path)
return
settings_name = path_parts[1]
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
self.send_error(400, "Invalid settings name %s" % settings_name)
return
# Success, send a basic html page.
self.post_response(
200,
six.ensure_binary("Syncing with %s settings." % settings_name),
six.ensure_binary(
HMTL_TEMPLATE
% (title, title, "Syncing with %s settings." % settings_name)
),
)
def do_POST(self):
"""
Handle a POST request.
Post url paths need to have the form::
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
admin/reset
If the SG Entity is not specified in the path, it must be specified in
the provided payload.
"""
# /sg2jira/default[/Task/123]
# /jira2sg/default/Issue/KEY-123
# /admin/reset
try:
parsed = parse.urlparse(self.path)
# Extract additional query parameters.
# What they could be is still TBD, may be things like `dry_run=1`?
parameters = {}
if parsed.query:
parameters = parse.parse_qs(parsed.query, True, True)
# Extract path components from the path, ignore leading '/' and
# discard empty values coming from '/' at the end or multiple
# contiguous '/'.
path_parts = [x for x in parsed.path[1:].split("/") if x]
if not path_parts:
self.send_error(400, "Invalid request path %s" % self.path)
# Treat the command
if path_parts[0] == "admin":
self._handle_admin_request(path_parts, parameters)
elif path_parts[0] in ["sg2jira", "jira2sg"]:
self._handle_sync_request(path_parts, parameters)
else:
self.send_error(
400,
"Invalid request path %s: unknown command %s"
% (self.path, path_parts[0]),
)
return
self.post_response(200, "POST request successful")
except SgJiraBridgeBadRequestError as e:
self.send_error(400, str(e))
except Exception as e:
self.send_error(500, str(e))
logger.debug(e, exc_info=True)
def _read_payload(self):
"""
Read the body of a request to get the payload.
:returns: payload as a dictionary or empty dict if there was no payload
"""
content_type = self.headers.get("content-type")
# Check the content type; if not set, we assume json.
# We can have a charset just after the content type, e.g.
# application/json; charset=UTF-8.
if content_type and not re.search(r"\s*application/json\s*;?", content_type):
raise SgJiraBridgeBadRequestError(
"Invalid content-type %s, it must be 'application/json'" % content_type
)
content_len = int(self.headers.get("content-length", 0))
body = self.rfile.read(content_len)
payload = {}
if body:
payload = json.loads(body)
return payload
def _handle_sync_request(self, path_parts, parameters):
"""
Handle a request to sync between ShotGrid and Jira in either direction.
At this point, only the action (the first path_part) from the request
path has been validated. The rest of the path_parts still need to be
validated before we proceed. We expect the path for this request to
be one of the following:
sg2jira/<settings_name>[/<sg_entity_type>/<sg_entity_id>]
jira2sg/<settings_name>/<jira_resource_type>/<jira_resource_key>
If the SG Entity is not specified in the path, it must be present in
the loaded payload.
:param list path_parts: List of strings representing each part of the
URL path that this request accessed. For example,
``["sg2jira", "default", "Task", "123"]``.
:param dict parameters: Optional additional parameters that were extracted
from the url.
:raises SgJiraBridgeBadRequestError: If there is any problem we detect with the
path, or payload.
"""
entity_type = None
entity_key = None
if len(path_parts) == 4:
direction, settings_name, entity_type, entity_key = path_parts
elif len(path_parts) == 2:
direction, settings_name = path_parts
else:
raise SgJiraBridgeBadRequestError("Invalid request path %s" % self.path)
if six.ensure_text(settings_name) not in self.server.sync_settings_names:
raise SgJiraBridgeBadRequestError(
"Invalid settings name %s" % settings_name
)
payload = self._read_payload()
if direction == "sg2jira":
# Ensure we get a valid entity_type and entity_id
if not entity_type or not entity_key:
# We need to retrieve this from the payload.
| entity_type = payload.get("entity_type")
entity_key = payload.get("entity_id") | conditional_block |
|
test.ts | securely at run time, either from a
* file on your server or using a service like AWS Secrets Manager.
*
* Create a function that fetches the secret key like below,
* and returns it via the callback or as a promise.
* Be sure to decode from base64 if needed.
*/
// tslint:disable-next-line:max-line-length
const TOKEN_HS512 = 'eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.Hk1Qgr18H-VwmDnMcljqEFy8_F1zeIVS-FY-3Xl2pKsMEeFii5-WEVDyBNRPredB9JjoNAkR23iOkTDN4Mu-Xg'
const getSecretKeyCb = (header: JwtHeader, callback: SigningKeyCallback) => {
const filename = `test/test.secret.${header.alg.toLowerCase()}.b64.txt`
fs.readFile(filename, (err, b64) => {
if (err) {
return callback(err)
}
// toString is important, if the first arg is a buffer the second is ignored
const secret = Buffer.from(b64.toString(), 'base64')
return callback(null, secret)
})
}
const getSecretKeyP = (header: JwtHeader) =>
new Promise((res, rej) => {
getSecretKeyCb(header, (err, secret) => {
if (err) {
rej(err)
} else {
res(secret)
}
})
})
test.cb('Authenticate with secret key, via promise', (t) => {
// Begin by creating your Api Builder as normal
const api = new ApiBuilder()
// Next pass in the authenticator along with your key and any config
api.intercept(authenticator(getSecretKeyP))
// Register your routes as normal
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
// Export the proxyRouter normally in your code
// export handler = api.proxyRouter
// Here we call it instead to test it
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_HS512 }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
test.cb('Authenticate with secret key, via callback', (t) => {
// Begin by creating your Api Builder as normal
const api = new ApiBuilder()
// Next pass in the authenticator along with your key and any config
api.intercept(authenticator(getSecretKeyCb))
// Register your routes as normal
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
// Export the proxyRouter normally in your code
// export handler = api.proxyRouter
// Here we call it instead to test it
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_HS512 }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
/**
* ## JWT Headers & Signature
*
* You can access the headers, payload and signature of the JWT
*/
test.cb('Headers & Signature access', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY))
api.get('/token', (event: APIGatewayProxyEventJwt) => ({
algorithm: event.jwt.header.alg,
signature: event.jwt.signature.substr(0, 12),
subscriber: event.jwt.payload.sub
}))
testApi(t, api, {
context: { method: 'GET', path: '/token' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, {
body: JSON.stringify({
algorithm: 'RS512',
signature: 'iNa6ZAKSn3J8',
subscriber: '1234567890'
}),
statusCode: 200
})
})
/**
* ## Extra Config
*
* You can specify more arguments to increase security and help catch bugs
*
* For a full list see https://www.npmjs.com/package/jsonwebtoken#jwtverifytoken-secretorpublickey-options-callback
*/
test.cb('Specify algorithm - success', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY, { algorithms: ['RS512'] }))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
test.cb('Specify algorithm - failure', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY, { algorithms: ['RS256'] }))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, {
body: '"Unauthorised: JsonWebTokenError invalid algorithm"',
statusCode: 401
})
})
test.cb('Specify audience - success', (t) => {
// tslint:disable-next-line:max-line-length
const TOKEN_RS512_AUD = 'eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJhdWQiOiJGNDJFRDA4Mi03OTlGLTQ3NkItQjc3RS0xMjAxM0Y1Mzc5QTUifQ.VAnH9ozEAcL3foiSgqJspqS05AdYchn57uKrbCUEwX9uXbsg8nct9bL7y8Omw6qg5ZdTcNsnor8tysGW460yOmg06Pbx0SRHJifJGLpy1bOCWRPG_5NB5aM6uKf78T2QCJXm9f73nKfZ9QJUlfzW41bT2khnsO8gTVYo9yd3yesrKegMlSomxd4VrZFYz4jbNh2f9FUe8MNkubfOxVbM5U7sh5aZMs_uoef08Gxp3Aqx7fPpzj16uW2JTNlhoIYUF4J33T0SufgiR1Xw3R3Jn2BnwdlfgqjLrv0lxzDzHoPyPP8i6TSl3notTcTmLc_GItdcnLNPn8wtjxKNW81tMQ'
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY, { audience: 'F42ED082-799F-476B-B77E-12013F5379A5' }))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`)
testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512_AUD }
}, {
body: '"Hello John Doe!"',
statusCode: 200
})
})
test.cb('Specify audience - failure', (t) => {
const api = new ApiBuilder()
api.intercept(authenticator(PUBLIC_KEY, { audience: 'F42ED082-799F-476B-B77E-12013F5379A5' }))
api.get('/greeting', (event: APIGatewayProxyEventJwt) => `Hello ${event.jwt.payload.name}!`) | body: '"Unauthorised: JsonWebTokenError jwt audience invalid. expected: F42ED082-799F-476B-B77E | testApi(t, api, {
context: { method: 'GET', path: '/greeting' },
headers: { Authorization: 'bearer ' + TOKEN_RS512 }
}, { | random_line_split |
main.rs | /2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
//($id : expr) => ( format!("http://localhost:8000/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
}
macro_rules! musicbrainz_file {
($id : expr) => ( format!("mb_{}.json", $id))
}
macro_rules! cover_art_url {
($id : expr) => ( format!("http://coverartarchive.org/release-group/{}", $id) )
}
macro_rules! cover_art_file {
($id : expr) => ( format!("ca_{}.json", $id) )
}
#[allow(dead_code)]
#[allow(unused_must_use)]
fn filter_successful(resource: &str, mut resp : hyper::client::response::Response) -> Result<String, TypedIOError>
{
match resp.status {
StatusCode::Ok => {
let mut s = String::new();
resp.read_to_string(&mut s);
Ok(s)
},
code @ _ => Err( TypedIOError {
resource : resource.to_string(),
cause: hyper::Error::Io(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Service responded with statuscode {}", code)))
})
}
}
#[allow(dead_code)]
struct SimpleFs {
directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
fn store(&self, id : &str, content: &str) |
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
.header(UserAgent(USER_ARGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs | {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
if !path.exists() {
File::create(path)
.and_then(|mut f| f.write_all(content.as_bytes()));
};
} | identifier_body |
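The main.rs rows above and below center on one fan-out idiom in query_cover_art: a thread is spawned per album, each thread gets a clone of an Arc-wrapped lookup closure, and results come back over per-item mpsc channels, with a placeholder substituted when a lookup fails. Below is a minimal, self-contained sketch of that idiom only; fetch and items are invented names for illustration, not part of the original project.

use std::sync::{mpsc, Arc};
use std::thread;

fn main() {
    let items = vec!["a".to_string(), "b".to_string(), "c".to_string()];
    // Shared lookup closure, like shareable_cover_art above; the body is a stub.
    let fetch = Arc::new(|id: &str| -> Result<String, String> {
        Ok(format!("cover art for {}", id))
    });

    // One worker thread per item; keep each thread's receiving end.
    let receivers: Vec<_> = items
        .into_iter()
        .map(|id| {
            let (tx, rx) = mpsc::channel();
            let fetch = Arc::clone(&fetch);
            thread::spawn(move || {
                // Errors travel back over the channel too, so the collector decides.
                let result = fetch(&id).map(|body| (id, body));
                let _ = tx.send(result);
            });
            rx
        })
        .collect();

    // Collect in spawn order; on failure fall back to a placeholder, much like
    // the AlbumReference::from(err) fallback in the rows above.
    for rx in receivers {
        match rx.recv().unwrap() {
            Ok((id, body)) => println!("{}: {}", id, body),
            Err(err) => eprintln!("lookup failed: {}", err),
        }
    }
}

Receiving in spawn order keeps the collected output aligned with the input order even though the lookups run concurrently, which is the property the original relies on when it rebuilds the album list.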
main.rs | directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
fn store(&self, id : &str, content: &str) {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
if !path.exists() {
File::create(path)
.and_then(|mut f| f.write_all(content.as_bytes()));
};
}
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
.header(UserAgent(USER_ARGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
fn image_from_cover_art_response(payload : &str) -> String {
let body : CoverArtResponse = json::decode(&payload).unwrap();
body.images.into_iter().find(|item| item.front).unwrap().image
}
#[test]
fn test_image_from_cover_art_response() {
let payload = "{\"images\":[{\"front\":true,\"image\":\"http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg\"}]}";
let response = image_from_cover_art_response(payload);
assert_eq!("http://coverartarchive.org/release/a146429a-cedc-3ab0-9e41-1aaf5f6cdc2d/3012495605.jpg", response);
}
fn process_mb_response(payload: &str) -> ArtistReference {
let a: ArtistReference = json::decode(payload).unwrap();
a
}
enum Provider {
Musicbrainz,
CoverArt
}
impl Provider {
fn | fs | identifier_name |
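The musicbrainz_url!, musicbrainz_file!, cover_art_url! and cover_art_file! declarations that open these main.rs rows are plain macro_rules! string builders around format!. A small usage sketch, with a made-up placeholder id rather than a real release-group id:

macro_rules! cover_art_url {
    ($id:expr) => {
        format!("http://coverartarchive.org/release-group/{}", $id)
    };
}

fn main() {
    // The macro expands to a plain format! call, so the result is an owned String.
    let url = cover_art_url!("some-release-group-id");
    assert_eq!(
        url,
        "http://coverartarchive.org/release-group/some-release-group-id"
    );
    println!("{}", url);
}

A function returning String would work equally well here; the macro form mostly saves a signature and keeps the URL templates next to each other.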
|
main.rs | macro_rules! musicbrainz_file {
($id : expr) => ( format!("mb_{}.json", $id))
}
macro_rules! cover_art_url {
($id : expr) => ( format!("http://coverartarchive.org/release-group/{}", $id) )
}
macro_rules! cover_art_file {
($id : expr) => ( format!("ca_{}.json", $id) )
}
#[allow(dead_code)]
#[allow(unused_must_use)]
fn filter_successful(resource: &str, mut resp : hyper::client::response::Response) -> Result<String, TypedIOError>
{
match resp.status {
StatusCode::Ok => {
let mut s = String::new();
resp.read_to_string(&mut s);
Ok(s)
},
code @ _ => Err( TypedIOError {
resource : resource.to_string(),
cause: hyper::Error::Io(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Service responded with statuscode {}", code)))
})
}
}
#[allow(dead_code)]
struct SimpleFs {
directory : String
}
#[allow(dead_code)]
#[allow(unused_must_use)]
impl SimpleFs {
fn read(&self, id: String) -> Result<String, TypedIOError> {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
read_resource_from_file(path.as_path())
}
fn store(&self, id : &str, content: &str) {
std::fs::create_dir_all(Path::new(&self.directory));
let path = Path::new(&self.directory).join(id);
if !path.exists() {
File::create(path)
.and_then(|mut f| f.write_all(content.as_bytes()));
};
}
}
#[allow(unused_must_use)]
#[allow(dead_code)]
fn save_response_to_file(url : &str, content : &str, provider : &Provider) {
let fs = provider.fs();
let id = provider.extract_id(url);
fs.store(&provider.format_file_name(&id), content);
}
trait Meshup {
fn artist_resource_by_id (&self, id : &str) -> String;
fn album_resource_by_id (&self, id : &str) -> String;
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError>;
fn query_cover_art<F>(&self, artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
});
rx
}).collect();
let updated_album_refs: Vec<AlbumReference> = threads.into_iter().map(|thread| {
let item = thread.recv().unwrap();
item.unwrap_or_else(|err| {
println_stderr!("{}", err);
AlbumReference::from(err)
})
}).collect();
updated_album_refs
}
}
struct FileMeshup;
struct WebMeshup;
fn read_resource_from_url(url : &str, provider : &Provider) -> Result<String, TypedIOError> {
println_stderr!("invoking {}", url);
let client = Client::new();
let mime: Mime = "text/json".parse().unwrap();
let response = client.get(url)
.header(ContentType::json())
.header(UserAgent(USER_ARGENT.to_owned()))
.header(Connection::keep_alive())
.header(Accept(vec![qitem(mime)]))
.send()
.map_err(|err| TypedIOError {
resource : url.to_string(),
cause : err
})
.and_then(|resp| filter_successful(url, resp))
.map(|resp| {
if cfg!(feature="meshup_mode_save_web") {
save_response_to_file(url, &resp, provider);
}
resp
});
response
}
impl Meshup for WebMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
cover_art_url!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
musicbrainz_url!(id)
}
fn query(&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_query_url = self.artist_resource_by_id(id);
print!("{}", mb_query_url);
let mb_response = try!(read_resource_from_url(&mb_query_url, &Provider::Musicbrainz).map_err(|err| ArtistError {
artist_id: id.to_string(),
cause: err
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(artist_ref.name.clone(), artist_ref.albums, |id| {
let url = cover_art_url!(id);
read_resource_from_url(&url, &Provider::CoverArt)
});
Ok(ArtistReference {
name : artist_ref.name.clone(),
albums : albums
})
}
}
fn read_resource_from_file(path : &Path) -> Result<String, TypedIOError> {
let mut content = String::new();
File::open(&path)
.and_then(|mut file| file.read_to_string(&mut content))
.map(|_| {
//return the content rather than the size
content
})
.map_err(|err| TypedIOError {
resource : path.to_str().unwrap_or("").to_string(),
cause : hyper::Error::from(err)
})
}
impl Meshup for FileMeshup {
fn album_resource_by_id (&self, id : &str) -> String {
musicbrainz_file!(id)
}
fn artist_resource_by_id (&self, id : &str) -> String {
cover_art_file!(id)
}
fn query (&self, id : &str) -> Result<ArtistReference, ResourceError> {
let mb_file = self.album_resource_by_id(id);
let fs = Provider::Musicbrainz.fs();
let mb_response = try!(fs.read(mb_file).map_err(|err| {
ArtistError {
artist_id : id.to_string(),
cause: err.into()
}
}));
let artist_ref = process_mb_response(&mb_response);
let albums = self.query_cover_art(id.to_string(), artist_ref.albums, |id| {
let file_name = cover_art_file!(id);
let fs = Provider::CoverArt.fs();
fs.read(file_name)
});
Ok(ArtistReference {
name: artist_ref.name,
albums: albums
})
}
}
#[allow(dead_code)]
fn query_cover_art<F>(artist_id: String, list_of_references: Vec<AlbumReference>, cover_art: F) -> Vec<AlbumReference>
where F: Send + 'static + Fn(&str)->Result<String, TypedIOError> + Sync {
let album_references = Arc::new(list_of_references);
let shareable_cover_art = Arc::new(cover_art);
let threads : Vec<_> = album_references.clone().iter().map(|album_reference| {
let mut album = album_reference.clone();
let (tx, rx): (mpsc::Sender<Result<AlbumReference, ResourceError>>, mpsc::Receiver<Result<AlbumReference, ResourceError>>) = mpsc::channel();
let child_cover_art = shareable_cover_art.clone();
let artist_id = artist_id.to_string();
let album_id = album.id.clone();
let album_title = album.title.clone();
thread::spawn(move || {
let result = child_cover_art(&album_id)
.map(|resp| {
album.with_image(image_from_cover_art_response(&resp));
album
})
.map_err(|err| ResourceError::AlbumError {
artist_id : artist_id,
album_id: album_id,
album_title : Some(album_title),
cause: TypedIOError::from(err)
});
tx.send(result)
| ($id : expr) => ( format!("http://musicbrainz.org/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
//($id : expr) => ( format!("http://localhost:8000/ws/2/artist/{}?&fmt=json&inc=url-rels+release-groups", $id))
}
| random_line_split |
|
memory.rs | .
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
// ??? = 0x36;
// ??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
pub fn peek(world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO: ???
0,
0x37 =>
// TODO: ???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0;
monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{}, | world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO: ???
{},
0x37 =>
// TODO: ???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
}
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
const CP437: &'static [ |
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA => | random_line_split |
memory.rs | .
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
// ??? = 0x36;
// ??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
pub fn | (world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO: ???
0,
0x37 =>
// TODO: ???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0;
monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{},
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA =>
world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO: ???
{},
0x37 =>
// TODO: ???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
}
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
const CP437: &'static | peek | identifier_name |
memory.rs | .
pub const DOOR_APPEARANCE: u8 = 0x30;
pub const WALL_APPEARANCE: u8 = 0x31;
pub const FLOOR_COLOR: u8 = 0x32;
// u8 to add/subtract from depth when stairs are used; normally 1.
pub const STAIRS_DELTA: u8 = 0x33;
// u8 to add to timers each turn if non-zero; normally 0xff.
pub const TIMER_DELTA: u8 = 0x34;
// s8 to add to each damage roll; normally 0x00.
pub const DAMAGE_OFFSET: u8 = 0x35;
// ??? = 0x36;
// ??? = 0x37;
// The higher this is, the more text gets screwed up.
pub const TEXT_SYNC: u8 = 0x38;
// Player stats.
pub const PLAYER_HP: u8 = 0x39;
pub const PLAYER_TP: u8 = 0x3a;
pub const PLAYER_XLDEF: u8 = 0x3b; // hi-bits XL, lo-bits Def
pub const PLAYER_POSITION: u8 = 0x3c;
pub const PLAYER_DEPTH: u8 = 0x3d;
pub const METAL_ACID_RESISTANCE: u8 = 0x3e; // hi-bits Metal, lo-bits Acid
pub const FIRE_ELEC_RESISTANCE: u8 = 0x3f; // hi-bits Fire, lo-bits Elect
pub fn peek(world: &World, address: u8) -> u8 {
match address {
PLAYER_APPEARANCE =>
world.player_appearance_byte,
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize],
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &world.current_level()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => ((monster.kind as u8) << 4)
| ((monster.charged as u8) << 3)
| ((monster.vulnerable as u8) << 2)
| ((monster.venomous as u8) << 1)
| monster.corrupted as u8,
MONSTER_POSITION => monster.position.as_byte(),
MONSTER_HP => monster.hp,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
world.player.spell_memory.iter().enumerate()
.map(|(index, &known)| (known as u8) << index).sum(),
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
0,
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize],
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte,
DOOR_APPEARANCE =>
world.door_appearance_byte,
WALL_APPEARANCE =>
world.wall_appearance_byte,
FLOOR_COLOR =>
// TODO: remove floor color
0,
STAIRS_DELTA =>
world.player.stairs_delta,
TIMER_DELTA =>
world.player.timer_delta,
DAMAGE_OFFSET =>
unsafe { transmute(world.player.damage_offset) },
0x36 =>
// TODO: ???
0,
0x37 =>
// TODO: ???
0,
TEXT_SYNC =>
world.player.text_sync,
PLAYER_HP =>
world.player.hp,
PLAYER_TP =>
world.player.tp,
PLAYER_XLDEF =>
(world.player.xl << 4) | unsafe { transmute::<i8,u8>(world.player.def) },
PLAYER_POSITION =>
world.player.position.as_byte(),
PLAYER_DEPTH =>
world.player.depth,
METAL_ACID_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[0] << 4)
| (world.player.aptitude[1] & 0x0f))
},
FIRE_ELEC_RESISTANCE =>
// TODO: use element names
unsafe {
transmute((world.player.aptitude[2] << 4)
| (world.player.aptitude[3] & 0x0f))
},
_ => panic!("memory::peek - invalid address {}", address)
}
}
pub fn poke(world: &mut World, address: u8, value: u8) | monster.venomous = value & 0b0010 != 0;
monster.corrupted = value & 0b0001 != 0;
},
MONSTER_POSITION => monster.position = Point::of_byte(value),
MONSTER_HP => monster.hp = value,
_ => unreachable!()
}
},
_ if address >= SPELL_MEMORY && address < IDENTIFICATION =>
for (index, known) in world.player.spell_memory.iter_mut().enumerate() {
*known = value & (1 << index) != 0
},
_ if address >= IDENTIFICATION && address < TIMERS =>
// TODO: implement identification
{},
_ if address >= TIMERS && address < INVENTORY =>
world.player.timer[(address - TIMERS) as usize] = value,
_ if address >= INVENTORY && address < DOOR_APPEARANCE =>
world.player.inventory.slots[(address - INVENTORY) as usize].byte = value,
DOOR_APPEARANCE =>
world.door_appearance_byte = value,
WALL_APPEARANCE =>
world.wall_appearance_byte = value,
FLOOR_COLOR =>
// TODO: remove floor color
{},
STAIRS_DELTA =>
world.player.stairs_delta = value,
TIMER_DELTA =>
world.player.timer_delta = value,
DAMAGE_OFFSET =>
world.player.damage_offset = unsafe { transmute(value) },
0x36 =>
// TODO: ???
{},
0x37 =>
// TODO: ???
{},
TEXT_SYNC =>
world.player.text_sync = value,
PLAYER_HP =>
world.player.hp = value,
PLAYER_TP =>
world.player.tp = value,
PLAYER_XLDEF => {
world.player.xl = value >> 4;
world.player.def = upcast_i4(value)
},
PLAYER_POSITION =>
world.player.position = Point::of_byte(value),
PLAYER_DEPTH =>
world.player.depth = value,
METAL_ACID_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[0] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[1] = upcast_i4(value)
},
FIRE_ELEC_RESISTANCE => {
// TODO: use element names
// note: transmute before shift for sign-extension
world.player.aptitude[2] = unsafe { transmute::<u8, i8>(value) } >> 4;
world.player.aptitude[3] = upcast_i4(value)
},
_ => panic!("memory::poke - invalid address")
}
}
// pretend the low four bits of our u8 argument are an "i4" and sign-extend to i8
fn upcast_i4(the_i4: u8) -> i8 {
(unsafe { transmute::<u8, i8>(the_i4) } << 4) >> 4
}
fn report_player_appearance_change(world: &mut World, old: Sprite, new: Sprite) {
let new_color_name = util::color_name(new.color[0]);
let new_char_name = util::punctuation_name(new.character);
if old.character != new.character && old.color != new.color {
world.log.tell(format!("You turn into {} {}!", util::a_or_an(new_color_name), new_char_name));
} else if old.character != new.character {
world.log.tell(format!("You turn into {}!", util::a_or_an(new_char_name)));
} else if old.color != new.color {
world.log.tell(format!("You turn {}!", new_color_name));
}
}
const CP437: &'static | {
match address {
PLAYER_APPEARANCE => {
let old_sprite = Sprite::of_byte(world.player_appearance_byte, true);
let new_sprite = Sprite::of_byte(value, true);
report_player_appearance_change(world, old_sprite, new_sprite);
world.player_appearance_byte = value;
},
_ if address >= PLAYER_NAME && address < MONSTERS =>
world.player.name[(address - PLAYER_NAME) as usize] = value,
_ if address >= MONSTERS && address < SPELL_MEMORY => {
let monster = &mut world.current_level_mut()
.monsters[(address - MONSTERS) as usize / 3];
match (address - MONSTERS) % 3 {
MONSTER_FLAGS => {
monster.kind = unsafe { transmute(value >> 4) };
monster.charged = value & 0b1000 != 0;
monster.vulnerable = value & 0b0100 != 0; | identifier_body |
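The memory.rs rows above repeatedly pack two 4-bit fields into a single byte (PLAYER_XLDEF is hi-bits XL, lo-bits Def, and the resistance pairs follow the same layout), with upcast_i4 sign-extending the low nibble back to an i8. Below is a small self-contained round trip of that scheme; the values are arbitrary examples, and an as cast stands in for the transmute used in the original (bit-for-bit identical for u8 to i8).

fn upcast_i4(the_i4: u8) -> i8 {
    ((the_i4 as i8) << 4) >> 4
}

fn main() {
    let xl: u8 = 3;
    let def: i8 = -2;

    // peek-style packing: hi-bits XL, lo-bits Def (see PLAYER_XLDEF above)
    let packed = (xl << 4) | ((def as u8) & 0x0f);
    assert_eq!(packed, 0x3e);

    // poke-style unpacking
    assert_eq!(packed >> 4, 3);        // high nibble is the unsigned XL
    assert_eq!(upcast_i4(packed), -2); // low nibble is sign-extended back to Def
    println!("packed = {:#04x}", packed);
}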
client.rs | vy", format!("{}", x.v.1)),
("status", params_to_json(&x.status)),
("heat", format!("{}", x.heat)),
("max_heat", format!("{}", x.max_heat)),
("max_accelarate", format!("{}", x.max_accelarate)),
("commands", format!("[{}]", commands.connect(","))),
])
}
#[derive(Debug, Clone)]
pub enum Command {
Accelerate(i32, (i32, i32)),
Detonate(i32, Option<(i32, i32)>), // 1, (impact, 32)
Shoot(i32, (i32, i32), i32, Option<(i32, i32)>), // 2, target, power, (impact, 4)
Split(i32, Params),
Unknown,
}
fn command_to_json(x: &Command) -> String {
match x {
Command::Accelerate(id, (x, y)) => format!(
"{{\"type\":\"accelerate\",\"id\":{},\"x\":{},\"y\":{}}}", id, x, y),
Command::Detonate(id, _) => format!(
"{{\"type\":\"detonate\",\"id\":{}}}", id),
Command::Shoot(id, (x, y), power, _) => format!(
"{{\"type\":\"shoot\",\"id\":{},\"x\":{},\"y\":{},\"power\":{}}}",
id, x, y, power),
Command::Split(id, params) => format!(
"{{\"type\":\"split\",\"id\":{},\"params\":{}}}",
id, params_to_json(¶ms)),
_ => format!("{{}}"),
}
}
#[derive(Debug, Clone)]
pub struct Params {
pub energy: i32,
pub power: i32,
pub cool: i32,
pub life: i32,
}
fn params_to_json(x: &Params) -> String {
format!("{{\"energy\":{},\"power\":{},\"cool\":{},\"life\":{}}}",
x.energy, x.power, x.cool, x.life)
}
fn map_to_json(m: Vec<(&str, String)>) -> String {
let mut kvs = Vec::new();
for kv in m {
kvs.push(format!("\"{}\":{}", kv.0, kv.1));
}
format!("{{{}}}", kvs.join(","))
}
impl std::fmt::Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Accelerate(id, v) => write!(f, "[0, {}, <{}, {}>]", id, v.0, v.1)?,
Command::Detonate(id, None) => write!(f, "[1, {}]", id)?,
Command::Detonate(id, Some((a, b))) => write!(f, "[1, {}, {}, {}]", id, a, b)?,
Command::Shoot(id, t, p, None) => write!(f, "[2, {}, <{}, {}>, {}]", id, t.0, t.1, p)?,
Command::Shoot(id, t, p, Some((a, b))) => {
write!(f, "[2, {}, <{}, {}>, {}, {}, {}]", id, t.0, t.1, p, a, b)?
}
Command::Split(id, params) => write!(
f,
"[3, {}, [{}, {}, {}, {}]]",
id, params.energy, params.power, params.cool, params.life
)?,
_ => {
panic!("unreachable");
}
}
Ok(())
}
}
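// Added example: `format!("{}", Command::Accelerate(0, (1, -1)))` renders the
// wire-format text `[0, 0, <1, -1>]`, which is the textual form later fed through
// `Client::send` for modulation.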
impl From<&E> for Command {
fn from(e: &E) -> Command {
let e = get_list(e).unwrap();
match get_num(&e[0]) {
0 => Command::Accelerate(-1, get_pair(&e[1])),
1 => Command::Detonate(
-1,
if e.len() < 3 {
None
} else {
Some((get_num(&e[1]), get_num(&e[2])))
},
),
2 => Command::Shoot(
-1,
get_pair(&e[1]),
get_num(&e[2]),
if e.len() < 5 {
None
} else {
Some((get_num(&e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &str) {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
} else if !printed {
print!("{}", &msg);
}
}
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
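// Added summary of `send` (comment only, not original code): lisp text -> parse_lisp/to_text
// -> parser::parse -> parser::eval -> modulation::modulate -> HTTP POST to `server_url`
// -> modulation::demodulate -> `E` expression. The `last_send` mutex is only used to
// measure and warn about the AI's thinking time between consecutive requests.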
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power: i32, cool: i32, life: i32) -> Response {
let resp = self.send(&form | at!(
"[3, {}, [{}, {}, {}, {}]]",
self.player_key, energy, power, cool, life
));
parse(resp)
}
pub fn command(&self, cs: &[ | identifier_body |
|
client.rs | for c in &x.commands {
commands.push(command_to_json(&c));
}
map_to_json(vec![
("role", format!("{}", x.role)),
("x", format!("{}", x.pos.0)),
("y", format!("{}", x.pos.1)),
("vx", format!("{}", x.v.0)),
("vy", format!("{}", x.v.1)),
("status", params_to_json(&x.status)),
("heat", format!("{}", x.heat)),
("max_heat", format!("{}", x.max_heat)),
("max_accelarate", format!("{}", x.max_accelarate)),
("commands", format!("[{}]", commands.connect(","))),
])
}
#[derive(Debug, Clone)]
pub enum Command {
Accelerate(i32, (i32, i32)),
Detonate(i32, Option<(i32, i32)>), // 1, (impact, 32)
Shoot(i32, (i32, i32), i32, Option<(i32, i32)>), // 2, target, power, (impact, 4)
Split(i32, Params),
Unknown,
}
fn command_to_json(x: &Command) -> String {
match x {
Command::Accelerate(id, (x, y)) => format!(
"{{\"type\":\"accelerate\",\"id\":{},\"x\":{},\"y\":{}}}", id, x, y),
Command::Detonate(id, _) => format!(
"{{\"type\":\"detonate\",\"id\":{}}}", id),
Command::Shoot(id, (x, y), power, _) => format!(
"{{\"type\":\"shoot\",\"id\":{},\"x\":{},\"y\":{},\"power\":{}}}",
id, x, y, power),
Command::Split(id, params) => format!(
"{{\"type\":\"split\",\"id\":{},\"params\":{}}}",
id, params_to_json(&params)),
_ => format!("{{}}"),
}
}
#[derive(Debug, Clone)]
pub struct Params {
pub energy: i32,
pub power: i32,
pub cool: i32,
pub life: i32,
}
fn params_to_json(x: &Params) -> String {
format!("{{\"energy\":{},\"power\":{},\"cool\":{},\"life\":{}}}",
x.energy, x.power, x.cool, x.life)
}
fn map_to_json(m: Vec<(&str, String)>) -> String {
let mut kvs = Vec::new();
for kv in m {
kvs.push(format!("\"{}\":{}", kv.0, kv.1));
}
format!("{{{}}}", kvs.join(","))
}
impl std::fmt::Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Accelerate(id, v) => write!(f, "[0, {}, <{}, {}>]", id, v.0, v.1)?,
Command::Detonate(id, None) => write!(f, "[1, {}]", id)?,
Command::Detonate(id, Some((a, b))) => write!(f, "[1, {}, {}, {}]", id, a, b)?,
Command::Shoot(id, t, p, None) => write!(f, "[2, {}, <{}, {}>, {}]", id, t.0, t.1, p)?,
Command::Shoot(id, t, p, Some((a, b))) => {
write!(f, "[2, {}, <{}, {}>, {}, {}, {}]", id, t.0, t.1, p, a, b)?
}
Command::Split(id, params) => write!(
f,
"[3, {}, [{}, {}, {}, {}]]",
id, params.energy, params.power, params.cool, params.life
)?,
_ => {
panic!("unreachable");
}
}
Ok(())
}
}
impl From<&E> for Command {
fn from(e: &E) -> Command {
let e = get_list(e).unwrap();
match get_num(&e[0]) {
0 => Command::Accelerate(-1, get_pair(&e[1])),
1 => Command::Detonate(
-1,
if e.len() < 3 {
None
} else {
Some((get_num(&e[1]), get_num(&e[2])))
},
),
2 => Command::Shoot(
-1,
get_pair(&e[1]),
get_num(&e[2]),
if e.len() < 5 {
None
} else {
Some((get_num(&e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &s | {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
} else if !printed {
print!("{}", &msg);
}
}
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power | tr) | identifier_name |
client.rs | e[3]), get_num(&e[4])))
},
),
3 => {
let params = get_list(&e[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
Command::Split(
-1,
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
},
)
}
_ => Command::Unknown,
}
}
}
pub struct Client {
server_url: String,
player_key: String,
file: Option<RefCell<BufWriter<File>>>,
client: reqwest::Client,
}
impl Client {
pub fn new(server_url: String) -> Self {
let server_url = if server_url.contains("?apiKey") {
server_url
} else {
server_url + "/aliens/send"
};
Self {
server_url,
player_key: String::new(),
file: None,
client: reqwest::Client::new(),
}
}
pub fn gui(&self, name: &str, msg: &str) {
if let Ok(_) = env::var("JUDGE_SERVER") {
return;
}
let t = match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Ok(t) => t.as_nanos(),
_ => 0,
};
let msg = format!("###GUI\t{}\t{}\t{}\t{}\n", t, self.player_key, name, msg);
let mut printed = false;
if let Some(f) = &self.file {
f.borrow_mut()
.write_all(msg.as_bytes())
.expect("Failed to write to file");
printed = true;
}
if let Ok(_) = env::var("GUI") {
print!("{}", &msg);
} else if !printed {
print!("{}", &msg);
}
}
pub fn send(&self, msg: &str) -> E {
eprintln!("send: {}", msg);
let msg = to_text(&parse_lisp(msg).0);
let ss = msg.split_whitespace().collect::<Vec<_>>();
let (exp, n) = parser::parse(&ss, 0);
assert_eq!(n, ss.len());
let e = parser::eval(&exp, true);
let msg = modulation::modulate(&e);
// eprintln!("send: {}", msg);
if let Ok(mut guard) = last_send.lock() {
if let Some(t) = guard.clone() {
let duration = Utc::now() - t;
if duration.num_milliseconds() > 500 {
eprintln!("############################################################");
eprintln!("AI took too much CPU time! ({} ms)", duration.num_milliseconds());
eprintln!("############################################################");
}
eprintln!("AI took {} ms.", duration.num_milliseconds());
} else {
eprintln!("First send request.");
}
}
let resp = self
.client
.post(&self.server_url)
.body(msg)
.send()
.unwrap()
.text()
.unwrap();
if let Ok(mut guard) = last_send.lock() {
*guard = Some(Utc::now());
}
// eprintln!("resp: {}", resp);
let resp = modulation::demodulate(&resp);
eprintln!("resp: {}", &resp);
if let Some(state) = &resp.into_iter().skip(3).next() {
if let Some(ship_and_cmds) = state.into_iter().skip(2).next() {
for ship_and_cmd in ship_and_cmds {
eprintln!("ship: {}", &ship_and_cmd);
}
}
}
resp
}
pub fn join(&mut self, player_key: &str) -> Response {
self.player_key = player_key.to_owned();
if let Err(_) = env::var("JUDGE_SERVER") {
self.file = Some(RefCell::new(BufWriter::new(
File::create(&format!("out/{}", self.player_key)).expect("out directory is missing"),
)));
}
let resp = self.send(&format!("[2, {}, [192496425430, 103652820]]", player_key));
parse(resp)
}
pub fn start(&self, energy: i32, power: i32, cool: i32, life: i32) -> Response {
let resp = self.send(&format!(
"[3, {}, [{}, {}, {}, {}]]",
self.player_key, energy, power, cool, life
));
parse(resp)
}
pub fn command(&self, cs: &[Command]) -> Response {
let resp = self.send(&format!(
"[4, {}, [{}]]",
self.player_key,
cs.iter().join(", ")
));
let resp = parse(resp);
self.gui("RESP", &response_to_json(&resp));
return resp;
}
}
pub fn get_num(a: &E) -> i32 {
if let E::Num(a) = a {
*a as i32
} else {
panic!("not number");
}
}
pub fn get_pair(a: &E) -> (i32, i32) {
if let E::Pair(a, b) = a {
(get_num(a), get_num(b))
} else {
panic!("not pair");
}
}
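// Added note: `get_num` and `get_pair` are shape-asserting helpers; a decoded
// `<34, -46>` pair becomes the tuple `(34, -46)`, and both panic if the expression
// does not have the expected form.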
pub fn parse(e: E) -> Response {
let a = get_list(&e).unwrap();
assert_eq!(a.len(), 4);
assert_eq!(get_num(&a[0]), 1);
let stage = get_num(&a[1]);
let info = get_list(&a[2]).unwrap();
let deadline = get_num(&info[0]);
let role = get_num(&info[1]);
let ability = get_list(&info[2])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let ability = Ability {
potential: ability[0],
max_heat: ability[1],
max_accelarate: ability[2],
};
let range = get_list(&info[3])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let range = range[0]..range[1];
let params = get_list(&info[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let opponent_params = if params.len() != 4 {
Params {
energy: -1,
power: -1,
cool: -1,
life: -1,
}
} else {
Params {
energy: params[0],
power: params[1],
cool: params[2],
life: params[3],
}
};
let state = get_list(&a[3]).unwrap();
let (tick, strange, ships) = if state.len() > 0 {
let tick = get_num(&state[0]);
let strange = get_list(&state[1])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<i32>>();
let strange = strange[0]..strange[1];
let ships = get_list(&state[2])
.unwrap()
.into_iter()
.map(|a| {
let tmp = get_list(&a).unwrap();
let s = get_list(&tmp[0]).unwrap();
let commands = get_list(&tmp[1]).unwrap();
let role = get_num(&s[0]);
let id = get_num(&s[1]); // shipId
let pos = get_pair(&s[2]);
let v = get_pair(&s[3]);
let status = get_list(&s[4])
.unwrap()
.into_iter()
.map(|e| get_num(&e))
.collect::<Vec<_>>();
let status = Params {
energy: status[0],
power: status[1],
cool: status[2],
life: status[3],
};
let heat = get_num(&s[5]);
let max_heat = get_num(&s[6]); | let max_accelarate = get_num(&s[7]);
// [1, 1, [256, 1, [448, 2, 128], [16, 128], []], [1, [16, 128], [[[1, 0, <34, -46>, <0, 2>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]], [[0, 1, <-34, 48>, <0, 0>, [445, 0, 0, 1], 8, 128, 2], [[0, <0, -1>]]]]]] | random_line_split |
|
xds.go | or when the xDS
// server does not return any security configuration. Attempts to create
// client credentials without a fallback credentials will fail.
FallbackCreds credentials.TransportCredentials
}
// NewClientCredentials returns a new client-side transport credentials
// implementation which uses xDS APIs to fetch its security configuration.
func | (opts ClientOptions) (credentials.TransportCredentials, error) {
if opts.FallbackCreds == nil {
return nil, errors.New("missing fallback credentials")
}
return &credsImpl{
isClient: true,
fallback: opts.FallbackCreds,
}, nil
}
// credsImpl is an implementation of the credentials.TransportCredentials
// interface which uses xDS APIs to fetch its security configuration.
type credsImpl struct {
isClient bool
fallback credentials.TransportCredentials
}
// handshakeAttrKey is the type used as the key to store HandshakeInfo in
// the Attributes field of resolver.Address.
type handshakeAttrKey struct{}
// SetHandshakeInfo returns a copy of addr in which the Attributes field is
// updated with hInfo.
func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo)
return addr
}
// getHandshakeInfo returns a pointer to the HandshakeInfo stored in attr.
func getHandshakeInfo(attr *attributes.Attributes) *HandshakeInfo {
v := attr.Value(handshakeAttrKey{})
hi, _ := v.(*HandshakeInfo)
return hi
}
// HandshakeInfo wraps all the security configuration required by client and
// server handshake methods in credsImpl. The xDS implementation will be
// responsible for populating these fields.
//
// Safe for concurrent access.
type HandshakeInfo struct {
mu sync.Mutex
rootProvider certprovider.Provider
identityProvider certprovider.Provider
acceptedSANs map[string]bool // Only on the client side.
}
// SetRootCertProvider updates the root certificate provider.
func (hi *HandshakeInfo) SetRootCertProvider(root certprovider.Provider) {
hi.mu.Lock()
hi.rootProvider = root
hi.mu.Unlock()
}
// SetIdentityCertProvider updates the identity certificate provider.
func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) {
hi.mu.Lock()
hi.identityProvider = identity
hi.mu.Unlock()
}
// SetAcceptedSANs updates the list of accepted SANs.
func (hi *HandshakeInfo) SetAcceptedSANs(sans []string) {
hi.mu.Lock()
hi.acceptedSANs = make(map[string]bool, len(sans))
for _, san := range sans {
hi.acceptedSANs[san] = true
}
hi.mu.Unlock()
}
// UseFallbackCreds returns true when fallback credentials are to be used based
// on the contents of the HandshakeInfo.
func (hi *HandshakeInfo) UseFallbackCreds() bool {
if hi == nil {
return true
}
hi.mu.Lock()
defer hi.mu.Unlock()
return hi.identityProvider == nil && hi.rootProvider == nil
}
func (hi *HandshakeInfo) validate(isClient bool) error {
hi.mu.Lock()
defer hi.mu.Unlock()
// On the client side, rootProvider is mandatory. IdentityProvider is
// optional based on whether the client is doing TLS or mTLS.
if isClient && hi.rootProvider == nil {
return errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
// On the server side, identityProvider is mandatory. RootProvider is
// optional based on whether the server is doing TLS or mTLS.
if !isClient && hi.identityProvider == nil {
return errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
return nil
}
func (hi *HandshakeInfo) makeTLSConfig(ctx context.Context) (*tls.Config, error) {
hi.mu.Lock()
// Since the call to KeyMaterial() can block, we read the providers under
// the lock but call the actual function after releasing the lock.
rootProv, idProv := hi.rootProvider, hi.identityProvider
hi.mu.Unlock()
// InsecureSkipVerify needs to be set to true because we need to perform
// custom verification to check the SAN on the received certificate.
// Currently the Go stdlib does complete verification of the cert (which
// includes hostname verification) or none. We are forced to go with the
// latter and perform the normal cert validation ourselves.
cfg := &tls.Config{InsecureSkipVerify: true}
if rootProv != nil {
km, err := rootProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err)
}
cfg.RootCAs = km.Roots
}
if idProv != nil {
km, err := idProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err)
}
cfg.Certificates = km.Certs
}
return cfg, nil
}
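// Added note: when a provider is nil the corresponding tls.Config field is simply left
// unset, so client-side plain TLS (no identity cert) and server-side TLS without client
// verification remain possible; SAN checking is done separately via matchingSANExists.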
func (hi *HandshakeInfo) matchingSANExists(cert *x509.Certificate) bool {
if len(hi.acceptedSANs) == 0 {
// An empty list of acceptedSANs means "accept everything".
return true
}
var sans []string
// SANs can be specified in any of these four fields on the parsed cert.
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
hi.mu.Lock()
defer hi.mu.Unlock()
for _, san := range sans {
if hi.acceptedSANs[san] {
return true
}
}
return false
}
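// Added example (illustrative): with accepted SANs {"foo.example.com"}, a peer
// certificate whose DNS SANs include "foo.example.com" is accepted, while one carrying
// only "bar.example.com" is rejected; an empty accepted-SAN list accepts any peer, as
// handled at the top of matchingSANExists.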
// NewHandshakeInfo returns a new instance of HandshakeInfo with the given root
// and identity certificate providers.
func NewHandshakeInfo(root, identity certprovider.Provider, sans ...string) *HandshakeInfo {
acceptedSANs := make(map[string]bool, len(sans))
for _, san := range sans {
acceptedSANs[san] = true
}
return &HandshakeInfo{
rootProvider: root,
identityProvider: identity,
acceptedSANs: acceptedSANs,
}
}
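// Hypothetical usage sketch (added for illustration; the providers, SPIFFE ID and
// address below are assumptions, not part of this file):
//
//	hi := NewHandshakeInfo(rootProvider, identityProvider, "spiffe://cluster/ns/default/sa/backend")
//	addr := SetHandshakeInfo(resolver.Address{Addr: "10.0.0.1:443"}, hi)
//	// A balancer hands addr to NewSubConn(); ClientHandshake below then recovers the
//	// HandshakeInfo from the address attributes when the TLS handshake runs.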
// ClientHandshake performs the TLS handshake on the client-side.
//
// It looks for the presence of a HandshakeInfo value in the passed in context
// (added using a call to NewContextWithHandshakeInfo()), and retrieves identity
// and root certificates from there. It also retrieves a list of acceptable SANs
// and uses a custom verification function to validate the certificate presented
// by the peer. It uses fallback credentials if no HandshakeInfo is present in
// the passed in context.
func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if !c.isClient {
return nil, nil, errors.New("ClientHandshake() is not supported for server credentials")
}
// The CDS balancer constructs a new HandshakeInfo using a call to
// NewHandshakeInfo(), and then adds it to the attributes field of the
// resolver.Address when handling calls to NewSubConn(). The transport layer
// takes care of shipping these attributes in the context to this handshake
// function. We first read the credentials.ClientHandshakeInfo type from the
// context, which contains the attributes added by the CDS balancer. We then
// read the HandshakeInfo from the attributes to get to the actual data that
// we need here for the handshake.
chi := credentials.ClientHandshakeInfoFromContext(ctx)
// If there are no attributes in the received context or the attributes do
// not contain a HandshakeInfo, it could either mean that the user did not
// specify an `xds` scheme in their dial target or that the xDS server did
// not provide any security configuration. In both of these cases, we use
// the fallback credentials specified by the user.
if chi.Attributes == nil {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
hi := getHandshakeInfo(chi.Attributes)
if hi.UseFallbackCreds() {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
if err := hi.validate(c.isClient); err != nil {
return nil, nil, err
}
// We build the tls.Config with the following values
// 1. Root certificate as returned by the root provider.
// 2. Identity certificate as returned by the identity provider. This may be
// empty on the client side, if the client is not doing mTLS.
// 3. InsecureSkipVerify to true. Certificates used in Mesh environments
// usually contain the identity of the workload presenting the
// certificate as a SAN (instead of a hostname in the CommonName field).
// This means that normal certificate verification as done by the
// standard library will fail.
// 4. Key usage to match whether client/server usage.
| NewClientCredentials | identifier_name |
xds.go | or when the xDS
// server does not return any security configuration. Attempts to create
// client credentials without a fallback credentials will fail.
FallbackCreds credentials.TransportCredentials
}
// NewClientCredentials returns a new client-side transport credentials
// implementation which uses xDS APIs to fetch its security configuration.
func NewClientCredentials(opts ClientOptions) (credentials.TransportCredentials, error) {
if opts.FallbackCreds == nil {
return nil, errors.New("missing fallback credentials")
}
return &credsImpl{
isClient: true,
fallback: opts.FallbackCreds,
}, nil
}
// credsImpl is an implementation of the credentials.TransportCredentials
// interface which uses xDS APIs to fetch its security configuration.
type credsImpl struct {
isClient bool
fallback credentials.TransportCredentials
}
// handshakeAttrKey is the type used as the key to store HandshakeInfo in
// the Attributes field of resolver.Address.
type handshakeAttrKey struct{}
// SetHandshakeInfo returns a copy of addr in which the Attributes field is
// updated with hInfo.
func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo)
return addr
}
// getHandshakeInfo returns a pointer to the HandshakeInfo stored in attr.
func getHandshakeInfo(attr *attributes.Attributes) *HandshakeInfo {
v := attr.Value(handshakeAttrKey{})
hi, _ := v.(*HandshakeInfo)
return hi
}
// HandshakeInfo wraps all the security configuration required by client and
// server handshake methods in credsImpl. The xDS implementation will be
// responsible for populating these fields.
//
// Safe for concurrent access.
type HandshakeInfo struct {
mu sync.Mutex
rootProvider certprovider.Provider
identityProvider certprovider.Provider
acceptedSANs map[string]bool // Only on the client side.
}
// SetRootCertProvider updates the root certificate provider.
func (hi *HandshakeInfo) SetRootCertProvider(root certprovider.Provider) {
hi.mu.Lock()
hi.rootProvider = root
hi.mu.Unlock()
}
// SetIdentityCertProvider updates the identity certificate provider.
func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) {
hi.mu.Lock()
hi.identityProvider = identity
hi.mu.Unlock()
}
// SetAcceptedSANs updates the list of accepted SANs.
func (hi *HandshakeInfo) SetAcceptedSANs(sans []string) {
hi.mu.Lock()
hi.acceptedSANs = make(map[string]bool, len(sans))
for _, san := range sans {
hi.acceptedSANs[san] = true
}
hi.mu.Unlock()
}
// UseFallbackCreds returns true when fallback credentials are to be used based
// on the contents of the HandshakeInfo.
func (hi *HandshakeInfo) UseFallbackCreds() bool {
if hi == nil {
return true
}
hi.mu.Lock()
defer hi.mu.Unlock()
return hi.identityProvider == nil && hi.rootProvider == nil
}
func (hi *HandshakeInfo) validate(isClient bool) error {
hi.mu.Lock()
defer hi.mu.Unlock()
// On the client side, rootProvider is mandatory. IdentityProvider is
// optional based on whether the client is doing TLS or mTLS.
if isClient && hi.rootProvider == nil {
return errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
// On the server side, identityProvider is mandatory. RootProvider is
// optional based on whether the server is doing TLS or mTLS.
if !isClient && hi.identityProvider == nil |
return nil
}
func (hi *HandshakeInfo) makeTLSConfig(ctx context.Context) (*tls.Config, error) {
hi.mu.Lock()
// Since the call to KeyMaterial() can block, we read the providers under
// the lock but call the actual function after releasing the lock.
rootProv, idProv := hi.rootProvider, hi.identityProvider
hi.mu.Unlock()
// InsecureSkipVerify needs to be set to true because we need to perform
// custom verification to check the SAN on the received certificate.
// Currently the Go stdlib does complete verification of the cert (which
// includes hostname verification) or none. We are forced to go with the
// latter and perform the normal cert validation ourselves.
cfg := &tls.Config{InsecureSkipVerify: true}
if rootProv != nil {
km, err := rootProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err)
}
cfg.RootCAs = km.Roots
}
if idProv != nil {
km, err := idProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err)
}
cfg.Certificates = km.Certs
}
return cfg, nil
}
func (hi *HandshakeInfo) matchingSANExists(cert *x509.Certificate) bool {
if len(hi.acceptedSANs) == 0 {
// An empty list of acceptedSANs means "accept everything".
return true
}
var sans []string
// SANs can be specified in any of these four fields on the parsed cert.
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
hi.mu.Lock()
defer hi.mu.Unlock()
for _, san := range sans {
if hi.acceptedSANs[san] {
return true
}
}
return false
}
// NewHandshakeInfo returns a new instance of HandshakeInfo with the given root
// and identity certificate providers.
func NewHandshakeInfo(root, identity certprovider.Provider, sans ...string) *HandshakeInfo {
acceptedSANs := make(map[string]bool, len(sans))
for _, san := range sans {
acceptedSANs[san] = true
}
return &HandshakeInfo{
rootProvider: root,
identityProvider: identity,
acceptedSANs: acceptedSANs,
}
}
// ClientHandshake performs the TLS handshake on the client-side.
//
// It looks for the presence of a HandshakeInfo value in the passed in context
// (added using a call to NewContextWithHandshakeInfo()), and retrieves identity
// and root certificates from there. It also retrieves a list of acceptable SANs
// and uses a custom verification function to validate the certificate presented
// by the peer. It uses fallback credentials if no HandshakeInfo is present in
// the passed in context.
func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if !c.isClient {
return nil, nil, errors.New("ClientHandshake() is not supported for server credentials")
}
// The CDS balancer constructs a new HandshakeInfo using a call to
// NewHandshakeInfo(), and then adds it to the attributes field of the
// resolver.Address when handling calls to NewSubConn(). The transport layer
// takes care of shipping these attributes in the context to this handshake
// function. We first read the credentials.ClientHandshakeInfo type from the
// context, which contains the attributes added by the CDS balancer. We then
// read the HandshakeInfo from the attributes to get to the actual data that
// we need here for the handshake.
chi := credentials.ClientHandshakeInfoFromContext(ctx)
// If there are no attributes in the received context or the attributes do
// not contain a HandshakeInfo, it could either mean that the user did not
// specify an `xds` scheme in their dial target or that the xDS server did
// not provide any security configuration. In both of these cases, we use
// the fallback credentials specified by the user.
if chi.Attributes == nil {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
hi := getHandshakeInfo(chi.Attributes)
if hi.UseFallbackCreds() {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
if err := hi.validate(c.isClient); err != nil {
return nil, nil, err
}
// We build the tls.Config with the following values
// 1. Root certificate as returned by the root provider.
// 2. Identity certificate as returned by the identity provider. This may be
// empty on the client side, if the client is not doing mTLS.
// 3. InsecureSkipVerify to true. Certificates used in Mesh environments
// usually contain the identity of the workload presenting the
// certificate as a SAN (instead of a hostname in the CommonName field).
// This means that normal certificate verification as done by the
// standard library will fail.
// 4. Key usage to match whether client/server usage | {
return errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server")
} | conditional_block |
xds.go | target or when the xDS
// server does not return any security configuration. Attempts to create
// client credentials without a fallback credentials will fail.
FallbackCreds credentials.TransportCredentials
}
// NewClientCredentials returns a new client-side transport credentials
// implementation which uses xDS APIs to fetch its security configuration.
func NewClientCredentials(opts ClientOptions) (credentials.TransportCredentials, error) {
if opts.FallbackCreds == nil {
return nil, errors.New("missing fallback credentials")
}
return &credsImpl{
isClient: true,
fallback: opts.FallbackCreds,
}, nil
}
// credsImpl is an implementation of the credentials.TransportCredentials |
// handshakeAttrKey is the type used as the key to store HandshakeInfo in
// the Attributes field of resolver.Address.
type handshakeAttrKey struct{}
// SetHandshakeInfo returns a copy of addr in which the Attributes field is
// updated with hInfo.
func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo)
return addr
}
// getHandshakeInfo returns a pointer to the HandshakeInfo stored in attr.
func getHandshakeInfo(attr *attributes.Attributes) *HandshakeInfo {
v := attr.Value(handshakeAttrKey{})
hi, _ := v.(*HandshakeInfo)
return hi
}
// HandshakeInfo wraps all the security configuration required by client and
// server handshake methods in credsImpl. The xDS implementation will be
// responsible for populating these fields.
//
// Safe for concurrent access.
type HandshakeInfo struct {
mu sync.Mutex
rootProvider certprovider.Provider
identityProvider certprovider.Provider
acceptedSANs map[string]bool // Only on the client side.
}
// SetRootCertProvider updates the root certificate provider.
func (hi *HandshakeInfo) SetRootCertProvider(root certprovider.Provider) {
hi.mu.Lock()
hi.rootProvider = root
hi.mu.Unlock()
}
// SetIdentityCertProvider updates the identity certificate provider.
func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) {
hi.mu.Lock()
hi.identityProvider = identity
hi.mu.Unlock()
}
// SetAcceptedSANs updates the list of accepted SANs.
func (hi *HandshakeInfo) SetAcceptedSANs(sans []string) {
hi.mu.Lock()
hi.acceptedSANs = make(map[string]bool, len(sans))
for _, san := range sans {
hi.acceptedSANs[san] = true
}
hi.mu.Unlock()
}
// UseFallbackCreds returns true when fallback credentials are to be used based
// on the contents of the HandshakeInfo.
func (hi *HandshakeInfo) UseFallbackCreds() bool {
if hi == nil {
return true
}
hi.mu.Lock()
defer hi.mu.Unlock()
return hi.identityProvider == nil && hi.rootProvider == nil
}
func (hi *HandshakeInfo) validate(isClient bool) error {
hi.mu.Lock()
defer hi.mu.Unlock()
// On the client side, rootProvider is mandatory. IdentityProvider is
// optional based on whether the client is doing TLS or mTLS.
if isClient && hi.rootProvider == nil {
return errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
// On the server side, identityProvider is mandatory. RootProvider is
// optional based on whether the server is doing TLS or mTLS.
if !isClient && hi.identityProvider == nil {
return errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
return nil
}
func (hi *HandshakeInfo) makeTLSConfig(ctx context.Context) (*tls.Config, error) {
hi.mu.Lock()
// Since the call to KeyMaterial() can block, we read the providers under
// the lock but call the actual function after releasing the lock.
rootProv, idProv := hi.rootProvider, hi.identityProvider
hi.mu.Unlock()
// InsecureSkipVerify needs to be set to true because we need to perform
// custom verification to check the SAN on the received certificate.
// Currently the Go stdlib does complete verification of the cert (which
// includes hostname verification) or none. We are forced to go with the
// latter and perform the normal cert validation ourselves.
cfg := &tls.Config{InsecureSkipVerify: true}
if rootProv != nil {
km, err := rootProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err)
}
cfg.RootCAs = km.Roots
}
if idProv != nil {
km, err := idProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err)
}
cfg.Certificates = km.Certs
}
return cfg, nil
}
func (hi *HandshakeInfo) matchingSANExists(cert *x509.Certificate) bool {
if len(hi.acceptedSANs) == 0 {
// An empty list of acceptedSANs means "accept everything".
return true
}
var sans []string
// SANs can be specified in any of these four fields on the parsed cert.
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
hi.mu.Lock()
defer hi.mu.Unlock()
for _, san := range sans {
if hi.acceptedSANs[san] {
return true
}
}
return false
}
// NewHandshakeInfo returns a new instance of HandshakeInfo with the given root
// and identity certificate providers.
func NewHandshakeInfo(root, identity certprovider.Provider, sans ...string) *HandshakeInfo {
acceptedSANs := make(map[string]bool, len(sans))
for _, san := range sans {
acceptedSANs[san] = true
}
return &HandshakeInfo{
rootProvider: root,
identityProvider: identity,
acceptedSANs: acceptedSANs,
}
}
// ClientHandshake performs the TLS handshake on the client-side.
//
// It looks for the presence of a HandshakeInfo value in the passed in context
// (added using a call to NewContextWithHandshakeInfo()), and retrieves identity
// and root certificates from there. It also retrieves a list of acceptable SANs
// and uses a custom verification function to validate the certificate presented
// by the peer. It uses fallback credentials if no HandshakeInfo is present in
// the passed in context.
func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if !c.isClient {
return nil, nil, errors.New("ClientHandshake() is not supported for server credentials")
}
// The CDS balancer constructs a new HandshakeInfo using a call to
// NewHandshakeInfo(), and then adds it to the attributes field of the
// resolver.Address when handling calls to NewSubConn(). The transport layer
// takes care of shipping these attributes in the context to this handshake
// function. We first read the credentials.ClientHandshakeInfo type from the
// context, which contains the attributes added by the CDS balancer. We then
// read the HandshakeInfo from the attributes to get to the actual data that
// we need here for the handshake.
chi := credentials.ClientHandshakeInfoFromContext(ctx)
// If there are no attributes in the received context or the attributes do
// not contain a HandshakeInfo, it could either mean that the user did not
// specify an `xds` scheme in their dial target or that the xDS server did
// not provide any security configuration. In both of these cases, we use
// the fallback credentials specified by the user.
if chi.Attributes == nil {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
hi := getHandshakeInfo(chi.Attributes)
if hi.UseFallbackCreds() {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
if err := hi.validate(c.isClient); err != nil {
return nil, nil, err
}
// We build the tls.Config with the following values
// 1. Root certificate as returned by the root provider.
// 2. Identity certificate as returned by the identity provider. This may be
// empty on the client side, if the client is not doing mTLS.
// 3. InsecureSkipVerify to true. Certificates used in Mesh environments
// usually contain the identity of the workload presenting the
// certificate as a SAN (instead of a hostname in the CommonName field).
// This means that normal certificate verification as done by the
// standard library will fail.
// 4. Key usage to match whether client/server usage.
| // interface which uses xDS APIs to fetch its security configuration.
type credsImpl struct {
isClient bool
fallback credentials.TransportCredentials
} | random_line_split |
xds.go | or when the xDS
// server does not return any security configuration. Attempts to create
// client credentials without a fallback credentials will fail.
FallbackCreds credentials.TransportCredentials
}
// NewClientCredentials returns a new client-side transport credentials
// implementation which uses xDS APIs to fetch its security configuration.
func NewClientCredentials(opts ClientOptions) (credentials.TransportCredentials, error) |
// credsImpl is an implementation of the credentials.TransportCredentials
// interface which uses xDS APIs to fetch its security configuration.
type credsImpl struct {
isClient bool
fallback credentials.TransportCredentials
}
// handshakeAttrKey is the type used as the key to store HandshakeInfo in
// the Attributes field of resolver.Address.
type handshakeAttrKey struct{}
// SetHandshakeInfo returns a copy of addr in which the Attributes field is
// updated with hInfo.
func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address {
addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo)
return addr
}
// getHandshakeInfo returns a pointer to the HandshakeInfo stored in attr.
func getHandshakeInfo(attr *attributes.Attributes) *HandshakeInfo {
v := attr.Value(handshakeAttrKey{})
hi, _ := v.(*HandshakeInfo)
return hi
}
// HandshakeInfo wraps all the security configuration required by client and
// server handshake methods in credsImpl. The xDS implementation will be
// responsible for populating these fields.
//
// Safe for concurrent access.
type HandshakeInfo struct {
mu sync.Mutex
rootProvider certprovider.Provider
identityProvider certprovider.Provider
acceptedSANs map[string]bool // Only on the client side.
}
// SetRootCertProvider updates the root certificate provider.
func (hi *HandshakeInfo) SetRootCertProvider(root certprovider.Provider) {
hi.mu.Lock()
hi.rootProvider = root
hi.mu.Unlock()
}
// SetIdentityCertProvider updates the identity certificate provider.
func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) {
hi.mu.Lock()
hi.identityProvider = identity
hi.mu.Unlock()
}
// SetAcceptedSANs updates the list of accepted SANs.
func (hi *HandshakeInfo) SetAcceptedSANs(sans []string) {
hi.mu.Lock()
hi.acceptedSANs = make(map[string]bool, len(sans))
for _, san := range sans {
hi.acceptedSANs[san] = true
}
hi.mu.Unlock()
}
// UseFallbackCreds returns true when fallback credentials are to be used based
// on the contents of the HandshakeInfo.
func (hi *HandshakeInfo) UseFallbackCreds() bool {
if hi == nil {
return true
}
hi.mu.Lock()
defer hi.mu.Unlock()
return hi.identityProvider == nil && hi.rootProvider == nil
}
func (hi *HandshakeInfo) validate(isClient bool) error {
hi.mu.Lock()
defer hi.mu.Unlock()
// On the client side, rootProvider is mandatory. IdentityProvider is
// optional based on whether the client is doing TLS or mTLS.
if isClient && hi.rootProvider == nil {
return errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
// On the server side, identityProvider is mandatory. RootProvider is
// optional based on whether the server is doing TLS or mTLS.
if !isClient && hi.identityProvider == nil {
return errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server")
}
return nil
}
func (hi *HandshakeInfo) makeTLSConfig(ctx context.Context) (*tls.Config, error) {
hi.mu.Lock()
// Since the call to KeyMaterial() can block, we read the providers under
// the lock but call the actual function after releasing the lock.
rootProv, idProv := hi.rootProvider, hi.identityProvider
hi.mu.Unlock()
// InsecureSkipVerify needs to be set to true because we need to perform
// custom verification to check the SAN on the received certificate.
// Currently the Go stdlib does complete verification of the cert (which
// includes hostname verification) or none. We are forced to go with the
// latter and perform the normal cert validation ourselves.
cfg := &tls.Config{InsecureSkipVerify: true}
if rootProv != nil {
km, err := rootProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err)
}
cfg.RootCAs = km.Roots
}
if idProv != nil {
km, err := idProv.KeyMaterial(ctx)
if err != nil {
return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err)
}
cfg.Certificates = km.Certs
}
return cfg, nil
}
func (hi *HandshakeInfo) matchingSANExists(cert *x509.Certificate) bool {
if len(hi.acceptedSANs) == 0 {
// An empty list of acceptedSANs means "accept everything".
return true
}
var sans []string
// SANs can be specified in any of these four fields on the parsed cert.
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
hi.mu.Lock()
defer hi.mu.Unlock()
for _, san := range sans {
if hi.acceptedSANs[san] {
return true
}
}
return false
}
// NewHandshakeInfo returns a new instance of HandshakeInfo with the given root
// and identity certificate providers.
func NewHandshakeInfo(root, identity certprovider.Provider, sans ...string) *HandshakeInfo {
acceptedSANs := make(map[string]bool, len(sans))
for _, san := range sans {
acceptedSANs[san] = true
}
return &HandshakeInfo{
rootProvider: root,
identityProvider: identity,
acceptedSANs: acceptedSANs,
}
}
// ClientHandshake performs the TLS handshake on the client-side.
//
// It looks for the presence of a HandshakeInfo value in the passed in context
// (added using a call to NewContextWithHandshakeInfo()), and retrieves identity
// and root certificates from there. It also retrieves a list of acceptable SANs
// and uses a custom verification function to validate the certificate presented
// by the peer. It uses fallback credentials if no HandshakeInfo is present in
// the passed in context.
func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if !c.isClient {
return nil, nil, errors.New("ClientHandshake() is not supported for server credentials")
}
// The CDS balancer constructs a new HandshakeInfo using a call to
// NewHandshakeInfo(), and then adds it to the attributes field of the
// resolver.Address when handling calls to NewSubConn(). The transport layer
// takes care of shipping these attributes in the context to this handshake
// function. We first read the credentials.ClientHandshakeInfo type from the
// context, which contains the attributes added by the CDS balancer. We then
// read the HandshakeInfo from the attributes to get to the actual data that
// we need here for the handshake.
chi := credentials.ClientHandshakeInfoFromContext(ctx)
// If there are no attributes in the received context or the attributes do
// not contain a HandshakeInfo, it could either mean that the user did not
// specify an `xds` scheme in their dial target or that the xDS server did
// not provide any security configuration. In both of these cases, we use
// the fallback credentials specified by the user.
if chi.Attributes == nil {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
hi := getHandshakeInfo(chi.Attributes)
if hi.UseFallbackCreds() {
return c.fallback.ClientHandshake(ctx, authority, rawConn)
}
if err := hi.validate(c.isClient); err != nil {
return nil, nil, err
}
// We build the tls.Config with the following values
// 1. Root certificate as returned by the root provider.
// 2. Identity certificate as returned by the identity provider. This may be
// empty on the client side, if the client is not doing mTLS.
// 3. InsecureSkipVerify to true. Certificates used in Mesh environments
// usually contain the identity of the workload presenting the
// certificate as a SAN (instead of a hostname in the CommonName field).
// This means that normal certificate verification as done by the
// standard library will fail.
// 4. Key usage to match whether client/server usage | {
if opts.FallbackCreds == nil {
return nil, errors.New("missing fallback credentials")
}
return &credsImpl{
isClient: true,
fallback: opts.FallbackCreds,
}, nil
} | identifier_body |
oscillator.py | 2 * pi * c / (lamp * 1e-9) + Omega
lami = 1e9 * 2 * pi * c / (omegai)
WDMS_pars = ([lamp, lams], # WDM up downs in wavelengths [m]
[lami, lams],
[lami, lamp],
[lami, lams])
WDM_vec = [WDM(i[0], i[1], sim_wind.fv, c,fopa)
for i in WDMS_pars] # WDM up downs in wavelengths [m]
# Phase modulators contructors
pm_fopa = Phase_modulation_FOPA(sim_wind.fv, where)
pm_WDM1 = Phase_modulation_infase_WDM(P_s, where, WDM_vec[0])
"--------------------------------------------------------"
# for ei,i in enumerate(WDM_vec):
# i.plot(filename = str(ei))
"----------------------Formulate splicers--------------------"
splicers_vec = [Splicer(loss=i) for i in spl_losses]
"------------------------------------------------------------"
f_p, f_s = sim_wind.fv[where[0][0], where[0][1]], sim_wind.fv[where[1][0], where[1][1]]
ex = Plotter_saver(plots, filesaves, sim_wind.fv,
sim_wind.t) # construct exporter
ro = oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P_p, P_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, non_integrand, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1,fopa)
return None
def main():
"-----------------------------Stable parameters----------------------------"
# Number of computing cores for sweep
num_cores = arguments_determine(1)
# maximum tolerable error per step in integration
maxerr = 1e-13
ss = 1 # includes self steepening term
Df_band_vec = [5, 5, 10, 20]
fr = 0.18
plots = False # Do you want plots, (slow!)
filesaves = True # Do you want data dump?
complete = False
nplot = 1 # number of plots within fibre min is 2
if arguments_determine(-1) == 0:
fopa = True # If no oscillations then the WDMs are deleted to
# make the system in to a FOPA
else:
fopa = False
if 'mpi' in sys.argv:
method = 'mpi'
elif 'joblib' in sys.argv:
method = 'joblib'
else:
method = 'single'
"--------------------------------------------------------------------------"
stable_dic = {'num_cores': num_cores, 'maxerr': maxerr, 'ss': ss, 'plots': plots,
'nplot': nplot, 'filesaves': filesaves,
'fr':fr, 'fopa':fopa}
"------------------------Can be variable parameters------------------------"
n2 = 2.5e-20 # Nonlinear index [m/W]
gama = 10e-3 # Overwirtes n2 and Aeff w/m
alphadB = 0 # 0.0011667#666666666668 # loss within fibre[dB/m]
z = 18 # Length of the fibre
wave_idx = 0
power_area_idx = 0
N = np.array([i for i in range(2,13)]) # 2**N grid points
# Power list. [wavelength, power_area]
P_p_vec = [[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 4.4, 0.1), my_arange(4.5, 5, 0.05),
my_arange(5.1, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ]]
Df_band = Df_band_vec[power_area_idx]
P_p = P_p_vec[wave_idx][power_area_idx]
P_p = [6]#[4.9,4.95,5]
P_s = 0#100e-3
TFWHM_p = 0 # full with half max of pump
TFWHM_s = 0 # full with half max of signal
# loss of each type of splices [dB]
spl_losses = [0, 0, 1.4]
betas = np.array([0, 0, 0, 6.756e-2, # propagation constants [ps^n/m]
-1.002e-4, 3.671e-7]) * 1e-3
lamda_c = 1051.85e-9
# Zero dispersion wavelength [nm]
# max at ls,li = 1095, 1010
WDMS_pars = ([1048., 1204.16],
[927.7, 1204.16]) # WDM up downs in wavelengths [m]
lamp_vec = [1046,1047, 1048, 1049, 1050]
lamp = [lamp_vec[wave_idx]]
lams = ['lock' for i in range(len(lamp))]
lamp = lamp_vec[wave_idx]
lams = 'lock'
var_dic = {'n2': n2, 'gama': gama, 'alphadB': alphadB, 'z': z, 'P_p': P_p,
'P_s': P_s, 'TFWHM_p': TFWHM_p, 'TFWHM_s': TFWHM_s,
'spl_losses': spl_losses, 'betas': betas,
'lamda_c': lamda_c, 'WDMS_pars': WDMS_pars,
'lamp': lamp, 'lams': lams, 'N':N, 'Df_band': Df_band}
"--------------------------------------------------------------------------"
outside_var_key = 'P_p'
inside_var_key = 'N'
inside_var = var_dic[inside_var_key]
outside_var = var_dic[outside_var_key]
del var_dic[outside_var_key]
del var_dic[inside_var_key]
"----------------------------Simulation------------------------------------"
D_ins = [{'index': i, inside_var_key: insvar}
for i, insvar in enumerate(inside_var)]
large_dic = {**stable_dic, **var_dic}
if len(inside_var) < num_cores:
num_cores = len(inside_var)
profiler_bool = arguments_determine(0)
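# Added summary (not in the original): the sweep below iterates the "outside" variable
# (P_p, the pump power) serially; for each value it prepares a fresh folder tree and then
# fans the "inside" variable (the 2**N grid sizes) out either serially (when profiling),
# over MPI ranks, or over joblib worker processes, depending on the command-line flags.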
for kk, variable in enumerate(outside_var):
| create_file_structure(kk)
_temps = create_destroy(inside_var, str(kk))
_temps.prepare_folder()
large_dic['lams'] = lams[kk]
large_dic['master_index'] = kk
large_dic[outside_var_key] = variable
if profiler_bool:
for i in range(len(D_ins)):
formulate(**{**D_ins[i], ** large_dic})
elif method == 'mpi':
iterables = ({**D_ins[i], ** large_dic} for i in range(len(D_ins)))
with MPIPoolExecutor() as executor:
A = executor.map(formulate, iterables)
else:
A = Parallel(n_jobs=num_cores)(delayed(formulate)(**{**D_ins[i], ** large_dic}) for i in range(len(D_ins)))
_temps.cleanup_folder() | conditional_block |
|
oscillator.py |
U_original_pump = np.copy(U)
# Pass the original pump through the WDM1, port1 is in to the loop, port2
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
u, U = WDM_vec[0].pass_through((U, noise_new))[0]
ro = -1
t_total = 0
factors_xpm, factors_fwm,gama,tsh, w_tiled = \
dAdzmm.factors_xpm, dAdzmm.factors_fwm, dAdzmm.gama, dAdzmm.tsh, dAdzmm.w_tiled
dz,dzstep,maxerr = int_fwm.dz,int_fwm.dzstep,int_fwm.maxerr
Dop = np.ascontiguousarray(Dop/2)
factors_xpm = np.ascontiguousarray(factors_xpm)
factors_fwm = np.ascontiguousarray(factors_fwm)
gama = np.ascontiguousarray(gama)
tsh = np.ascontiguousarray(tsh)
w_tiled = np.ascontiguousarray(w_tiled)
while ro < max_rounds:
ro += 1
print('round', ro)
pulse_pos_dict = [
'round ' + str(ro) + ', ' + i for i in pulse_pos_dict_or]
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '1', pulse_pos_dict[3], D_pic[5], plots)
# Phase modulate before the Fibre
U = pm_fopa.modulate(U)
u = ifft(ifftshift(U, axes=-1))
#Pulse propagation
U, dz = pulse_propagation(u,dz,dzstep,maxerr, Dop,factors_xpm, factors_fwm, gama,tsh,w_tiled)
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '2', pulse_pos_dict[0], D_pic[2], plots)
max_noise = 10*noise_new.max()
#checks if the fft's are causing boundary condtion problems
if (U[:, 0] > max_noise).any() or (U[:, -1] > max_noise).any():
with open("error_log", "a") as myfile:
myfile.write("Pump: %5f, Seed: %5f, lamp: %5f, lams: %5f \n" % (
P0_p1, P0_s, 1e-3*c/f_p, 1e-3*c/f_s))
break
# pass through WDM2 port 2 continues and port 1 is out of the loop
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(out1, out2), (u, U) = WDM_vec[1].pass_through(
(U, noise_new))
ex.exporter(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '3', pulse_pos_dict[3], D_pic[3], plots)
# Splice7 after WDM2 for the signal
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(u, U) = splicers_vec[2].pass_through(
(U, noise_new))[0]
# Phase modulate the oscillating signal so that it is in phase with the one coming in
U = pm_WDM1.modulate(U_original_pump, U)
# Pass again through WDM1 with the signal now
(u, U) = WDM_vec[0].pass_through(
(U_original_pump, U))[0]
################################The outbound stuff#####################
ex.exporter(index, int_fwm, sim_wind, out1, out2, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '4', pulse_pos_dict[4], D_pic[6], plots)
consolidate(ro, int_fwm,master_index, index)
return ro
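# Added summary of the loop above: each round trip is phase modulation, fibre propagation,
# WDM2 splitting the output port from the recirculating port, a splice loss, phase-locking
# the feedback against the stored pump, and WDM1 recombining pump and signal, with spectra
# exported at the numbered checkpoints and an early abort if energy reaches the grid edges.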
def calc_P_out(U, U_original_pump, fv, t):
U = np.abs(U)**2
U_original_pump = np.abs(U_original_pump)**2
freq_band = 2
fp_id = np.where(U_original_pump == np.max(U_original_pump))[0][0]
plom = fp_id + 10
fv_id = np.where(U[plom:] == np.max(U[plom:]))[0][0]
fv_id += plom - 1
start, end = fv[fv_id] - freq_band, fv[fv_id] + freq_band
i = np.where(
np.abs(fv - start) == np.min(np.abs(fv - start)))[0][0]
j = np.where(
np.abs(fv - end) == np.min(np.abs(fv - end)))[0][0]
E_out = simps(U[i:j] * (t[1] - t[0])**2, fv[i:j])
P_out = E_out / (2 * np.abs(np.min(t)))
return P_out
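# Added note: calc_P_out finds the signal/idler peak away from the pump line (the +10 bin
# offset skips past the pump), integrates |U|^2 over a fixed +/-2-unit frequency band with
# Simpson's rule, and converts that energy to average power by dividing by the full time
# window 2*|t_min|; the band width and offset are heuristics inherited from this code.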
@unpack_args
def formulate(index, n2, gama, alphadB, z, P_p, P_s, TFWHM_p, TFWHM_s, spl_losses, betas,
lamda_c, WDMS_pars, lamp, lams, num_cores, maxerr, ss, plots,
N, nplot, master_index, filesaves, Df_band, fr, fopa):
"------------------propagation paramaters------------------"
dzstep = z / nplot # distance per step
dz_less = 1e2
int_fwm = sim_parameters(n2, 1, alphadB)
int_fwm.general_options(maxerr, ss)
int_fwm.propagation_parameters(N, z, nplot, dz_less)
lamda = lamp * 1e-9 # central wavelength of the grid[m]
"-----------------------------f-----------------------------"
"---------------------Aeff-Qmatrixes-----------------------"
M = Q_matrixes(int_fwm.nm, int_fwm.n2, lamda_c, gama)
"----------------------------------------------------------"
"---------------------Grid&window-----------------------"
P_p_bef,P_s_bef = pre_fibre_init_power(WDMS_pars[0][0], WDMS_pars[0][1], lamp, P_p, P_s)
fv, where, f_centrals = fv_creator(
lamp, lams, lamda_c, int_fwm, betas, M, P_p_bef,P_s_bef, Df_band)
print(fv[0][1] - fv[0][0])
#print(1e-3 * c / np.array(f_centrals))
p_pos, s_pos, i_pos = where
sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
"----------------------------------------------------------"
"---------------------Loss-in-fibres-----------------------"
slice_from_edge = (sim_wind.fv[-1] - sim_wind.fv[0]) / 100
loss = Loss(int_fwm, sim_wind, amax=0)
int_fwm.alpha = loss.atten_func_full(fv)
int_fwm.gama = np.array(
[-1j * n2 * 2 * M * pi * (1e12 * f_c) / (c) for f_c in f_centrals])
#if ss == 0:
# int_fwm.gama[:] = -1j * n2 * 2 * M * pi * (1e12 * f_centrals[3]) / (c)
int_fwm.gama[0:2] = 0
int_fwm.gama[5:] = 0
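# Only the central bands keep a nonzero nonlinear coefficient; the outer bands are
# effectively propagated linearly (intent inferred from the hard-coded indices above).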
#for i in range(len(int_fwm.gama)):
# print(i, int_fwm.gama[i])
#exit()
"----------------------------------------------------------"
"--------------------Dispersion----------------------------"
Dop = dispersion_operator(betas, lamda_c, int_fwm, sim_wind)
"----------------------------------------------------------"
"---------------------Raman Factors------------------------"
ram = Raman_factors(fr)
ram.set_raman_band(sim_wind)
"----------------------------------------------------------"
"--------------------Noise---------------------------------"
noise_obj = Noise(int_fwm, sim_wind)
"----------------------------------------------------------"
pulse_pos_dict_or = ('after propagation', "pass WDM2",
"pass WDM1 on port2 (remove pump)",
'add more pump', 'out')
keys = ['loading_data/green_dot_fopo/pngs/' +
str(i) + str('.png') for i in range(7)]
D_pic = [plt.imread(i) for i in keys]
"----------------Construct the integrator----------------"
non_integrand = Integrand(int_f | random_line_split |
||
oscillator.py | er(index, int_fwm, sim_wind, u, U, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '3', pulse_pos_dict[3], D_pic[3], plots)
# Splice7 after WDM2 for the signal
noise_new = noise_obj.noise_func_freq(int_fwm, sim_wind)
(u, U) = splicers_vec[2].pass_through(
(U, noise_new))[0]
#Phase modulate the oscillating signal so that it is in phase with the incoming one
U = pm_WDM1.modulate(U_original_pump, U)
# Pass again through WDM1 with the signal now
(u, U) = WDM_vec[0].pass_through(
(U_original_pump, U))[0]
################################The outbound stuff#####################
ex.exporter(index, int_fwm, sim_wind, out1, out2, P0_p1,
P0_s, f_p, f_s, ro, mode_names, master_index, str(ro) + '4', pulse_pos_dict[4], D_pic[6], plots)
consolidate(ro, int_fwm,master_index, index)
return ro
def calc_P_out(U, U_original_pump, fv, t):
U = np.abs(U)**2
U_original_pump = np.abs(U_original_pump)**2
freq_band = 2
fp_id = np.where(U_original_pump == np.max(U_original_pump))[0][0]
plom = fp_id + 10
fv_id = np.where(U[plom:] == np.max(U[plom:]))[0][0]
fv_id += plom - 1
start, end = fv[fv_id] - freq_band, fv[fv_id] + freq_band
i = np.where(
np.abs(fv - start) == np.min(np.abs(fv - start)))[0][0]
j = np.where(
np.abs(fv - end) == np.min(np.abs(fv - end)))[0][0]
E_out = simps(U[i:j] * (t[1] - t[0])**2, fv[i:j])
P_out = E_out / (2 * np.abs(np.min(t)))
return P_out
@unpack_args
def formulate(index, n2, gama, alphadB, z, P_p, P_s, TFWHM_p, TFWHM_s, spl_losses, betas,
lamda_c, WDMS_pars, lamp, lams, num_cores, maxerr, ss, plots,
N, nplot, master_index, filesaves, Df_band, fr, fopa):
"------------------propagation paramaters------------------"
dzstep = z / nplot # distance per step
dz_less = 1e2
int_fwm = sim_parameters(n2, 1, alphadB)
int_fwm.general_options(maxerr, ss)
int_fwm.propagation_parameters(N, z, nplot, dz_less)
lamda = lamp * 1e-9 # central wavelength of the grid[m]
"-----------------------------f-----------------------------"
"---------------------Aeff-Qmatrixes-----------------------"
M = Q_matrixes(int_fwm.nm, int_fwm.n2, lamda_c, gama)
"----------------------------------------------------------"
"---------------------Grid&window-----------------------"
P_p_bef,P_s_bef = pre_fibre_init_power(WDMS_pars[0][0], WDMS_pars[0][1], lamp, P_p, P_s)
fv, where, f_centrals = fv_creator(
lamp, lams, lamda_c, int_fwm, betas, M, P_p_bef,P_s_bef, Df_band)
print(fv[0][1] - fv[0][0])
#print(1e-3 * c / np.array(f_centrals))
p_pos, s_pos, i_pos = where
sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
"----------------------------------------------------------"
"---------------------Loss-in-fibres-----------------------"
slice_from_edge = (sim_wind.fv[-1] - sim_wind.fv[0]) / 100
loss = Loss(int_fwm, sim_wind, amax=0)
int_fwm.alpha = loss.atten_func_full(fv)
int_fwm.gama = np.array(
[-1j * n2 * 2 * M * pi * (1e12 * f_c) / (c) for f_c in f_centrals])
#if ss == 0:
# int_fwm.gama[:] = -1j * n2 * 2 * M * pi * (1e12 * f_centrals[3]) / (c)
int_fwm.gama[0:2] = 0
int_fwm.gama[5:] = 0
#for i in range(len(int_fwm.gama)):
# print(i, int_fwm.gama[i])
#exit()
"----------------------------------------------------------"
"--------------------Dispersion----------------------------"
Dop = dispersion_operator(betas, lamda_c, int_fwm, sim_wind)
"----------------------------------------------------------"
"---------------------Raman Factors------------------------"
ram = Raman_factors(fr)
ram.set_raman_band(sim_wind)
"----------------------------------------------------------"
"--------------------Noise---------------------------------"
noise_obj = Noise(int_fwm, sim_wind)
"----------------------------------------------------------"
pulse_pos_dict_or = ('after propagation', "pass WDM2",
"pass WDM1 on port2 (remove pump)",
'add more pump', 'out')
keys = ['loading_data/green_dot_fopo/pngs/' +
str(i) + str('.png') for i in range(7)]
D_pic = [plt.imread(i) for i in keys]
"----------------Construct the integrator----------------"
non_integrand = Integrand(int_fwm.gama, sim_wind.tsh,
sim_wind.w_tiled, ss,ram, cython_tick=True,
timer=False)
"--------------------------------------------------------"
"----------------------Formulate WDMS--------------------"
if WDMS_pars == 'signal_locked':
Omega = 2 * pi * c / (lamp * 1e-9) - 2 * pi * c / (lams * 1e-9)
omegai = 2 * pi * c / (lamp * 1e-9) + Omega
lami = 1e9 * 2 * pi * c / (omegai)
WDMS_pars = ([lamp, lams], # WDM up downs in wavelengths [m]
[lami, lams],
[lami, lamp],
[lami, lams])
WDM_vec = [WDM(i[0], i[1], sim_wind.fv, c,fopa)
for i in WDMS_pars] # WDM up downs in wavelengths [m]
# Phase modulator constructors
pm_fopa = Phase_modulation_FOPA(sim_wind.fv, where)
pm_WDM1 = Phase_modulation_infase_WDM(P_s, where, WDM_vec[0])
"--------------------------------------------------------"
# for ei,i in enumerate(WDM_vec):
# i.plot(filename = str(ei))
"----------------------Formulate splicers--------------------"
splicers_vec = [Splicer(loss=i) for i in spl_losses]
"------------------------------------------------------------"
f_p, f_s = sim_wind.fv[where[0][0], where[0][1]], sim_wind.fv[where[1][0], where[1][1]]
ex = Plotter_saver(plots, filesaves, sim_wind.fv,
sim_wind.t) # construct exporter
ro = oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P_p, P_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, non_integrand, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1,fopa)
return None
def main():
| "-----------------------------Stable parameters----------------------------"
# Number of computing cores for sweep
num_cores = arguments_determine(1)
# maximum tolerable error per step in integration
maxerr = 1e-13
ss = 1 # includes self steepening term
Df_band_vec = [5, 5, 10, 20]
fr = 0.18
plots = False # Do you want plots, (slow!)
filesaves = True # Do you want data dump?
complete = False
nplot = 1 # number of plots within the fibre; minimum is 2
if arguments_determine(-1) == 0:
fopa = True # If no oscillations then the WDMs are deleted to
# make the system into a FOPA
else:
fopa = False
| identifier_body |
|
oscillator.py | ami = 1e9 * 2 * pi * c / (omegai)
WDMS_pars = ([lamp, lams], # WDM up downs in wavelengths [m]
[lami, lams],
[lami, lamp],
[lami, lams])
WDM_vec = [WDM(i[0], i[1], sim_wind.fv, c,fopa)
for i in WDMS_pars] # WDM up downs in wavelengths [m]
# Phase modulator constructors
pm_fopa = Phase_modulation_FOPA(sim_wind.fv, where)
pm_WDM1 = Phase_modulation_infase_WDM(P_s, where, WDM_vec[0])
"--------------------------------------------------------"
# for ei,i in enumerate(WDM_vec):
# i.plot(filename = str(ei))
"----------------------Formulate splicers--------------------"
splicers_vec = [Splicer(loss=i) for i in spl_losses]
"------------------------------------------------------------"
f_p, f_s = sim_wind.fv[where[0][0], where[0][1]], sim_wind.fv[where[1][0], where[1][1]]
ex = Plotter_saver(plots, filesaves, sim_wind.fv,
sim_wind.t) # construct exporter
ro = oscilate(sim_wind, int_fwm, noise_obj, TFWHM_p, TFWHM_s, index, master_index, P_p, P_s, f_p, f_s, p_pos, s_pos, splicers_vec,
WDM_vec, Dop, non_integrand, D_pic, pulse_pos_dict_or, plots, ex, pm_fopa, pm_WDM1,fopa)
return None
def main():
"-----------------------------Stable parameters----------------------------"
# Number of computing cores for sweep
num_cores = arguments_determine(1)
# maximum tolerable error per step in integration
maxerr = 1e-13
ss = 1 # includes self steepening term
Df_band_vec = [5, 5, 10, 20]
fr = 0.18
plots = False # Do you want plots, (slow!)
filesaves = True # Do you want data dump?
complete = False
nplot = 1 # number of plots within the fibre; minimum is 2
if arguments_determine(-1) == 0:
fopa = True # If no oscillations then the WDMs are deleted to
# make the system into a FOPA
else:
fopa = False
if 'mpi' in sys.argv:
method = 'mpi'
elif 'joblib' in sys.argv:
method = 'joblib'
else:
method = 'single'
"--------------------------------------------------------------------------"
stable_dic = {'num_cores': num_cores, 'maxerr': maxerr, 'ss': ss, 'plots': plots,
'nplot': nplot, 'filesaves': filesaves,
'fr':fr, 'fopa':fopa}
"------------------------Can be variable parameters------------------------"
n2 = 2.5e-20 # Nonlinear index [m/W]
gama = 10e-3 # Overwrites n2 and Aeff w/m
alphadB = 0 # 0.0011667#666666666668 # loss within fibre[dB/m]
z = 18 # Length of the fibre
wave_idx = 0
power_area_idx = 0
N = np.array([i for i in range(2,13)]) # 2**N grid points
# Power list. [wavelength, power_area]
P_p_vec = [[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 3.9, 0.1), my_arange(4, 4.5, 0.05),
my_arange(4.6, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ],
[my_arange(3.5, 4.4, 0.1), my_arange(4.5, 5, 0.05),
my_arange(5.1, 8.1 ,0.1), my_arange(8.2,12 ,0.1 ) ]]
Df_band = Df_band_vec[power_area_idx]
P_p = P_p_vec[wave_idx][power_area_idx]
P_p = [6]#[4.9,4.95,5]
P_s = 0#100e-3
TFWHM_p = 0 # full width at half maximum of the pump
TFWHM_s = 0 # full width at half maximum of the signal
# loss of each type of splices [dB]
spl_losses = [0, 0, 1.4]
betas = np.array([0, 0, 0, 6.756e-2, # propagation constants [ps^n/m]
-1.002e-4, 3.671e-7]) * 1e-3
lamda_c = 1051.85e-9
# Zero dispersion wavelength [nm]
# max at ls,li = 1095, 1010
WDMS_pars = ([1048., 1204.16],
[927.7, 1204.16]) # WDM up downs in wavelengths [m]
lamp_vec = [1046,1047, 1048, 1049, 1050]
lamp = [lamp_vec[wave_idx]]
lams = ['lock' for i in range(len(lamp))]
lamp = lamp_vec[wave_idx]
lams = 'lock'
var_dic = {'n2': n2, 'gama': gama, 'alphadB': alphadB, 'z': z, 'P_p': P_p,
'P_s': P_s, 'TFWHM_p': TFWHM_p, 'TFWHM_s': TFWHM_s,
'spl_losses': spl_losses, 'betas': betas,
'lamda_c': lamda_c, 'WDMS_pars': WDMS_pars,
'lamp': lamp, 'lams': lams, 'N':N, 'Df_band': Df_band}
"--------------------------------------------------------------------------"
outside_var_key = 'P_p'
inside_var_key = 'N'
inside_var = var_dic[inside_var_key]
outside_var = var_dic[outside_var_key]
del var_dic[outside_var_key]
del var_dic[inside_var_key]
"----------------------------Simulation------------------------------------"
D_ins = [{'index': i, inside_var_key: insvar}
for i, insvar in enumerate(inside_var)]
large_dic = {**stable_dic, **var_dic}
if len(inside_var) < num_cores:
num_cores = len(inside_var)
profiler_bool = arguments_determine(0)
for kk, variable in enumerate(outside_var):
create_file_structure(kk)
_temps = create_destroy(inside_var, str(kk))
_temps.prepare_folder()
large_dic['lams'] = lams[kk]
large_dic['master_index'] = kk
large_dic[outside_var_key] = variable
if profiler_bool:
for i in range(len(D_ins)):
formulate(**{**D_ins[i], ** large_dic})
elif method == 'mpi':
iterables = ({**D_ins[i], ** large_dic} for i in range(len(D_ins)))
with MPIPoolExecutor() as executor:
A = executor.map(formulate, iterables)
else:
A = Parallel(n_jobs=num_cores)(delayed(formulate)(**{**D_ins[i], ** large_dic}) for i in range(len(D_ins)))
_temps.cleanup_folder()
print('\a')
return None
class Band_predict(object):
def | __init__ | identifier_name |
|
server.go | , map[string]string{"error": message})
}
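//respondWithJSON marshals the payload and writes it as a JSON response with the given status code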
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
//GetReper retrieves the current on-call contact for the platform
//passed as an argument
func GetReper(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
vars := mux.Vars(r)
piattaforma := vars["piatta"]
reperibile, err := reperibili.GetReperibile(piattaforma)
if err != nil {
respondWithError(w, http.StatusNoContent, err.Error())
return
}
result := fmt.Sprintf("Il reperibile per %s è: %s. Cell: %s", piattaforma, reperibile.Cognome, reperibile.Cellulare)
respondWithJSON(w, http.StatusFound, result)
return
}
//SetReper inserts an on-call duty entry into a shared archive
func SetReper(w http.ResponseWriter, r *http.Request) {
/* var p reperibili.Contatto
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close() */
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
r.ParseForm()
nome := r.PostFormValue("nome")
cognome := r.PostFormValue("cognome")
cellulare := r.PostFormValue("cellulare")
piattaforma := r.PostFormValue("piattaforma")
oggi := time.Now().Format("20060102")
err := reperibili.AddRuota(nome, cognome, cellulare, piattaforma, oggi, "gruppo6")
if err != nil {
fmt.Println("errorone", err.Error(), cellulare)
return
}
fmt.Println("inserito reperibile: ", nome, cognome, cellulare)
/* err := reperibili.AddRuota(p.Nome, p.Cognome, p.Cellulare, "CDN", "20180101", "gruppo6")
if err != nil {
fmt.Println("errorone")
}
*/
/* fmt.Println(p)
respondWithJSON(w, http.StatusCreated, p) */
return
}
//Callfile sends the file generated for Asterisk
func Callfile(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "/tmp/exampleTest.call")
return
}
//LogNotifica stores the notification in a database
func LogNotifica(n Notifica) (err error) {
//get the current timestamp
timestamp := time.Now()
//open the database
database, _ := sql.Open("sqlite3", "./sarumann.db")
//close the database once done
defer database.Close()
//prepare the statement that creates the notifiche table if it does not exist
statement, _ := database.Prepare("CREATE TABLE IF NOT EXISTS notifiche (id INTEGER PRIMARY KEY, server TEXT, servizio TEXT, piattaforma TEXT, reperibile TEXT, cellulare TEXT, messaggio TEXT, timestamp INT)")
//create the notifiche table if it does not already exist in the database
statement.Exec()
//prepare the insert statement for the notification
statement, err = database.Prepare("INSERT INTO notifiche(server, servizio, piattaforma, reperibile, cellulare, messaggio, timestamp) VALUES(?,?,?,?,?,?,?)")
if err != nil {
fmt.Println(err.Error())
}
//insert the notification passed as the function argument
_, err = statement.Exec(n.Hostname, n.Service, n.Piattaforma, n.Reperibile, n.Cellulare, n.Messaggio, timestamp.Unix())
if err != nil {
fmt.Println(err.Error())
}
return
}
//AntiStorm prevents the on-call contact from receiving too many calls
func AntiStorm(piattaforma string) (err error) {
database, _ := sql.Open("sqlite3", "./sarumann.db")
defer database.Close()
row := database.QueryRow("SELECT timestamp FROM notifiche where piattaforma = ? order by timestamp desc limit 1", piattaforma)
var last string
row.Scan(&last)
fmt.Println(last) //debug
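//Note: if no previous notification exists for this platform, last stays empty,
//strconv.Atoi fails and lastint defaults to 0, so the call is allowed through.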
lastint, err := strconv.Atoi(last)
if err != nil {
fmt.Println("errore")
}
oraepoch := time.Now().Unix()
fmt.Println(oraepoch) //debug
//If fewer than tot seconds have passed since the last notification, bail out
tot := 1800 ///1800 seconds equals half an hour
if lastint+(tot) > int(oraepoch) {
err = fmt.Errorf("Troppe chiamate al reperibile per %s, è permessa una sola chiamata ogni %d secondi", piattaforma, tot)
return err
}
return nil
}
//CreateNotificaNoVoiceCall receives the alerts from the Nagios servers
func Creat | tp.ResponseWriter, r *http.Request) {
//Create p as a Notifica value with its nested structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
//Still place the call if it is for the CDN platform
if piattaforma == "CDN" {
Cellpertest := viper.GetString("Cellpertest")
if len(Cellpertest) != 0 {
reperibile = Cellpertest
log.Println("Impostato reperibile di test", reperibile)
}
orariofobstr := viper.GetString("OrarioFob")
orariofob, err := strconv.Atoi(orariofobstr)
if err != nil {
log.Println(err.Error())
}
log.Println("L'orario impostato per inizio FOB è", orariofob)
//If we are outside base working hours (FOB)
if fob := isfob(time.Now(), orariofob); fob == true {
fmt.Println("Siamo in FOB. Notifiche vocali attive!")
//Make sure enough time has passed since the last call before calling again
errstorm := AntiStorm(p.Piattaforma)
if errstorm != nil {
log.Println(errstorm)
return
}
//Log the incoming notification to the database
err := LogNotifica(p)
if err != nil {
log.Println(err.Error())
}
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
}
}
return
}
//CreateNotifica receives the alerts from the Nagios servers and uses them
//to alert the on-call contact on duty by phone
func CreateNotifica(w http.ResponseWriter, r *http.Request) {
//Create p as a Notifica value with its nested structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p | eNotificaNoVoiceCall(w ht | identifier_name |
server.go | ")
w.WriteHeader(code)
w.Write(response)
}
//GetReper retrieves the current on-call contact for the platform
//passed as an argument
func GetReper(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
vars := mux.Vars(r)
piattaforma := vars["piatta"]
reperibile, err := reperibili.GetReperibile(piattaforma)
if err != nil {
respondWithError(w, http.StatusNoContent, err.Error())
return
}
result := fmt.Sprintf("Il reperibile per %s è: %s. Cell: %s", piattaforma, reperibile.Cognome, reperibile.Cellulare)
respondWithJSON(w, http.StatusFound, result)
return
}
//SetReper inserts an on-call duty entry into a shared archive
func SetReper(w http.ResponseWriter, r *http.Request) {
/* var p reperibili.Contatto
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close() */
w.Header().Set("Cache-Control", "no-cache, private, max-age=0")
w.Header().Set("Expires", time.Unix(0, 0).Format(http.TimeFormat))
w.Header().Set("Pragma", "no-cache")
w.Header().Set("X-Accel-Expires", "0")
r.ParseForm()
nome := r.PostFormValue("nome")
cognome := r.PostFormValue("cognome")
cellulare := r.PostFormValue("cellulare")
piattaforma := r.PostFormValue("piattaforma")
oggi := time.Now().Format("20060102")
err := reperibili.AddRuota(nome, cognome, cellulare, piattaforma, oggi, "gruppo6")
if err != nil {
fmt.Println("errorone", err.Error(), cellulare)
return
}
fmt.Println("inserito reperibile: ", nome, cognome, cellulare)
/* err := reperibili.AddRuota(p.Nome, p.Cognome, p.Cellulare, "CDN", "20180101", "gruppo6")
if err != nil {
fmt.Println("errorone")
}
*/
/* fmt.Println(p)
respondWithJSON(w, http.StatusCreated, p) */
return
}
//Callfile sends the file generated for Asterisk
func Callfile(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "/tmp/exampleTest.call")
return
}
//LogNotifica stores the notification in a database
func LogNotifica(n Notifica) (err error) {
//recupera il timestamp di adesso
timestamp := time.Now()
//apre il database
database, _ := sql.Open("sqlite3", "./sarumann.db")
//chiudi Database una volta fatto
defer database.Close()
//prepara la creazione della tabella notifiche se non esite
statement, _ := database.Prepare("CREATE TABLE IF NOT EXISTS notifiche (id INTEGER PRIMARY KEY, server TEXT, servizio TEXT, piattaforma TEXT, reperibile TEXT, cellulare TEXT, messaggio TEXT, timestamp INT)")
//esegue la creazione della tabella notifiche se non esiste già nel database
statement.Exec()
//prepara l'inserimenti della notifica
statement, err = database.Prepare("INSERT INTO notifiche(server, servizio, piattaforma, reperibile, cellulare, messaggio, timestamp) VALUES(?,?,?,?,?,?,?)")
if err != nil {
fmt.Println(err.Error())
}
//esegue l'inserimento della notifica passata come argomento della funzione
_, err = statement.Exec(n.Hostname, n.Service, n.Piattaforma, n.Reperibile, n.Cellulare, n.Messaggio, timestamp.Unix())
if err != nil {
fmt.Println(err.Error())
}
return
}
//AntiStorm prevents the on-call contact from receiving too many calls
func AntiStorm(piattaforma string) (err error) {
database, _ := sql.Open("sqlite3", "./sarumann.db")
defer database.Close()
row := database.QueryRow("SELECT timestamp FROM notifiche where piattaforma = ? order by timestamp desc limit 1", piattaforma)
var last string
row.Scan(&last)
fmt.Println(last) //debug
lastint, err := strconv.Atoi(last)
if err != nil {
fmt.Println("errore")
}
oraepoch := time.Now().Unix()
fmt.Println(oraepoch) //debug
//Se non sono passati tot secondi dall'ultima notifica allora esce
tot := 1800 ///1800 secondi uguale mezz'ora
if lastint+(tot) > int(oraepoch) {
err = fmt.Errorf("Troppe chiamate al reperibile per %s, è permessa una sola chiamata ogni %d secondi", piattaforma, tot)
return err
}
return nil
}
//CreateNotificaNoVoiceCall receives the alerts from the Nagios servers
func CreateNotificaNoVoiceCall(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
//Invia cmq la chiamata se è per la piattaforma CDN
if piattaforma == "CDN" {
Cellpertest := viper.GetString("Cellpertest")
if len(Cellpertest) != 0 {
reperibile = Cellpertest
log.Println("Impostato reperibile di test", reperibile)
}
orariofobstr := viper.GetString("OrarioFob")
orariofob, err := strconv.Atoi(orariofobstr)
if err != nil {
log.Println(err.Error())
}
log.Println("L'orario impostato per inizio FOB è", orariofob)
//Se siamo in fuori orario base
if fob := isfob(time.Now(), orariofob); fob == true {
fmt.Println("Siamo in FOB. Notifiche vocali attive!")
//Verifica che sia passato abbastanza tempo dall'ultima chiamata prima di chiamare nuovamente
errstorm := AntiStorm(p.Piattaforma)
if errstorm != nil {
log.Println(errstorm)
return
}
//Logga sul db la notifica in entrata
err := LogNotifica(p)
if err != nil {
log.Println(err.Error())
}
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
}
}
return
}
//CreateNotifica receives the alerts from the Nagios servers and uses them
//to alert the on-call contact on duty by phone
func CreateNotifica(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
| random_line_split |
||
server.go | ulare := r.PostFormValue("cellulare")
piattaforma := r.PostFormValue("piattaforma")
oggi := time.Now().Format("20060102")
err := reperibili.AddRuota(nome, cognome, cellulare, piattaforma, oggi, "gruppo6")
if err != nil {
fmt.Println("errorone", err.Error(), cellulare)
return
}
fmt.Println("inserito reperibile: ", nome, cognome, cellulare)
/* err := reperibili.AddRuota(p.Nome, p.Cognome, p.Cellulare, "CDN", "20180101", "gruppo6")
if err != nil {
fmt.Println("errorone")
}
*/
/* fmt.Println(p)
respondWithJSON(w, http.StatusCreated, p) */
return
}
//Callfile sends the file generated for Asterisk
func Callfile(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "/tmp/exampleTest.call")
return
}
//LogNotifica stores the notification in a database
func LogNotifica(n Notifica) (err error) {
//recupera il timestamp di adesso
timestamp := time.Now()
//apre il database
database, _ := sql.Open("sqlite3", "./sarumann.db")
//chiudi Database una volta fatto
defer database.Close()
//prepara la creazione della tabella notifiche se non esite
statement, _ := database.Prepare("CREATE TABLE IF NOT EXISTS notifiche (id INTEGER PRIMARY KEY, server TEXT, servizio TEXT, piattaforma TEXT, reperibile TEXT, cellulare TEXT, messaggio TEXT, timestamp INT)")
//esegue la creazione della tabella notifiche se non esiste già nel database
statement.Exec()
//prepara l'inserimenti della notifica
statement, err = database.Prepare("INSERT INTO notifiche(server, servizio, piattaforma, reperibile, cellulare, messaggio, timestamp) VALUES(?,?,?,?,?,?,?)")
if err != nil {
fmt.Println(err.Error())
}
//esegue l'inserimento della notifica passata come argomento della funzione
_, err = statement.Exec(n.Hostname, n.Service, n.Piattaforma, n.Reperibile, n.Cellulare, n.Messaggio, timestamp.Unix())
if err != nil {
fmt.Println(err.Error())
}
return
}
//AntiStorm prevents the on-call contact from receiving too many calls
func AntiStorm(piattaforma string) (err error) {
database, _ := sql.Open("sqlite3", "./sarumann.db")
defer database.Close()
row := database.QueryRow("SELECT timestamp FROM notifiche where piattaforma = ? order by timestamp desc limit 1", piattaforma)
var last string
row.Scan(&last)
fmt.Println(last) //debug
lastint, err := strconv.Atoi(last)
if err != nil {
fmt.Println("errore")
}
oraepoch := time.Now().Unix()
fmt.Println(oraepoch) //debug
//Se non sono passati tot secondi dall'ultima notifica allora esce
tot := 1800 ///1800 secondi uguale mezz'ora
if lastint+(tot) > int(oraepoch) {
err = fmt.Errorf("Troppe chiamate al reperibile per %s, è permessa una sola chiamata ogni %d secondi", piattaforma, tot)
return err
}
return nil
}
//CreateNotificaNoVoiceCall receives the alerts from the Nagios servers
func CreateNotificaNoVoiceCall(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
//Invia cmq la chiamata se è per la piattaforma CDN
if piattaforma == "CDN" {
Cellpertest := viper.GetString("Cellpertest")
if len(Cellpertest) != 0 {
reperibile = Cellpertest
log.Println("Impostato reperibile di test", reperibile)
}
orariofobstr := viper.GetString("OrarioFob")
orariofob, err := strconv.Atoi(orariofobstr)
if err != nil {
log.Println(err.Error())
}
log.Println("L'orario impostato per inizio FOB è", orariofob)
//Se siamo in fuori orario base
if fob := isfob(time.Now(), orariofob); fob == true {
fmt.Println("Siamo in FOB. Notifiche vocali attive!")
//Verifica che sia passato abbastanza tempo dall'ultima chiamata prima di chiamare nuovamente
errstorm := AntiStorm(p.Piattaforma)
if errstorm != nil {
log.Println(errstorm)
return
}
//Logga sul db la notifica in entrata
err := LogNotifica(p)
if err != nil {
log.Println(err.Error())
}
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
}
}
return
}
//CreateNotifica receives the alerts from the Nagios servers and uses them
//to alert the on-call contact on duty by phone
func CreateNotifica(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
return
}
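//isfob reports whether the given time falls in FOB ("fuori orario base", i.e. outside
//base working hours): weekends, or weekdays before 07:00 or from foborainizio onwards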
func isfob(ora time.Time, foborainizio int) (ok bool) {
//or | a := time.Now()
giorno := ora.Weekday()
//Partiamo che non siamo in FOB
ok = false
switch giorno {
//Se è sabato siamo in fob
case time.Saturday:
//fmt.Println("E' sabato")
ok = true
//Se è domenica siamo in fob
case time.Sunday:
//fmt.Println("E' Domenica")
ok = true
//Se invece è un giorno feriale dobbiamo vedere l'orario
default:
//se è dopo le 18 siamo in fob
//Si avviso il reperibile mezz'ora prima se è un problema si può cambiare
//Recupero l'ora del FOB dal file di configurazione
if ora.Hour() >= foborainizio { | identifier_body |
|
server.go | allora esce
tot := 1800 ///1800 secondi uguale mezz'ora
if lastint+(tot) > int(oraepoch) {
err = fmt.Errorf("Troppe chiamate al reperibile per %s, è permessa una sola chiamata ogni %d secondi", piattaforma, tot)
return err
}
return nil
}
//CreateNotificaNoVoiceCall receives the alerts from the Nagios servers
func CreateNotificaNoVoiceCall(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
//Invia cmq la chiamata se è per la piattaforma CDN
if piattaforma == "CDN" {
Cellpertest := viper.GetString("Cellpertest")
if len(Cellpertest) != 0 {
reperibile = Cellpertest
log.Println("Impostato reperibile di test", reperibile)
}
orariofobstr := viper.GetString("OrarioFob")
orariofob, err := strconv.Atoi(orariofobstr)
if err != nil {
log.Println(err.Error())
}
log.Println("L'orario impostato per inizio FOB è", orariofob)
//Se siamo in fuori orario base
if fob := isfob(time.Now(), orariofob); fob == true {
fmt.Println("Siamo in FOB. Notifiche vocali attive!")
//Verifica che sia passato abbastanza tempo dall'ultima chiamata prima di chiamare nuovamente
errstorm := AntiStorm(p.Piattaforma)
if errstorm != nil {
log.Println(errstorm)
return
}
//Logga sul db la notifica in entrata
err := LogNotifica(p)
if err != nil {
log.Println(err.Error())
}
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
}
}
return
}
//CreateNotifica receives the alerts from the Nagios servers and uses them
//to alert the on-call contact on duty by phone
func CreateNotifica(w http.ResponseWriter, r *http.Request) {
//Crea p come tipo Notifica con i suoi structs
var p Notifica
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&p); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
//fmt.Println(p) //debug
hostname, err := url.QueryUnescape(p.Hostname)
service, err := url.QueryUnescape(p.Service)
piattaforma, err := url.QueryUnescape(p.Piattaforma)
reperibile, err := url.QueryUnescape(p.Reperibile)
cellulare, err := url.QueryUnescape(p.Cellulare)
messaggio, err := url.QueryUnescape(p.Messaggio)
if err != nil {
respondWithError(w, http.StatusBadRequest, err.Error())
log.Fatal(err.Error())
return
}
result := fmt.Sprintf("Ok. campi ricevuti: Hostname: %s, Service: %s, Piattaforma: %s, Reperibile: %s, Cellulare: %s, Messaggio: %s", hostname, service, piattaforma, reperibile, cellulare, messaggio)
respondWithJSON(w, http.StatusCreated, result)
//log.Println("ok")
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), result)
CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio)
return
}
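//isfob reports whether the given time falls in FOB ("fuori orario base", i.e. outside
//base working hours): weekends, or weekdays before 07:00 or from foborainizio onwards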
func isfob(ora time.Time, foborainizio int) (ok bool) {
//ora := time.Now()
giorno := ora.Weekday()
//Start by assuming we are not in FOB
ok = false
switch giorno {
//Saturday is FOB
case time.Saturday:
//fmt.Println("E' sabato")
ok = true
//Sunday is FOB
case time.Sunday:
//fmt.Println("E' Domenica")
ok = true
//On a weekday we have to look at the time of day
default:
//after 18:00 we are in FOB
//The on-call contact is alerted half an hour early; if that is a problem it can be changed
//Read the FOB start hour from the configuration file
if ora.Hour() >= foborainizio {
//fmt.Println("Giorno feriale", viper.GetInt("foborainizio"))
ok = true
return ok
}
//before 07:00 we are in FOB
if ora.Hour() < 7 {
ok = true
}
}
//Return ok, which is true or false depending on whether we are in FOB
return ok
}
//CreateCall creates the .call file that Asterisk needs in order to contact the on-call person
func CreateCall(hostname, service, piattaforma, reperibile, cellulare, messaggio string) (err error) {
//Normalise the supplied field into a string of 10 digits
cell, err := verificaCell(reperibile)
if err != nil {
log.Printf("Cellulare non gestibile: %s\n", err.Error())
return
}
scheletro :=
`Channel: SIP/999` + cell + `@10.31.18.26
MaxRetries: 5
RetryTime: 300
WaitTime: 60
Context: nagios-notify
Extension: s
Archive: Yes
Set: CONTACT_NAME="Gringo"
Set: PLAT_NAME="` + piattaforma + `"
Set: NOT_TYPE="PROBLEM"
Set: HOST_ALIAS="` + hostname + `"
Set: SERVICE_NAME="` + service + `"
Set: STATUS="Critico"
Set: NOT_HEAD_MSG="è stato riscontrato un problema"
Set: SRV_MSG="sul server ` + hostname + ` il servizio ` + service + ` è in critical ` + messaggio + `"`
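//The fields above follow the Asterisk call file format: Channel dials the on-call
//number through the SIP trunk, and the Set: lines are assumed to feed the
//nagios-notify dialplan context that builds the spoken message.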
//where to save the files so that Asterisk can fetch them
//in our case this is where nginx keeps the webserver's static content
//the information lives in the hidden .sarumann.yaml file that the user must have
//in their own $HOME
//path := viper.GetString("CallPath")
//file, err := os.Create(path + "exampleTest.call") // Truncates if file already exists, be careful!
file, err := os.Create("/tmp/exampleTest.call")
if err != nil {
log.Fatalf("failed creating file: %s", err)
}
defer file.Close() // Make sure to close the file when you're done
_, err = file.WriteString(scheletro)
if err != nil {
log.Fatalf("failed writing to file: %s", err)
}
//fmt.Printf("\nLength: %d bytes", len)
fmt.Printf("\nFile Name: %s\n", file.Name())
return
}
//verificaCell checks that the mobile number is a string of 10 digits
func verificaCell(value string) (cell string, err error) {
//if value has fewer than 10 digits it is no good
if len(value) < 10 {
err := fmt.Err | orf("Cellulare con poche cifre: %v", len(value))
log.Println(err.Error())
return "", err
}
//cell10cifre pr | conditional_block |
|
mod.rs | found that certain wavelengths of light, which are usually absorbed by water,
//! weakened when the planet was in the way, indicating not only does K2-18b have an atmosphere, but the atmosphere
//! contains water in vapour form. The team from UCL then analyzed the Montreal team's data using their own software
//! and confirmed their conclusion. This was not the first time scientists have found signs of water on an exoplanet,
//! but previous discoveries were made on planets with high temperatures or other pronounced differences from Earth.
//! \"This is the first potentially habitable planet where the temperature is right and where we now know there is water,\"
//! said UCL astronomer Angelos Tsiaras. \"It's the best candidate for habitability right now.\" \"It's a good sign\",
//! said Ryan Cloutier of the Harvard–Smithsonian Center for Astrophysics, who was not one of either study's authors.
//! \"Overall,\" he continued, \"the presence of water in its atmosphere certainly improves the prospect of K2-18b being
//! a potentially habitable planet, but further observations will be required to say for sure. \"
//! K2-18b was first identified in 2015 by the Kepler space telescope. It is about 110 light-years from Earth and larger
//! but less dense. Its star, a red dwarf, is cooler than the Sun, but the planet's orbit is much closer, such that a year
//! on K2-18b lasts 33 Earth days. According to The Guardian, astronomers were optimistic that NASA's James Webb space
//! telescope — scheduled for launch in 2021 — and the European Space Agency's 2028 ARIEL program, could reveal more
//! about exoplanets like K2-18b."];
//!
//! let output = model.summarize(&input);
//! # Ok(())
//! # }
//! ```
//! (example from: [WikiNews](https://en.wikinews.org/wiki/Astronomers_find_water_vapour_in_atmosphere_of_exoplanet_K2-18b))
//!
//! Example output: \
//! ```ignore
//! # let output =
//! "Scientists have found water vapour on K2-18b, a planet 110 light-years from Earth.
//! This is the first such discovery in a planet in its star's habitable zone.
//! The planet is not too hot and not too cold for liquid water to exist."
//! # ;
//! ```
//!
//!
//! #### 4. Dialogue Model
//! Conversation model based on Microsoft's [DialoGPT](https://github.com/microsoft/DialoGPT).
//! This pipeline allows the generation of single or multi-turn conversations between a human and a model.
//! The DialoGPT's page states that
//! > The human evaluation results indicate that the response generated from DialoGPT is comparable to human response quality
//! > under a single-turn conversation Turing test. ([DialoGPT repository](https://github.com/microsoft/DialoGPT))
//!
//! The model uses a `ConversationManager` to keep track of active conversations and generate responses to them.
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::conversation::{ConversationManager, ConversationModel};
//! let conversation_model = ConversationModel::new(Default::default())?;
//! let mut conversation_manager = ConversationManager::new();
//!
//! let conversation_id =
//! conversation_manager.create("Going to the movies tonight - any suggestions?");
//! let output = conversation_model.generate_responses(&mut conversation_manager);
//! # Ok(())
//! # }
//! ```
//! Example output: \
//! ```ignore
//! # let output =
//! "The Big Lebowski."
//! # ;
//! ```
//!
//! #### 5. Natural Language Generation
//! Generate language based on a prompt. GPT2 and GPT available as base models.
//! Includes techniques such as beam search, top-k and nucleus sampling, temperature setting and repetition penalty.
//! Supports batch generation of sentences from several prompts. Sequences will be left-padded with the model's padding token if present, the unknown token otherwise.
//! This may impact the results and it is recommended to submit prompts of similar length for best results. Additional information on the input parameters for generation is provided in this module's documentation.
//!
//! ```ignore
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::text_generation::TextGenerationModel;
//! use rust_bert::pipelines::common::ModelType;
//! let mut model = TextGenerationModel::new(Default::default())?;
//! let input_context_1 = "The dog";
//! let input_context_2 = "The cat was";
//!
//! let prefix = None; // Optional prefix to append prompts with, will be excluded from the generated output
//!
//! let output = model.generate(&[input_context_1, input_context_2], prefix);
//! # Ok(())
//! # }
//! ```
//! Example output: \
//! ```ignore
//! # let output =
//! [
//! "The dog's owners, however, did not want to be named. According to the lawsuit, the animal's owner, a 29-year",
//! "The dog has always been part of the family. \"He was always going to be my dog and he was always looking out for me",
//! "The dog has been able to stay in the home for more than three months now. \"It's a very good dog. She's",
//! "The cat was discovered earlier this month in the home of a relative of the deceased. The cat\'s owner, who wished to remain anonymous,",
//! "The cat was pulled from the street by two-year-old Jazmine.\"I didn't know what to do,\" she said",
//! "The cat was attacked by two stray dogs and was taken to a hospital. Two other cats were also injured in the attack and are being treated."
//! ]
//! # ;
//! ```
//!
//! #### 6. Zero-shot classification
//! Performs zero-shot classification on input sentences with provided labels using a model fine-tuned for Natural Language Inference.
//! ```ignore
//! # use rust_bert::pipelines::zero_shot_classification::ZeroShotClassificationModel;
//! # fn main() -> anyhow::Result<()> {
//! let sequence_classification_model = ZeroShotClassificationModel::new(Default::default())?;
//! let input_sentence = "Who are you voting for in 2020?";
//! let input_sequence_2 = "The prime minister has announced a stimulus package which was widely criticized by the opposition.";
//! let candidate_labels = &["politics", "public health", "economics", "sports"];
//! let output = sequence_classification_model.predict_multilabel(
//! &[input_sentence, input_sequence_2],
//! candidate_labels,
//! None,
//! 128,
//! ); | //! ```ignore
//! # use rust_bert::pipelines::sequence_classification::Label;
//! let output = [
//! [
//! Label {
//! text: "politics".to_string(),
//! score: 0.972,
//! id: 0,
//! sentence: 0,
//! },
//! Label {
//! text: "public health".to_string(),
//! score: 0.032,
//! id: 1,
//! sentence: 0,
//! },
//! Label {
//! text: "economics".to_string(),
//! score: 0.006,
//! id: 2,
//! sentence: 0,
//! },
//! Label {
//! text: "sports".to_string(),
//! score: 0.004,
//! id: 3,
//! sentence: 0,
//! },
//! ],
//! [
//! Label {
//! text: "politics".to_string(),
//! score: 0.975,
//! id: 0,
//! sentence: 1,
//! },
//! Label {
//! text: "economics".to_string(),
//! score: 0.852,
//! id: 2,
//! sentence: 1,
//! },
//! Label {
//! text: "public health".to_string(),
//! score: 0.0818,
//! id: 1,
//! sentence: 1,
//! },
//! Label {
//! text: "sports".to_string(),
//! score: 0.001,
//! id: 3,
//! sentence: 1,
//! },
//! ],
//! ]
//! .to_vec();
//! ```
//!
//! #### 7. Sentiment analysis
//! Predicts the binary sentiment for a sentence. DistilBERT model finetuned on SST-2.
//! ```ignore
//! use rust_bert::pipelines::sentiment::SentimentModel;
//! # fn main() -> anyhow::Result<()> {
//! let sentiment_model = SentimentModel::new(Default::default())?;
//! let input = [
//! "Probably my all-time favorite movie, a story of selflessness, sacrifice and dedication to a noble cause, but it's not preachy or boring.",
//! "This film tried to be too many things all at once: stinging political satire, Hollywood blockbuster, sappy romantic comedy, family values promo...",
//! "If you like original gut wrenching laughter you will like this movie. If you are young or old then you will love this movie, hell even my mom liked it.",
//! ];
//! let | //! # Ok(())
//! # }
//! ```
//!
//! outputs: | random_line_split |
backfill.py | # Finished iterating over all available accounts
break
# Decode account
path_to_leaf, address_hash_nibbles, encoded_account = next_account_info
account = rlp.decode(encoded_account, sedes=Account)
# Iterate over all missing hashes of subcomponents (storage & bytecode)
subcomponent_hashes_iterator = self._missing_subcomponent_hashes(
address_hash_nibbles,
account,
starting_root_hash,
)
async for node_request in subcomponent_hashes_iterator:
yield node_request
# Check if account is fully downloaded
account_components_complete = self._are_account_components_complete(
address_hash_nibbles,
account,
)
if account_components_complete:
# Mark fully downloaded accounts as complete, and do some cleanup
self._mark_account_complete(path_to_leaf, address_hash_nibbles)
else:
# Pause accounts that are not fully downloaded, and track the account
# to resume when the generator exits.
self._account_tracker.pause_review(path_to_leaf)
exhausted_account_leaves += (path_to_leaf, )
except GeneratorExit:
# As the generator is exiting, we want to resume any paused accounts. This
# allows us to find missing storage/bytecode on the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
raise
else:
# If we pause a few accounts and then run out of nodes to ask for, then we
# still need to resume the paused accounts to prepare for the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
# Possible scenarios:
# 1. We have completed backfill
# 2. We have iterated the available nodes, and all known hashes are being requested.
# For example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes, and exit cleanly.
#
# In response to these situations, we might like to:
# 1. Log and celebrate that the full state has been downloaded
# 2. Exit this search and sleep a bit, waiting for new trie nodes to arrive
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it, using a _check_complete() check.
return
async def _request_tracking_trie_items(
self,
request_tracker: TrieNodeRequestTracker,
root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:
"""
Walk through the supplied trie, yielding the request tracker and node
request for any missing trie nodes.
:yield: path to leaf node, a key (as nibbles), and the value found in the trie
:raise: MissingTraversalNode if a node is missing while walking the trie
"""
if self._next_trie_root_hash is None:
# We haven't started beam syncing, so don't know which root to start at
return
trie = HexaryTrie(self._db, root_hash)
starting_index = bytes_to_nibbles(root_hash)
while self.manager.is_running:
try:
path_to_node = request_tracker.next_path_to_explore(starting_index)
except trie_exceptions.PerfectVisibility:
# This doesn't necessarily mean we are finished.
# Any active prefixes might still be hiding some significant portion of the trie
# But it's all we're able to explore for now, until more node data arrives
return
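            # Re-use a cached parent node when one is available, so the trie does not
            # have to be re-traversed from the root on every iteration.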
try:
cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node)
except KeyError:
cached_node = None
node_getter = partial(trie.traverse, path_to_node)
else:
node_getter = partial(trie.traverse_from, cached_node, uncached_key)
try:
node = node_getter()
except trie_exceptions.MissingTraversalNode as exc:
# Found missing account trie node
if path_to_node == exc.nibbles_traversed:
raise
elif cached_node is None:
# The path and nibbles traversed should always match in a non-cached traversal
raise RuntimeError(
f"Unexpected: on a non-cached traversal to {path_to_node}, the"
f" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}"
) from exc
else:
# We need to re-raise a version of the exception that includes the whole path
# from the root node (when using cached nodes, we only have the path from
# the parent node to the child node)
# We could always raise this re-wrapped version, but skipping it (probably?)
# improves performance.
missing_hash = exc.missing_node_hash
raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc
except trie_exceptions.TraversedPartialPath as exc:
node = exc.simulated_node
if node.value:
full_key_nibbles = path_to_node + node.suffix
if len(node.sub_segments):
# It shouldn't be a problem to skip handling this case, because all keys are
# hashed 32 bytes.
raise NotImplementedError(
"The state backfiller doesn't handle keys of different lengths, where"
f" one key is a prefix of another. But found {node} in trie with"
f" {root_hash!r}"
)
yield path_to_node, full_key_nibbles, node.value
# Note that we do not mark value nodes as completed. It is up to the caller
# to do that when it is ready. For example, the storage iterator will
# immediately treat the key as completed. The account iterator will
# not treat the key as completed until all of its storage and bytecode
# are also marked as complete.
else:
# If this is just an intermediate node, then we can mark it as confirmed.
request_tracker.confirm_prefix(path_to_node, node)
async def _missing_subcomponent_hashes(
self,
address_hash_nibbles: Nibbles,
account: Account,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
storage_node_iterator = self._missing_storage_hashes(
address_hash_nibbles,
account.storage_root,
starting_main_root,
)
async for node_request in storage_node_iterator:
yield node_request
bytecode_node_iterator = self._missing_bytecode_hashes(
address_hash_nibbles,
account.code_hash,
starting_main_root,
)
async for node_request in bytecode_node_iterator:
yield node_request
# Note that completing this iterator does NOT mean we're done with the
# account. It just means that all known missing hashes are actively
# being requested.
async def _missing_storage_hashes(
self,
address_hash_nibbles: Nibbles,
storage_root: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Walks through the storage trie at the given root, yielding one missing
storage node hash/prefix at a time.
The yielded node info is wrapped in a ``TrackedRequest``. The hash is
marked as active until it is explicitly marked for review again. The
        hash/prefix will be marked for review again after asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
if storage_root == BLANK_NODE_HASH:
# Nothing to do if the storage has an empty root
return
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
while self.manager.is_running:
storage_iterator = self._request_tracking_trie_items(
storage_tracker,
storage_root,
)
try:
async for path_to_leaf, hashed_key, _storage_value in storage_iterator:
# We don't actually care to look at the storage keys/values during backfill
storage_tracker.confirm_leaf(path_to_leaf)
except trie_exceptions.MissingTraversalNode as exc:
yield storage_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
else:
# Possible scenarios:
# 1. We have completed backfilling this account's storage
# 2. We have iterated the available nodes, and only their children are missing,
# for example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes. | # 2. Look for more missing nodes in neighboring accounts and their storage, etc.
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it.
return
async def _missing_bytecode_hashes(
self,
address_hash_nibbles: Nibbles,
code_hash: Hash32,
starting_main_root: Hash32 | #
# In response to these situations, we might like to:
# 1. Debug log? | random_line_split |
backfill.py | # Finished iterating over all available accounts
break
# Decode account
path_to_leaf, address_hash_nibbles, encoded_account = next_account_info
account = rlp.decode(encoded_account, sedes=Account)
# Iterate over all missing hashes of subcomponents (storage & bytecode)
subcomponent_hashes_iterator = self._missing_subcomponent_hashes(
address_hash_nibbles,
account,
starting_root_hash,
)
async for node_request in subcomponent_hashes_iterator:
yield node_request
# Check if account is fully downloaded
account_components_complete = self._are_account_components_complete(
address_hash_nibbles,
account,
)
if account_components_complete:
# Mark fully downloaded accounts as complete, and do some cleanup
self._mark_account_complete(path_to_leaf, address_hash_nibbles)
else:
# Pause accounts that are not fully downloaded, and track the account
# to resume when the generator exits.
self._account_tracker.pause_review(path_to_leaf)
exhausted_account_leaves += (path_to_leaf, )
except GeneratorExit:
# As the generator is exiting, we want to resume any paused accounts. This
# allows us to find missing storage/bytecode on the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
raise
else:
# If we pause a few accounts and then run out of nodes to ask for, then we
# still need to resume the paused accounts to prepare for the next iteration.
for path_to_leaf in exhausted_account_leaves:
self._account_tracker.mark_for_review(path_to_leaf)
# Possible scenarios:
# 1. We have completed backfill
# 2. We have iterated the available nodes, and all known hashes are being requested.
# For example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes, and exit cleanly.
#
# In response to these situations, we might like to:
# 1. Log and celebrate that the full state has been downloaded
# 2. Exit this search and sleep a bit, waiting for new trie nodes to arrive
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it, using a _check_complete() check.
return
async def _request_tracking_trie_items(
self,
request_tracker: TrieNodeRequestTracker,
root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:
"""
Walk through the supplied trie, yielding the request tracker and node
request for any missing trie nodes.
:yield: path to leaf node, a key (as nibbles), and the value found in the trie
:raise: MissingTraversalNode if a node is missing while walking the trie
"""
if self._next_trie_root_hash is None:
# We haven't started beam syncing, so don't know which root to start at
return
trie = HexaryTrie(self._db, root_hash)
starting_index = bytes_to_nibbles(root_hash)
while self.manager.is_running:
try:
path_to_node = request_tracker.next_path_to_explore(starting_index)
except trie_exceptions.PerfectVisibility:
# This doesn't necessarily mean we are finished.
# Any active prefixes might still be hiding some significant portion of the trie
# But it's all we're able to explore for now, until more node data arrives
return
try:
cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node)
except KeyError:
cached_node = None
node_getter = partial(trie.traverse, path_to_node)
else:
node_getter = partial(trie.traverse_from, cached_node, uncached_key)
try:
node = node_getter()
except trie_exceptions.MissingTraversalNode as exc:
# Found missing account trie node
if path_to_node == exc.nibbles_traversed:
raise
elif cached_node is None:
# The path and nibbles traversed should always match in a non-cached traversal
raise RuntimeError(
f"Unexpected: on a non-cached traversal to {path_to_node}, the"
f" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}"
) from exc
else:
# We need to re-raise a version of the exception that includes the whole path
# from the root node (when using cached nodes, we only have the path from
# the parent node to the child node)
# We could always raise this re-wrapped version, but skipping it (probably?)
# improves performance.
missing_hash = exc.missing_node_hash
raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc
except trie_exceptions.TraversedPartialPath as exc:
node = exc.simulated_node
if node.value:
full_key_nibbles = path_to_node + node.suffix
if len(node.sub_segments):
# It shouldn't be a problem to skip handling this case, because all keys are
# hashed 32 bytes.
raise NotImplementedError(
"The state backfiller doesn't handle keys of different lengths, where"
f" one key is a prefix of another. But found {node} in trie with"
f" {root_hash!r}"
)
yield path_to_node, full_key_nibbles, node.value
# Note that we do not mark value nodes as completed. It is up to the caller
# to do that when it is ready. For example, the storage iterator will
# immediately treat the key as completed. The account iterator will
# not treat the key as completed until all of its storage and bytecode
# are also marked as complete.
else:
# If this is just an intermediate node, then we can mark it as confirmed.
request_tracker.confirm_prefix(path_to_node, node)
async def | (
self,
address_hash_nibbles: Nibbles,
account: Account,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
storage_node_iterator = self._missing_storage_hashes(
address_hash_nibbles,
account.storage_root,
starting_main_root,
)
async for node_request in storage_node_iterator:
yield node_request
bytecode_node_iterator = self._missing_bytecode_hashes(
address_hash_nibbles,
account.code_hash,
starting_main_root,
)
async for node_request in bytecode_node_iterator:
yield node_request
# Note that completing this iterator does NOT mean we're done with the
# account. It just means that all known missing hashes are actively
# being requested.
async def _missing_storage_hashes(
self,
address_hash_nibbles: Nibbles,
storage_root: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Walks through the storage trie at the given root, yielding one missing
storage node hash/prefix at a time.
The yielded node info is wrapped in a ``TrackedRequest``. The hash is
marked as active until it is explicitly marked for review again. The
hash/prefix will be marked for review after asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
if storage_root == BLANK_NODE_HASH:
# Nothing to do if the storage has an empty root
return
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
while self.manager.is_running:
storage_iterator = self._request_tracking_trie_items(
storage_tracker,
storage_root,
)
try:
async for path_to_leaf, hashed_key, _storage_value in storage_iterator:
# We don't actually care to look at the storage keys/values during backfill
storage_tracker.confirm_leaf(path_to_leaf)
except trie_exceptions.MissingTraversalNode as exc:
yield storage_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
else:
# Possible scenarios:
# 1. We have completed backfilling this account's storage
# 2. We have iterated the available nodes, and only their children are missing,
# for example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes.
#
# In response to these situations, we might like to:
# 1. Debug log?
# 2. Look for more missing nodes in neighboring accounts and their storage, etc.
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it.
return
async def _missing_bytecode_hashes(
self,
address_hash_nibbles: Nibbles,
code_hash: Hash32,
starting_main_root: Hash32 | _missing_subcomponent_hashes | identifier_name |
backfill.py | explicitly marked for review again. The
hash/prefix will be marked for review after asking a peer for the data.
Will exit when all known node hashes are already actively being
requested, or if there are no more missing nodes.
"""
if storage_root == BLANK_NODE_HASH:
# Nothing to do if the storage has an empty root
return
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
while self.manager.is_running:
storage_iterator = self._request_tracking_trie_items(
storage_tracker,
storage_root,
)
try:
async for path_to_leaf, hashed_key, _storage_value in storage_iterator:
# We don't actually care to look at the storage keys/values during backfill
storage_tracker.confirm_leaf(path_to_leaf)
except trie_exceptions.MissingTraversalNode as exc:
yield storage_tracker.generate_request(
exc.missing_node_hash,
exc.nibbles_traversed,
)
else:
# Possible scenarios:
# 1. We have completed backfilling this account's storage
# 2. We have iterated the available nodes, and only their children are missing,
# for example: if 0 nodes are available, and we walk to the root and request
# the root from a peer, we do not have any available information to ask for
# more nodes.
#
# In response to these situations, we might like to:
# 1. Debug log?
# 2. Look for more missing nodes in neighboring accounts and their storage, etc.
#
# 1 and 2 are a little more cleanly handled outside this iterator, so we just
# exit and let the caller deal with it.
return
async def _missing_bytecode_hashes(
self,
address_hash_nibbles: Nibbles,
code_hash: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
"""
Checks if this bytecode is missing. If so, yield it and then exit.
If not, then exit immediately.
This may seem like overkill, and it is right now. But...
Code merkelization is coming (theoretically), and the other account
and storage trie iterators work similarly to this, so in some ways
it's easier to do this "over-generalized" solution now. It makes
request tracking a bit easier too, to have the same TrackedRequest
result mechanism.
"""
if code_hash == EMPTY_SHA3:
# Nothing to do if the bytecode is for the empty hash
return
bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
if bytecode_tracker.is_complete:
# All bytecode has been collected
return
# If there is an active request (for now, there can only be one), then skip
# any database checks until the active request is resolved.
if not bytecode_tracker.has_active_requests:
if code_hash not in self._db:
# The bytecode isn't present, so we ask for it.
# A bit hacky here, since there is no trie, we just treat it as
# if it were a leaf node at the root.
yield bytecode_tracker.generate_request(code_hash, prefix=())
else:
# The bytecode is already present, but the tracker isn't marked
# as completed yet, so finish it off.
bytecode_tracker.confirm_leaf(path_to_leaf=())
def _get_storage_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
if address_hash_nibbles in self._storage_trackers:
return self._storage_trackers[address_hash_nibbles]
else:
new_tracker = TrieNodeRequestTracker()
self._storage_trackers[address_hash_nibbles] = new_tracker
return new_tracker
def _get_bytecode_tracker(self, address_hash_nibbles: Nibbles) -> TrieNodeRequestTracker:
if address_hash_nibbles in self._bytecode_trackers:
return self._bytecode_trackers[address_hash_nibbles]
else:
new_tracker = TrieNodeRequestTracker()
self._bytecode_trackers[address_hash_nibbles] = new_tracker
return new_tracker
def _mark_account_complete(self, path_to_leaf: Nibbles, address_hash_nibbles: Nibbles) -> None:
self._account_tracker.confirm_leaf(path_to_leaf)
self._num_accounts_completed += 1
# Clear the storage tracker, to reduce memory usage
# and the time to check self._check_complete()
if address_hash_nibbles in self._storage_trackers:
self._num_storage_completed += 1
del self._storage_trackers[address_hash_nibbles]
# Clear the bytecode tracker, for the same reason
if address_hash_nibbles in self._bytecode_trackers:
del self._bytecode_trackers[address_hash_nibbles]
def _are_account_components_complete(
self,
address_hash_nibbles: Nibbles,
account: Account) -> bool:
if account.storage_root != BLANK_NODE_HASH:
# Avoid generating a storage tracker if there is no storage for this account
storage_tracker = self._get_storage_tracker(address_hash_nibbles)
if account.storage_root == BLANK_NODE_HASH or storage_tracker.is_complete:
if account.code_hash == EMPTY_SHA3:
# All storage is downloaded, and no bytecode to download
return True
else:
bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)
# All storage is downloaded, return True only if bytecode is downloaded
return bytecode_tracker.is_complete
else:
# Missing some storage
return False
async def _make_request(
self,
peer: ETHPeer,
request_data: Iterable[TrackedRequest]) -> None:
self._num_requests_by_peer[peer] += 1
request_hashes = tuple(set(request.node_hash for request in request_data))
try:
nodes = await peer.eth_api.get_node_data(request_hashes)
except asyncio.TimeoutError:
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
except PeerConnectionLost:
# Something unhappy, but we don't really care, peer will be gone by next loop
pass
except (BaseP2PError, Exception) as exc:
self.logger.info("Unexpected err while getting background nodes from %s: %s", peer, exc)
self.logger.debug("Problem downloading background nodes from peer...", exc_info=True)
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS * 2)
else:
self._queening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS)
self._insert_results(request_hashes, nodes)
finally:
for request in request_data:
request.tracker.mark_for_review(request.prefix)
def _insert_results(
self,
requested_hashes: Tuple[Hash32, ...],
nodes: Tuple[Tuple[Hash32, bytes], ...]) -> None:
returned_nodes = dict(nodes)
with self._db.atomic_batch() as write_batch:
for requested_hash in requested_hashes:
if requested_hash in returned_nodes:
self._num_added += 1
self._total_added_nodes += 1
encoded_node = returned_nodes[requested_hash]
write_batch[requested_hash] = encoded_node
else:
self._num_missed += 1
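# Illustrative walk-through (not from the original source): if five hashes
# were requested and the peer returned three of them, the three encoded
# nodes are written in a single atomic batch, _num_added and
# _total_added_nodes grow by three, and _num_missed grows by two.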
def set_root_hash(self, header: BlockHeaderAPI, root_hash: Hash32) -> None:
if self._next_trie_root_hash is None:
self._next_trie_root_hash = root_hash
self._begin_backfill.set()
elif header.block_number % EPOCH_BLOCK_LENGTH == 1:
# This is the root hash of the *parent* of the header, so use modulus equals 1
self._next_trie_root_hash = root_hash
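# Illustrative note (assumption: EPOCH_BLOCK_LENGTH is the 32-block epoch
# mentioned in _contiguous_accounts_complete_fraction below): the backfill
# root would then be refreshed at block numbers 33, 65, 97, ..., i.e.
# whenever block_number % EPOCH_BLOCK_LENGTH == 1.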
async def _periodically_report_progress(self) -> None:
for step in itertools.count():
if not self.manager.is_running:
break
self._num_added = 0
self._num_missed = 0
timer = Timer()
await asyncio.sleep(self._report_interval)
if not self._begin_backfill.is_set():
self.logger.debug("Beam-Backfill: waiting for new state root")
continue
msg = "total=%d" % self._total_added_nodes
msg += " new=%d" % self._num_added
msg += " miss=%d" % self._num_missed
self.logger.debug("Beam-Backfill: %s", msg)
# log peer counts
show_top_n_peers = 3
self.logger.debug(
"Beam-Backfill-Peer-Usage-Top-%d: %s",
show_top_n_peers,
self._num_requests_by_peer.most_common(show_top_n_peers),
)
# For now, report every 30s (1/3 as often as the debug report above)
if step % 3 == 0:
num_storage_trackers = len(self._storage_trackers)
if num_storage_trackers:
| active_storage_completion = sum(
self._complete_trie_fraction(store_tracker)
for store_tracker in self._storage_trackers.values()
) / num_storage_trackers | conditional_block |
|
backfill.py | ening_queue.readd_peasant(peer, GAP_BETWEEN_TESTS)
self._insert_results(request_hashes, nodes)
finally:
for request in request_data:
request.tracker.mark_for_review(request.prefix)
def _insert_results(
self,
requested_hashes: Tuple[Hash32, ...],
nodes: Tuple[Tuple[Hash32, bytes], ...]) -> None:
returned_nodes = dict(nodes)
with self._db.atomic_batch() as write_batch:
for requested_hash in requested_hashes:
if requested_hash in returned_nodes:
self._num_added += 1
self._total_added_nodes += 1
encoded_node = returned_nodes[requested_hash]
write_batch[requested_hash] = encoded_node
else:
self._num_missed += 1
def set_root_hash(self, header: BlockHeaderAPI, root_hash: Hash32) -> None:
if self._next_trie_root_hash is None:
self._next_trie_root_hash = root_hash
self._begin_backfill.set()
elif header.block_number % EPOCH_BLOCK_LENGTH == 1:
# This is the root hash of the *parent* of the header, so use modulus equals 1
self._next_trie_root_hash = root_hash
async def _periodically_report_progress(self) -> None:
for step in itertools.count():
if not self.manager.is_running:
break
self._num_added = 0
self._num_missed = 0
timer = Timer()
await asyncio.sleep(self._report_interval)
if not self._begin_backfill.is_set():
self.logger.debug("Beam-Backfill: waiting for new state root")
continue
msg = "total=%d" % self._total_added_nodes
msg += " new=%d" % self._num_added
msg += " miss=%d" % self._num_missed
self.logger.debug("Beam-Backfill: %s", msg)
# log peer counts
show_top_n_peers = 3
self.logger.debug(
"Beam-Backfill-Peer-Usage-Top-%d: %s",
show_top_n_peers,
self._num_requests_by_peer.most_common(show_top_n_peers),
)
# For now, report every 30s (1/3 as often as the debug report above)
if step % 3 == 0:
num_storage_trackers = len(self._storage_trackers)
if num_storage_trackers:
active_storage_completion = sum(
self._complete_trie_fraction(store_tracker)
for store_tracker in self._storage_trackers.values()
) / num_storage_trackers
else:
active_storage_completion = 0
# Log backfill state stats as a progress indicator to the user:
# - nodes: the total number of nodes collected during this backfill session
# - accts: number of accounts completed, including all storage and bytecode,
# if present. This includes accounts downloaded and ones already present.
# - prog: the progress to completion, measured as a percentage of accounts
# completed, using trie structure. Ignores imbalances caused by storage.
# - stores: number of non-trivial complete storages downloaded
# - storing: the percentage complete and number of storage tries being
# downloaded actively
# - walked: the part of the account trie walked from this
# epoch's index, as parts per million (a fraction of the
# total account trie)
# - tnps: trie nodes collected per second, since the last debug log (in the
# last 10 seconds, at comment time)
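# A purely hypothetical example of the resulting log line (all values
# invented for illustration only):
# State Stats: nodes=152340 accts=18211 prog=2.15% stores=740 storing=38.2% of 16 walked=510.3ppm tnps=35 req=120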
num_requests = sum(self._num_requests_by_peer.values())
if num_requests == 0:
log = self.logger.debug
else:
log = self.logger.info
log(
(
"State Stats: nodes=%d accts=%d prog=%.2f%% stores=%d"
" storing=%.1f%% of %d walked=%.1fppm tnps=%.0f req=%d"
),
self._total_added_nodes,
self._num_accounts_completed,
self._complete_trie_fraction(self._account_tracker) * 100,
self._num_storage_completed,
active_storage_completion * 100,
num_storage_trackers,
self._contiguous_accounts_complete_fraction() * 1e6,
self._num_added / timer.elapsed,
num_requests,
)
self._num_requests_by_peer.clear()
def _complete_trie_fraction(self, tracker: TrieNodeRequestTracker) -> float:
"""
Calculate stats for logging: estimate what percent of the trie is completed,
by looking at unexplored prefixes in the account trie.
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion
One awkward thing: there will be no apparent progress while filling in
the storage of a single large account. Progress is slow enough anyway
that this is probably immaterial.
"""
# Move this logic into HexaryTrieFog someday
unknown_prefixes = tracker._trie_fog._unexplored_prefixes
# Basic estimation logic:
# - An unknown prefix 0xf means that we are missing 1/16 of the trie
# - An unknown prefix 0x12 means that we are missing 1/(16^2) of the trie
# - Add up all the unknown prefixes to estimate the total collected fraction.
unknown_fraction = sum(
(1 / 16) ** len(prefix)
for prefix in unknown_prefixes
)
return 1 - unknown_fraction
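# Illustrative arithmetic (not from the original source): with unknown
# prefixes (0x1,) and (0x2, 0x3), the unknown fraction is
# (1/16)**1 + (1/16)**2 = 0.0625 + 0.00390625 = 0.06640625,
# so this method returns roughly 0.934, i.e. ~93.4% of the trie explored.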
def _contiguous_accounts_complete_fraction(self) -> float:
"""
Estimate the completed fraction of the trie that is contiguous with
the current index (which rotates every 32 blocks)
It will be probably be quite noticeable that it will get "stuck" when
downloading a lot of storage, because we'll have to blow it up to more
than a percentage to see any significant change within 32 blocks. (when
the index will change again anyway)
:return: a number in the range [0, 1] (+/- rounding error) estimating
trie completion contiguous with the current backfill index key
"""
starting_index = bytes_to_nibbles(self._next_trie_root_hash)
unknown_prefixes = self._account_tracker._trie_fog._unexplored_prefixes
if len(unknown_prefixes) == 0:
return 1
# find the nearest unknown prefix (typically, on the right)
nearest_index = unknown_prefixes.bisect(starting_index)
# Get the nearest unknown prefix to the left
if nearest_index == 0:
left_prefix = (0, ) * 64
else:
left_prefix = unknown_prefixes[nearest_index - 1]
if key_starts_with(starting_index, left_prefix):
# The prefix of the starting index is unknown, so the index
# itself is unknown.
return 0
# Get the nearest unknown prefix to the right
if len(unknown_prefixes) == nearest_index:
right_prefix = (0xf, ) * 64
else:
right_prefix = unknown_prefixes[nearest_index]
# Use the space between the unknown prefixes to estimate the completed contiguous fraction
# At the base, every gap in the first nibble is a full 1/16th of the state complete
known_first_nibbles = right_prefix[0] - left_prefix[0] - 1
completed_fraction_base = (1 / 16) * known_first_nibbles
# Underneath, you can count completed subtrees on the right, each child 1/16 of the parent
right_side_completed = sum(
nibble * (1 / 16) ** nibble_depth
for nibble_depth, nibble
in enumerate(right_prefix[1:], 2)
)
# Do the same on the left
left_side_completed = sum(
(0xf - nibble) * (1 / 16) ** nibble_depth
for nibble_depth, nibble
in enumerate(left_prefix[1:], 2)
)
# Add up all completed areas
return left_side_completed + completed_fraction_base + right_side_completed
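# Illustrative trace (not from the original source): with
# left_prefix == (0x3, 0x8) and right_prefix == (0xa, 0x4),
# known_first_nibbles = 0xa - 0x3 - 1 = 6, giving a base of 6/16 = 0.375;
# right_side_completed = 4 * (1/16)**2 = 0.015625;
# left_side_completed = (0xf - 0x8) * (1/16)**2 = 0.02734375;
# so the contiguous completion estimate is about 0.418 (41.8%).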
class TrieNodeRequestTracker:
| def __init__(self) -> None:
self._trie_fog = fog.HexaryTrieFog()
self._active_prefixes: Set[Nibbles] = set()
# cache of nodes used to speed up trie walking
self._node_frontier_cache = fog.TrieFrontierCache()
def mark_for_review(self, prefix: Nibbles) -> None:
# Calling this does not mean that the nodes were returned, only that they are eligible again
# for review (either they were returned or we can ask a different peer for them)
self._active_prefixes.remove(prefix)
def pause_review(self, prefix: Nibbles) -> None:
"""
Stop iterating this node, until mark_for_review() is called
"""
self._active_prefixes.add(prefix)
def _get_eligible_fog(self) -> fog.HexaryTrieFog:
""" | identifier_body |
|
copy_up.go | file is
// written to.
//
// Lock ordering: Dirent.mu -> Inode.overlay.copyMu -> Inode.mu.
//
// Caveats:
//
// If any step in copying up a file fails, copyUp cleans the upper
// filesystem of any partially up-to-date file. If this cleanup fails,
// the overlay may be in an unacceptable, inconsistent state, so copyUp
// panics. If copyUp fails because any step (above) fails, a generic
// error is returned.
//
// copyUp currently makes no attempt to optimize copying up file content.
// For large files, this means that copyUp blocks until the entire file
// is copied synchronously.
func copyUp(ctx context.Context, d *Dirent) error {
renameMu.RLock()
defer renameMu.RUnlock()
return copyUpLockedForRename(ctx, d)
}
// copyUpLockedForRename is the same as copyUp except that it does not lock
// renameMu.
//
// It copies each component of d that does not yet exist in the upper
// filesystem. If d already exists in the upper filesystem, it is a no-op.
//
// Any error returned indicates a failure to copy all of d. This may
// leave the upper filesystem filled with any number of parent directories
// but the upper filesystem will never be in an inconsistent state.
//
// Preconditions:
// - d.Inode.overlay is non-nil.
func copyUpLockedForRename(ctx context.Context, d *Dirent) error {
for {
// Did we race with another copy up or does there
// already exist something in the upper filesystem
// for d?
d.Inode.overlay.copyMu.RLock()
if d.Inode.overlay.upper != nil {
d.Inode.overlay.copyMu.RUnlock()
// Done, d is in the upper filesystem.
return nil
}
d.Inode.overlay.copyMu.RUnlock()
// Find the next component to copy up. We will work our way
// down to the last component of d and finally copy it.
next := findNextCopyUp(ctx, d)
// Attempt to copy.
if err := doCopyUp(ctx, next); err != nil {
return err
}
}
}
// findNextCopyUp finds the next component of d from root that does not
// yet exist in the upper filesystem. The parent of this component is
// also returned, which is the root of the overlay in the worst case.
func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent {
next := d
for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ {
// Does this parent have a non-nil upper Inode?
parent.Inode.overlay.copyMu.RLock()
if parent.Inode.overlay.upper != nil {
parent.Inode.overlay.copyMu.RUnlock()
// Note that since we found an upper, it is stable.
return next
}
parent.Inode.overlay.copyMu.RUnlock()
// Continue searching for a parent with a non-nil
// upper Inode.
next = parent
parent = next.parent
}
}
func doCopyUp(ctx context.Context, d *Dirent) error {
// Fail fast on Inode types we won't be able to copy up anyways. These
// Inodes may block in GetFile while holding copyMu for reading. If we
// then try to take copyMu for writing here, we'd deadlock.
t := d.Inode.overlay.lower.StableAttr.Type
if t != RegularFile && t != Directory && t != Symlink {
return syserror.EINVAL
}
// Wait to get exclusive access to the upper Inode.
d.Inode.overlay.copyMu.Lock()
defer d.Inode.overlay.copyMu.Unlock()
if d.Inode.overlay.upper != nil {
// We raced with another doCopyUp, no problem.
return nil
}
// Perform the copy.
return copyUpLocked(ctx, d.parent, d)
}
// copyUpLocked creates a copy of next in the upper filesystem of parent.
//
// copyUpLocked must be called with d.Inode.overlay.copyMu locked.
//
// Returns a generic error on failure.
//
// Preconditions:
// - parent.Inode.overlay.upper must be non-nil.
// - next.Inode.overlay.copyMu must be locked writable.
// - next.Inode.overlay.lower must be non-nil.
// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
// or Symlink.
// - upper filesystem must support setting file ownership and timestamps.
func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Extract the attributes of the file we wish to copy.
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
if err != nil {
log.Warningf("copy up failed to get lower attributes: %v", err)
return syserror.EIO
}
var childUpperInode *Inode
parentUpper := parent.Inode.overlay.upper
root := RootFromContext(ctx)
if root != nil {
defer root.DecRef()
}
// Create the file in the upper filesystem and get an Inode for it.
switch next.Inode.StableAttr.Type {
case RegularFile:
childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms)
if err != nil {
log.Warningf("copy up failed to create file: %v", err)
return syserror.EIO
}
defer childFile.DecRef()
childUpperInode = childFile.Dirent.Inode
case Directory:
if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil {
log.Warningf("copy up failed to create directory: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup directory: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
case Symlink:
childLower := next.Inode.overlay.lower
link, err := childLower.Readlink(ctx)
if err != nil {
log.Warningf("copy up failed to read symlink value: %v", err)
return syserror.EIO
}
if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil {
log.Warningf("copy up failed to create symlink: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup symlink: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
default:
panic(fmt.Sprintf("copy up of invalid type %v on %+v", next.Inode.StableAttr.Type, next))
}
// Bring file attributes up to date. This does not include size, which will be
// brought up to date with copyContentsLocked.
if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil {
log.Warningf("copy up failed to copy up attributes: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Copy the entire file.
if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil {
log.Warningf("copy up failed to copy up contents: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
lowerMappable := next.Inode.overlay.lower.Mappable()
upperMappable := childUpperInode.Mappable()
if lowerMappable != nil && upperMappable == nil {
log.Warningf("copy up failed: cannot ensure memory mapping coherence")
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Propagate memory mappings to the upper Inode.
next.Inode.overlay.mapsMu.Lock()
defer next.Inode.overlay.mapsMu.Unlock()
if upperMappable != nil | {
// Remember which mappings we added so we can remove them on failure.
allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange)
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
added := make(memmap.MappingsOfRange)
for m := range seg.Value() {
if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable); err != nil {
for m := range added {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
for mr, mappings := range allAdded {
for m := range mappings {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start, m.Writable)
}
}
return err
}
added[m] = struct{}{}
}
allAdded[seg.Range()] = added | conditional_block |
|
copy_up.go | upper
// filesystem.
//
// Synchronization:
//
// copyUp synchronizes with rename(2) using renameMu to ensure that
// parentage does not change while a file is being copied. In the context
// of rename(2), copyUpLockedForRename should be used to avoid deadlock on
// renameMu.
//
// The following operations synchronize with copyUp using copyMu:
//
// - InodeOperations, i.e. to ensure that looking up a directory takes
// into account new upper filesystem directories created by copy up,
// which subsequently can be modified.
//
// - FileOperations, i.e. to ensure that reading from a file does not
// continue using a stale, lower filesystem handle when the file is
// written to.
//
// Lock ordering: Dirent.mu -> Inode.overlay.copyMu -> Inode.mu.
//
// Caveats:
//
// If any step in copying up a file fails, copyUp cleans the upper
// filesystem of any partially up-to-date file. If this cleanup fails,
// the overlay may be in an unacceptable, inconsistent state, so copyUp
// panics. If copyUp fails because any step (above) fails, a generic
// error is returned.
//
// copyUp currently makes no attempt to optimize copying up file content.
// For large files, this means that copyUp blocks until the entire file
// is copied synchronously.
func copyUp(ctx context.Context, d *Dirent) error {
renameMu.RLock()
defer renameMu.RUnlock()
return copyUpLockedForRename(ctx, d)
}
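// Illustrative sketch (an assumption, not part of the original file): the
// "Dirent.mu -> Inode.overlay.copyMu -> Inode.mu" ordering documented above
// means a caller that needs all three takes them in that sequence, e.g.
//
//	d.mu.Lock()
//	d.Inode.overlay.copyMu.RLock()
//	d.Inode.mu.Lock()
//
// and releases them in the reverse order, which keeps copy-up free of
// lock-order inversions with rename and regular file operations.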
// copyUpLockedForRename is the same as copyUp except that it does not lock
// renameMu.
//
// It copies each component of d that does not yet exist in the upper
// filesystem. If d already exists in the upper filesystem, it is a no-op.
//
// Any error returned indicates a failure to copy all of d. This may
// leave the upper filesystem filled with any number of parent directories
// but the upper filesystem will never be in an inconsistent state.
//
// Preconditions:
// - d.Inode.overlay is non-nil.
func copyUpLockedForRename(ctx context.Context, d *Dirent) error {
for {
// Did we race with another copy up or does there
// already exist something in the upper filesystem
// for d?
d.Inode.overlay.copyMu.RLock()
if d.Inode.overlay.upper != nil {
d.Inode.overlay.copyMu.RUnlock()
// Done, d is in the upper filesystem.
return nil
}
d.Inode.overlay.copyMu.RUnlock()
// Find the next component to copy up. We will work our way
// down to the last component of d and finally copy it.
next := findNextCopyUp(ctx, d)
// Attempt to copy.
if err := doCopyUp(ctx, next); err != nil {
return err
}
}
}
// findNextCopyUp finds the next component of d from root that does not
// yet exist in the upper filesystem. The parent of this component is
// also returned, which is the root of the overlay in the worst case.
func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent {
next := d
for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ {
// Does this parent have a non-nil upper Inode?
parent.Inode.overlay.copyMu.RLock()
if parent.Inode.overlay.upper != nil {
parent.Inode.overlay.copyMu.RUnlock()
// Note that since we found an upper, it is stable.
return next
}
parent.Inode.overlay.copyMu.RUnlock()
// Continue searching for a parent with a non-nil
// upper Inode.
next = parent
parent = next.parent
}
}
func doCopyUp(ctx context.Context, d *Dirent) error |
// copyUpLocked creates a copy of next in the upper filesystem of parent.
//
// copyUpLocked must be called with d.Inode.overlay.copyMu locked.
//
// Returns a generic error on failure.
//
// Preconditions:
// - parent.Inode.overlay.upper must be non-nil.
// - next.Inode.overlay.copyMu must be locked writable.
// - next.Inode.overlay.lower must be non-nil.
// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
// or Symlink.
// - upper filesystem must support setting file ownership and timestamps.
func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Extract the attributes of the file we wish to copy.
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
if err != nil {
log.Warningf("copy up failed to get lower attributes: %v", err)
return syserror.EIO
}
var childUpperInode *Inode
parentUpper := parent.Inode.overlay.upper
root := RootFromContext(ctx)
if root != nil {
defer root.DecRef()
}
// Create the file in the upper filesystem and get an Inode for it.
switch next.Inode.StableAttr.Type {
case RegularFile:
childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms)
if err != nil {
log.Warningf("copy up failed to create file: %v", err)
return syserror.EIO
}
defer childFile.DecRef()
childUpperInode = childFile.Dirent.Inode
case Directory:
if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil {
log.Warningf("copy up failed to create directory: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup directory: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
case Symlink:
childLower := next.Inode.overlay.lower
link, err := childLower.Readlink(ctx)
if err != nil {
log.Warningf("copy up failed to read symlink value: %v", err)
return syserror.EIO
}
if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil {
log.Warningf("copy up failed to create symlink: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup symlink: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
default:
panic(fmt.Sprintf("copy up of invalid type %v on %+v", next.Inode.StableAttr.Type, next))
}
// Bring file attributes up to date. This does not include size, which will be
// brought up to date with copyContentsLocked.
if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil {
log.Warningf("copy up failed to copy up attributes: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Copy the entire file.
if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil {
log.Warningf("copy up failed to copy up contents: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
lowerMappable := next.Inode.overlay.lower.Mappable()
upperMappable := childUpperInode.Mappable()
if lowerMappable != nil && upperMappable == nil {
log.Warningf("copy up failed: cannot ensure memory mapping coherence")
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Propagate memory mappings to the upper Inode.
next.Inode.overlay.mapsMu.Lock()
defer next.Inode.overlay.mapsMu.Unlock()
if upperMappable != nil {
// Remember which mappings we added so we can remove them on failure.
allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange)
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
added := make(mem | {
// Fail fast on Inode types we won't be able to copy up anyways. These
// Inodes may block in GetFile while holding copyMu for reading. If we
// then try to take copyMu for writing here, we'd deadlock.
t := d.Inode.overlay.lower.StableAttr.Type
if t != RegularFile && t != Directory && t != Symlink {
return syserror.EINVAL
}
// Wait to get exclusive access to the upper Inode.
d.Inode.overlay.copyMu.Lock()
defer d.Inode.overlay.copyMu.Unlock()
if d.Inode.overlay.upper != nil {
// We raced with another doCopyUp, no problem.
return nil
}
// Perform the copy.
return copyUpLocked(ctx, d.parent, d)
} | identifier_body |
copy_up.go | the upper
// filesystem.
//
// Synchronization:
//
// copyUp synchronizes with rename(2) using renameMu to ensure that
// parentage does not change while a file is being copied. In the context
// of rename(2), copyUpLockedForRename should be used to avoid deadlock on
// renameMu.
//
// The following operations synchronize with copyUp using copyMu:
//
// - InodeOperations, i.e. to ensure that looking up a directory takes
// into account new upper filesystem directories created by copy up,
// which subsequently can be modified.
//
// - FileOperations, i.e. to ensure that reading from a file does not
// continue using a stale, lower filesystem handle when the file is
// written to.
//
// Lock ordering: Dirent.mu -> Inode.overlay.copyMu -> Inode.mu.
//
// Caveats:
//
// If any step in copying up a file fails, copyUp cleans the upper
// filesystem of any partially up-to-date file. If this cleanup fails,
// the overlay may be in an unacceptable, inconsistent state, so copyUp
// panics. If copyUp fails because any step (above) fails, a generic
// error is returned.
//
// copyUp currently makes no attempt to optimize copying up file content.
// For large files, this means that copyUp blocks until the entire file
// is copied synchronously.
func copyUp(ctx context.Context, d *Dirent) error {
renameMu.RLock()
defer renameMu.RUnlock()
return copyUpLockedForRename(ctx, d)
}
// copyUpLockedForRename is the same as copyUp except that it does not lock
// renameMu.
//
// It copies each component of d that does not yet exist in the upper
// filesystem. If d already exists in the upper filesystem, it is a no-op.
//
// Any error returned indicates a failure to copy all of d. This may
// leave the upper filesystem filled with any number of parent directories
// but the upper filesystem will never be in an inconsistent state.
//
// Preconditions:
// - d.Inode.overlay is non-nil.
func copyUpLockedForRename(ctx context.Context, d *Dirent) error {
for {
// Did we race with another copy up or does there
// already exist something in the upper filesystem
// for d?
d.Inode.overlay.copyMu.RLock()
if d.Inode.overlay.upper != nil {
d.Inode.overlay.copyMu.RUnlock()
// Done, d is in the upper filesystem.
return nil
}
d.Inode.overlay.copyMu.RUnlock()
// Find the next component to copy up. We will work our way
// down to the last component of d and finally copy it.
next := findNextCopyUp(ctx, d)
// Attempt to copy.
if err := doCopyUp(ctx, next); err != nil {
return err
}
}
}
// findNextCopyUp finds the next component of d from root that does not
// yet exist in the upper filesystem. The parent of this component is
// also returned, which is the root of the overlay in the worst case.
func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent {
next := d
for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ {
// Does this parent have a non-nil upper Inode?
parent.Inode.overlay.copyMu.RLock()
if parent.Inode.overlay.upper != nil {
parent.Inode.overlay.copyMu.RUnlock()
// Note that since we found an upper, it is stable.
return next
}
parent.Inode.overlay.copyMu.RUnlock()
// Continue searching for a parent with a non-nil
// upper Inode.
next = parent
parent = next.parent
}
}
func doCopyUp(ctx context.Context, d *Dirent) error {
// Fail fast on Inode types we won't be able to copy up anyways. These
// Inodes may block in GetFile while holding copyMu for reading. If we
// then try to take copyMu for writing here, we'd deadlock.
t := d.Inode.overlay.lower.StableAttr.Type
if t != RegularFile && t != Directory && t != Symlink {
return syserror.EINVAL
}
// Wait to get exclusive access to the upper Inode.
d.Inode.overlay.copyMu.Lock()
defer d.Inode.overlay.copyMu.Unlock()
if d.Inode.overlay.upper != nil {
// We raced with another doCopyUp, no problem.
return nil
}
// Perform the copy.
return copyUpLocked(ctx, d.parent, d)
}
// copyUpLocked creates a copy of next in the upper filesystem of parent.
//
// copyUpLocked must be called with d.Inode.overlay.copyMu locked.
//
// Returns a generic error on failure.
//
// Preconditions:
// - parent.Inode.overlay.upper must be non-nil.
// - next.Inode.overlay.copyMu must be locked writable.
// - next.Inode.overlay.lower must be non-nil.
// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
// or Symlink.
// - upper filesystem must support setting file ownership and timestamps.
func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Extract the attributes of the file we wish to copy.
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
if err != nil {
log.Warningf("copy up failed to get lower attributes: %v", err)
return syserror.EIO
}
var childUpperInode *Inode
parentUpper := parent.Inode.overlay.upper
root := RootFromContext(ctx)
if root != nil {
defer root.DecRef()
}
// Create the file in the upper filesystem and get an Inode for it.
switch next.Inode.StableAttr.Type {
case RegularFile:
childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms)
if err != nil {
log.Warningf("copy up failed to create file: %v", err)
return syserror.EIO
}
defer childFile.DecRef()
childUpperInode = childFile.Dirent.Inode
case Directory:
if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil {
log.Warningf("copy up failed to create directory: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup directory: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
case Symlink:
childLower := next.Inode.overlay.lower | if err != nil {
log.Warningf("copy up failed to read symlink value: %v", err)
return syserror.EIO
}
if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil {
log.Warningf("copy up failed to create symlink: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup symlink: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
default:
panic(fmt.Sprintf("copy up of invalid type %v on %+v", next.Inode.StableAttr.Type, next))
}
// Bring file attributes up to date. This does not include size, which will be
// brought up to date with copyContentsLocked.
if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil {
log.Warningf("copy up failed to copy up attributes: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Copy the entire file.
if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil {
log.Warningf("copy up failed to copy up contents: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
lowerMappable := next.Inode.overlay.lower.Mappable()
upperMappable := childUpperInode.Mappable()
if lowerMappable != nil && upperMappable == nil {
log.Warningf("copy up failed: cannot ensure memory mapping coherence")
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Propagate memory mappings to the upper Inode.
next.Inode.overlay.mapsMu.Lock()
defer next.Inode.overlay.mapsMu.Unlock()
if upperMappable != nil {
// Remember which mappings we added so we can remove them on failure.
allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange)
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
added := make(memmap.M | link, err := childLower.Readlink(ctx) | random_line_split |
copy_up.go | d is in the upper filesystem.
return nil
}
d.Inode.overlay.copyMu.RUnlock()
// Find the next component to copy up. We will work our way
// down to the last component of d and finally copy it.
next := findNextCopyUp(ctx, d)
// Attempt to copy.
if err := doCopyUp(ctx, next); err != nil {
return err
}
}
}
// findNextCopyUp finds the next component of d from root that does not
// yet exist in the upper filesystem. The parent of this component is
// also returned, which is the root of the overlay in the worst case.
func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent {
next := d
for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ {
// Does this parent have a non-nil upper Inode?
parent.Inode.overlay.copyMu.RLock()
if parent.Inode.overlay.upper != nil {
parent.Inode.overlay.copyMu.RUnlock()
// Note that since we found an upper, it is stable.
return next
}
parent.Inode.overlay.copyMu.RUnlock()
// Continue searching for a parent with a non-nil
// upper Inode.
next = parent
parent = next.parent
}
}
func doCopyUp(ctx context.Context, d *Dirent) error {
// Fail fast on Inode types we won't be able to copy up anyways. These
// Inodes may block in GetFile while holding copyMu for reading. If we
// then try to take copyMu for writing here, we'd deadlock.
t := d.Inode.overlay.lower.StableAttr.Type
if t != RegularFile && t != Directory && t != Symlink {
return syserror.EINVAL
}
// Wait to get exclusive access to the upper Inode.
d.Inode.overlay.copyMu.Lock()
defer d.Inode.overlay.copyMu.Unlock()
if d.Inode.overlay.upper != nil {
// We raced with another doCopyUp, no problem.
return nil
}
// Perform the copy.
return copyUpLocked(ctx, d.parent, d)
}
// copyUpLocked creates a copy of next in the upper filesystem of parent.
//
// copyUpLocked must be called with d.Inode.overlay.copyMu locked.
//
// Returns a generic error on failure.
//
// Preconditions:
// - parent.Inode.overlay.upper must be non-nil.
// - next.Inode.overlay.copyMu must be locked writable.
// - next.Inode.overlay.lower must be non-nil.
// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
// or Symlink.
// - upper filesystem must support setting file ownership and timestamps.
func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Extract the attributes of the file we wish to copy.
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
if err != nil {
log.Warningf("copy up failed to get lower attributes: %v", err)
return syserror.EIO
}
var childUpperInode *Inode
parentUpper := parent.Inode.overlay.upper
root := RootFromContext(ctx)
if root != nil {
defer root.DecRef()
}
// Create the file in the upper filesystem and get an Inode for it.
switch next.Inode.StableAttr.Type {
case RegularFile:
childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms)
if err != nil {
log.Warningf("copy up failed to create file: %v", err)
return syserror.EIO
}
defer childFile.DecRef()
childUpperInode = childFile.Dirent.Inode
case Directory:
if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil {
log.Warningf("copy up failed to create directory: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup directory: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
case Symlink:
childLower := next.Inode.overlay.lower
link, err := childLower.Readlink(ctx)
if err != nil {
log.Warningf("copy up failed to read symlink value: %v", err)
return syserror.EIO
}
if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil {
log.Warningf("copy up failed to create symlink: %v", err)
return syserror.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
log.Warningf("copy up failed to lookup symlink: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
defer childUpper.DecRef()
childUpperInode = childUpper.Inode
default:
panic(fmt.Sprintf("copy up of invalid type %v on %+v", next.Inode.StableAttr.Type, next))
}
// Bring file attributes up to date. This does not include size, which will be
// brought up to date with copyContentsLocked.
if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil {
log.Warningf("copy up failed to copy up attributes: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Copy the entire file.
if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil {
log.Warningf("copy up failed to copy up contents: %v", err)
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
lowerMappable := next.Inode.overlay.lower.Mappable()
upperMappable := childUpperInode.Mappable()
if lowerMappable != nil && upperMappable == nil {
log.Warningf("copy up failed: cannot ensure memory mapping coherence")
cleanupUpper(ctx, parentUpper, next.name)
return syserror.EIO
}
// Propagate memory mappings to the upper Inode.
next.Inode.overlay.mapsMu.Lock()
defer next.Inode.overlay.mapsMu.Unlock()
if upperMappable != nil {
// Remember which mappings we added so we can remove them on failure.
allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange)
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
added := make(memmap.MappingsOfRange)
for m := range seg.Value() {
if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable); err != nil {
for m := range added {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
for mr, mappings := range allAdded {
for m := range mappings {
upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start, m.Writable)
}
}
return err
}
added[m] = struct{}{}
}
allAdded[seg.Range()] = added
}
}
// Take a reference on the upper Inode (transferred to
// next.Inode.overlay.upper) and make new translations use it.
next.Inode.overlay.dataMu.Lock()
childUpperInode.IncRef()
next.Inode.overlay.upper = childUpperInode
next.Inode.overlay.dataMu.Unlock()
// Invalidate existing translations through the lower Inode.
next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
// Remove existing memory mappings from the lower Inode.
if lowerMappable != nil {
for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
for m := range seg.Value() {
lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
}
}
return nil
}
// cleanupUpper removes name from parent, and panics if it is unsuccessful.
func cleanupUpper(ctx context.Context, parent *Inode, name string) {
if err := parent.InodeOperations.Remove(ctx, parent, name); err != nil {
// Unfortunately we don't have much choice. We shouldn't
// willingly give the caller access to a nonsense filesystem.
panic(fmt.Sprintf("overlay filesystem is in an inconsistent state: failed to remove %q from upper filesystem: %v", name, err))
}
}
// copyUpBuffers is a buffer pool for copying file content. The buffer
// size is the same used by io.Copy.
var copyUpBuffers = sync.Pool{New: func() interface{} { return make([]byte, 8*usermem.PageSize) }}
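// Illustrative usage sketch (an assumption, not part of the original file):
// a copy loop would borrow a buffer from the pool and return it when done:
//
//	buf := copyUpBuffers.Get().([]byte)
//	defer copyUpBuffers.Put(buf)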
// copyContentsLocked copies the contents of lower to upper. It panics if
// less than size bytes can be copied.
func | copyContentsLocked | identifier_name |