#![warn(clippy::pedantic)]
mod error;
mod transport;
use crate::error::Result;
use crate::transport::{HttpQueryTransport, QueryParams};
use bottlerocket_release::BottlerocketRelease;
use chrono::Utc;
use log::debug;
use model::modeled_types::FriendlyVersion;
use semver::Version;
use serde::{Deserialize, Serialize};
use signal_hook::consts::SIGTERM;
use signal_hook::iterator::Signals;
use signpost::State;
use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger};
use snafu::{ErrorCompat, OptionExt, ResultExt};
use std::convert::{TryFrom, TryInto};
use std::fs::{self, File, OpenOptions};
use std::io;
use std::path::Path;
use std::process;
use std::str::FromStr;
use std::thread;
use tough::{Repository, RepositoryLoader};
use update_metadata::{find_migrations, Manifest, Update};
use url::Url;
#[cfg(target_arch = "x86_64")]
const TARGET_ARCH: &str = "x86_64";
#[cfg(target_arch = "aarch64")]
const TARGET_ARCH: &str = "aarch64";
/// The root.json file as required by TUF.
const TRUSTED_ROOT_PATH: &str = "/usr/share/updog/root.json";
/// This is where we store the TUF targets used by migrator after reboot.
const MIGRATION_PATH: &str = "/var/lib/bottlerocket-migrations";
/// This is where we store the TUF metadata used by migrator after reboot.
const METADATA_PATH: &str = "/var/cache/bottlerocket-metadata";
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
enum Command {
CheckUpdate,
Whats,
Prepare,
Update,
UpdateImage,
UpdateApply,
UpdateRevert,
}
#[derive(Debug, Deserialize)]
struct Config {
metadata_base_url: String,
targets_base_url: String,
seed: u32,
version_lock: String,
ignore_waves: bool,
https_proxy: Option<String>,
no_proxy: Option<Vec<String>>,
// TODO API-sourced configuration, e.g.
// blacklist: Option<Vec<Version>>,
// mode: Option<{Automatic, Managed, Disabled}>
}
/// Prints a more specific message before exiting through usage().
fn usage_msg<S: AsRef<str>>(msg: S) -> ! {
eprintln!("{}\n", msg.as_ref());
usage();
}
fn usage() -> ! {
#[rustfmt::skip]
eprintln!("\
USAGE:
updog <SUBCOMMAND> <OPTIONS>
SUBCOMMANDS:
check-update Show if an update is available
[ -a | --all ] Output all available updates, even if they're not upgrades
[ --ignore-waves ] Ignore release schedule when checking
for a new update
prepare Download update files and migration targets
update Perform an update if available
[ -i | --image version ] Update to a specific image version
[ -n | --now ] Update immediately, ignoring any release schedule
[ -r | --reboot ] Reboot into new update on success
update-image Download & write an update but do not update flags
[ -i | --image version ] Update to a specific image version
[ -n | --now ] Update immediately, ignoring wave limits
[ -t | --timestamp time ] The timestamp to execute an update from
update-apply Update boot flags (after having called update-image)
[ -r | --reboot ] Reboot after updating boot flags
update-revert Revert actions done by 'update-apply'
GLOBAL OPTIONS:
[ -j | --json ] JSON-formatted output
[ --log-level trace|debug|info|warn|error ] Set logging verbosity");
std::process::exit(1)
}
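/// Reads the updog config from /etc/updog.toml. An illustrative config file
/// (the values here are placeholders, not defaults):
///
/// ```toml
/// metadata_base_url = "https://example.com/metadata"
/// targets_base_url = "https://example.com/targets"
/// seed = 1234
/// version_lock = "latest"
/// ignore_waves = false
/// ```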
fn load_config() -> Result<Config> {
let path = "/etc/updog.toml";
let s = fs::read_to_string(path).context(error::ConfigReadSnafu { path })?;
let config: Config = toml::from_str(&s).context(error::ConfigParseSnafu { path })?;
Ok(config)
}
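/// Loads the TUF repository described by the config, trusting the root role
/// shipped at TRUSTED_ROOT_PATH and fetching metadata and targets from the
/// configured base URLs through the given transport.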
fn load_repository(transport: HttpQueryTransport, config: &Config) -> Result<Repository> {
fs::create_dir_all(METADATA_PATH).context(error::CreateMetadataCacheSnafu {
path: METADATA_PATH,
})?;
RepositoryLoader::new(
File::open(TRUSTED_ROOT_PATH).context(error::OpenRootSnafu {
path: TRUSTED_ROOT_PATH,
})?,
Url::parse(&config.metadata_base_url).context(error::UrlParseSnafu {
url: &config.metadata_base_url,
})?,
Url::parse(&config.targets_base_url).context(error::UrlParseSnafu {
url: &config.targets_base_url,
})?,
)
.transport(transport)
.load()
.context(error::MetadataSnafu)
}
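/// Returns every update in the manifest that matches this host's variant and
/// architecture, respects its own max_version bound, and (unless waves are
/// ignored) is already open to this seed's wave, sorted newest-first.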
fn applicable_updates<'a>(
manifest: &'a Manifest,
variant: &str,
ignore_waves: bool,
seed: u32,
) -> Vec<&'a Update> {
let mut updates: Vec<&Update> = manifest
.updates
.iter()
.filter(|u| {
u.variant == *variant
&& u.arch == TARGET_ARCH
&& u.version <= u.max_version
&& (ignore_waves || u.update_ready(seed, Utc::now()))
})
.collect();
// sort descending
updates.sort_unstable_by(|a, b| b.version.cmp(&a.version));
updates
}
// TODO use config if there is api-sourced configuration that could affect this
// TODO updog.toml may include settings that cause us to ignore/delay
// certain/any updates;
// Ignore Specific Target Version
// Ignore Any Target
// ...
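/// Decides whether an update should happen. Precedence: a forced version wins
/// outright; otherwise a version_lock other than "latest" pins the target
/// version (returning None if it equals the running version); otherwise the
/// newest applicable update is chosen, including forced downgrades when the
/// running version exceeds max_version.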
fn update_required<'a>(
manifest: &'a Manifest,
version: &Version,
variant: &str,
ignore_waves: bool,
seed: u32,
version_lock: &str,
force_version: Option<Version>,
) -> Result<Option<&'a Update>> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if let Some(forced_version) = force_version {
return Ok(updates.into_iter().find(|u| u.version == forced_version));
}
if version_lock != "latest" {
// Make sure the version string from the config is a valid version string that might be prefixed with 'v'
let friendly_version_lock =
FriendlyVersion::try_from(version_lock).context(error::BadVersionConfigSnafu {
version_str: version_lock,
})?;
// Convert back to semver::Version
let semver_version_lock =
friendly_version_lock
.try_into()
.context(error::BadVersionSnafu {
version_str: version_lock,
})?;
// If the configured version-lock matches our current version, we won't update to the same version
return if semver_version_lock == *version {
Ok(None)
} else {
Ok(updates
.into_iter()
.find(|u| u.version == semver_version_lock))
};
}
for update in updates {
// Update if the candidate is newer than the running version, or if the
// running version exceeds the max version ever published (in which case
// even a downgrade moves us back to a valid version <= the maximum).
if *version < update.version || *version > update.max_version {
return Ok(Some(update));
}
}
Ok(None)
}
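/// Streams a single TUF target through an lz4 decoder onto the given path,
/// typically one of the inactive partition set's block devices.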
fn write_target_to_disk<P: AsRef<Path>>(
repository: &Repository,
target: &str,
disk_path: P,
) -> Result<()> {
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
let reader = repository
.read_target(&target)
.context(error::MetadataSnafu)?
.context(error::TargetNotFoundSnafu {
target: target.raw(),
})?;
// Note: the file extension for the compression type we're using should be removed in
// retrieve_migrations below.
let mut reader = lz4::Decoder::new(reader).context(error::Lz4DecodeSnafu {
target: target.raw(),
})?;
let mut f = OpenOptions::new()
.write(true)
.create(true)
.open(disk_path.as_ref())
.context(error::OpenPartitionSnafu {
path: disk_path.as_ref(),
})?;
io::copy(&mut reader, &mut f).context(error::WriteUpdateSnafu)?;
Ok(())
}
/// Store required migrations for an update in persistent storage. All intermediate migrations
/// between the current version and the target version must be retrieved.
fn retrieve_migrations(
repository: &Repository,
query_params: &mut QueryParams,
manifest: &Manifest,
update: &Update,
current_version: &Version,
) -> Result<()> {
// the migrations required for foo to bar and bar to foo are
// the same; we can pretend we're always upgrading from foo to
// bar and use the same logic to obtain the migrations
let target = std::cmp::max(&update.version, current_version);
let start = std::cmp::min(&update.version, current_version);
let dir = Path::new(MIGRATION_PATH);
if !dir.exists() {
fs::create_dir(dir).context(error::DirCreateSnafu { path: &dir })?;
}
// find the list of migrations in the manifest based on our from and to versions.
let mut targets = find_migrations(start, target, manifest)?;
// we need to store the manifest so that migrator can independently and securely determine the
// migration list. this is true even if there are no migrations.
targets.push("manifest.json".to_owned());
repository
.cache(METADATA_PATH, MIGRATION_PATH, Some(&targets), true)
.context(error::RepoCacheMigrationsSnafu)?;
// Set a query parameter listing the required migrations
query_params.add("migrations", targets.join(","));
Ok(())
}
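/// Writes the update's root, boot, and hash images to the inactive partition
/// set. The set is cleared (and that cleared state flushed to disk) before
/// writing, and marked valid again only after all three images are in place.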
fn update_image(update: &Update, repository: &Repository) -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.clear_inactive();
// Write out the clearing of the inactive partition immediately, because we're about to
// overwrite the partition set with update data and don't want it to be used until we
// know we're done with all components.
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
let inactive = gpt_state.inactive_set();
// TODO Do we want to recover the inactive side on an error?
write_target_to_disk(repository, &update.images.root, &inactive.root)?;
write_target_to_disk(repository, &update.images.boot, &inactive.boot)?;
write_target_to_disk(repository, &update.images.hash, &inactive.hash)?;
gpt_state.mark_inactive_valid();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state
.upgrade_to_inactive()
.context(error::InactivePartitionUpgradeSnafu)?;
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn revert_update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.cancel_upgrade();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn set_common_query_params(
query_params: &mut QueryParams,
current_version: &Version,
config: &Config,
) {
query_params.add("version", current_version.to_string());
query_params.add("seed", config.seed.to_string());
}
/// List any available update that matches the current variant
fn list_updates(
manifest: &Manifest,
variant: &str,
json: bool,
ignore_waves: bool,
seed: u32,
) -> Result<()> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if json {
println!(
"{}",
serde_json::to_string_pretty(&updates).context(error::UpdateSerializeSnafu)?
);
} else {
for u in updates {
eprintln!("{}", &fmt_full_version(u));
}
}
Ok(())
}
/// Struct to hold the specified command line argument values
#[allow(clippy::struct_excessive_bools)]
struct Arguments {
subcommand: String,
log_level: LevelFilter,
json: bool,
ignore_waves: bool,
force_version: Option<Version>,
all: bool,
reboot: bool,
variant: Option<String>,
}
/// Parse the command line arguments to get the user-specified values
fn parse_args(args: std::env::Args) -> Arguments {
let mut subcommand = None;
let mut log_level = None;
let mut update_version = None;
let mut ignore_waves = false;
let mut json = false;
let mut all = false;
let mut reboot = false;
let mut variant = None;
let mut iter = args.skip(1);
while let Some(arg) = iter.next() {
match arg.as_ref() {
"--log-level" => {
let log_level_str = iter
.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --log-level"));
log_level =
Some(LevelFilter::from_str(&log_level_str).unwrap_or_else(|_| {
usage_msg(format!("Invalid log level '{log_level_str}'"))
}));
}
"-i" | "--image" => match iter.next() {
Some(v) => match Version::parse(&v) {
Ok(v) => update_version = Some(v),
_ => usage(),
},
_ => usage(),
},
"--variant" => {
variant = Some(
iter.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --variant")),
);
}
"-n" | "--now" | "--ignore-waves" => {
ignore_waves = true;
}
"-j" | "--json" => {
json = true;
}
"-r" | "--reboot" => {
reboot = true;
}
"-a" | "--all" => {
all = true;
}
// Assume any argument not prefixed with '-' is a subcommand
s if !s.starts_with('-') => {
if subcommand.is_some() {
usage();
}
subcommand = Some(s.to_string());
}
_ => usage(),
}
}
Arguments {
subcommand: subcommand.unwrap_or_else(|| usage()),
log_level: log_level.unwrap_or(LevelFilter::Info),
json,
ignore_waves,
force_version: update_version,
all,
reboot,
variant,
}
}
fn fmt_full_version(update: &Update) -> String {
format!("{} {}", update.variant, update.version)
}
fn output<T: Serialize>(json: bool, object: T, string: &str) -> Result<()> {
if json {
println!(
"{}",
serde_json::to_string_pretty(&object).context(error::UpdateSerializeSnafu)?
);
} else {
println!("{string}");
}
Ok(())
}
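/// Invokes `shutdown -r`, ignoring SIGTERM in the meantime so updog isn't
/// killed by its own reboot request before it can exit cleanly.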
fn initiate_reboot() -> Result<()> {
// Set up signal handler for termination signals
let mut signals = Signals::new([SIGTERM]).context(error::SignalSnafu)?;
let signals_handle = signals.handle();
thread::spawn(move || {
for _sig in signals.forever() {
// Ignore termination signals in case updog gets terminated
// before getting to exit normally by itself after invoking
// `shutdown -r` to complete the update.
}
});
if let Err(err) = process::Command::new("shutdown")
.arg("-r")
.status()
.context(error::RebootFailureSnafu)
{
// Kill the signal handling thread
signals_handle.close();
return Err(err);
}
Ok(())
}
/// Our underlying HTTP client, reqwest, supports proxies by reading the `HTTPS_PROXY` and `NO_PROXY`
/// environment variables. Bottlerocket services can source proxy.env before running, but updog is
/// not a service, so we read these values from the config file and add them to the environment
/// here.
fn set_https_proxy_environment_variables(
https_proxy: &Option<String>,
no_proxy: &Option<Vec<String>>,
) {
let proxy = match https_proxy {
Some(s) if !s.is_empty() => s.clone(),
// without https_proxy, no_proxy does nothing, so we are done
_ => return,
};
std::env::set_var("HTTPS_PROXY", proxy);
if let Some(no_proxy) = no_proxy {
if !no_proxy.is_empty() {
let no_proxy_string = no_proxy.join(",");
debug!("setting NO_PROXY={}", no_proxy_string);
std::env::set_var("NO_PROXY", &no_proxy_string);
}
}
}
#[allow(clippy::too_many_lines)]
fn main_inner() -> Result<()> {
// Parse and store the arguments passed to the program
let arguments = parse_args(std::env::args());
// SimpleLogger will send errors to stderr and anything less to stdout.
SimpleLogger::init(arguments.log_level, LogConfig::default()).context(error::LoggerSnafu)?;
let command =
serde_plain::from_str::<Command>(&arguments.subcommand).unwrap_or_else(|_| usage());
let config = load_config()?;
set_https_proxy_environment_variables(&config.https_proxy, &config.no_proxy);
let current_release = BottlerocketRelease::new().context(error::ReleaseVersionSnafu)?;
let variant = arguments.variant.unwrap_or(current_release.variant_id);
let transport = HttpQueryTransport::new();
// get a shared pointer to the transport's query_params so we can add metrics information to
// the transport's HTTP calls.
let mut query_params = transport.query_params();
set_common_query_params(&mut query_params, &current_release.version_id, &config);
let repository = load_repository(transport, &config)?;
let manifest = load_manifest(&repository)?;
let ignore_waves = arguments.ignore_waves || config.ignore_waves;
match command {
Command::CheckUpdate | Command::Whats => {
if arguments.all {
return list_updates(
&manifest,
&variant,
arguments.json,
ignore_waves,
config.seed,
);
}
let update = update_required(
&manifest,
&current_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)?
.context(error::UpdateNotAvailableSnafu)?;
output(arguments.json, update, &fmt_full_version(update))?;
}
Command::Update | Command::UpdateImage => {
if let Some(u) = update_required(
&manifest,
&current_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)? {
eprintln!("Starting update to {}", u.version);
query_params.add("target", u.version.to_string());
retrieve_migrations(
&repository,
&mut query_params,
&manifest,
u,
&current_release.version_id,
)?;
update_image(u, &repository)?;
if command == Command::Update {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
output(
arguments.json,
u,
&format!("Update applied: {}", fmt_full_version(u)),
)?;
} else {
eprintln!("No update required");
}
}
Command::UpdateApply => {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
Command::UpdateRevert => {
revert_update_flags()?;
}
Command::Prepare => {
// TODO unimplemented
}
}
Ok(())
}
fn load_manifest(repository: &tough::Repository) -> Result<Manifest> {
let target = "manifest.json";
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
Manifest::from_json(
repository
.read_target(&target)
.context(error::ManifestLoadSnafu)?
.context(error::ManifestNotFoundSnafu)?,
)
.context(error::ManifestParseSnafu)
}
fn main() -> ! {
std::process::exit(match main_inner() {
Ok(()) => 0,
Err(err) => {
eprintln!("{err}");
if let Some(var) = std::env::var_os("RUST_BACKTRACE") {
if var != "0" {
if let Some(backtrace) = err.backtrace() {
eprintln!("\n{backtrace:?}");
}
}
}
1
}
})
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration as TestDuration;
use std::collections::BTreeMap;
use update_metadata::Images;
#[test]
fn test_manifest_json() {
// Loads a general example of a manifest that includes an update with waves,
// a set of migrations, and some datastore mappings.
// This test checks that it parses and the following properties are correct:
// - the updates and migrations lists are non-empty
// - the (1.11.0, 1.12.0) migrations exist, with "migrate_1.12.0_foo" first
let path = "tests/data/example.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(
!manifest.updates.is_empty(),
"Failed to parse update manifest"
);
assert!(
!manifest.migrations.is_empty(),
"Failed to parse migrations"
);
let from = Version::parse("1.11.0").unwrap();
let to = Version::parse("1.12.0").unwrap();
assert!(manifest
.migrations
.contains_key(&(from.clone(), to.clone())));
let migration = manifest.migrations.get(&(from, to)).unwrap();
assert!(migration[0] == "migrate_1.12.0_foo");
}
#[test]
fn test_serde_reader() {
// A basic manifest with a single update, no migrations, and two
// image:datastore mappings
let path = "tests/data/example_2.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(!manifest.updates.is_empty());
}
#[test]
fn test_versions() {
// A manifest with a single update whose version exceeds the max version.
// update in manifest has
// - version: 1.25.0
// - max_version: 1.20.0
let path = "tests/data/regret.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.18.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
assert!(
update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None
)
.unwrap()
.is_none(),
"Updog tried to exceed max_version"
);
}
#[test]
fn older_versions() {
// A manifest with two updates, both less than 0.1.3.
// Use an architecture-specific JSON payload; otherwise updog will ignore the update
let path = format!("tests/data/example_3_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 1487,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("0.1.3").unwrap();
let variant = String::from("aws-k8s-1.15");
let update = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap();
assert!(update.is_some(), "Updog ignored max version");
assert!(
update.unwrap().version == Version::parse("0.1.2").unwrap(),
"Updog didn't choose the most recent valid version"
);
}
#[test]
fn test_multiple() {
// A manifest with four updates; two valid, one which exceeds the max
// version, and one which is for the opposite target architecture. This asserts that
// upgrading from the version 1.10.0 results in updating to 1.15.0
// instead of 1.13.0 (lower), 1.25.0 (too high), or 1.16.0 (wrong arch).
let path = format!("tests/data/multiple_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.10.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
let result = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap();
assert!(result.is_some(), "Updog failed to find an update");
if let Some(u) = result {
assert!(
u.version == Version::parse("1.15.0").unwrap(),
"Incorrect version: {}, should be 1.15.0",
u.version
);
}
}
#[test]
fn force_update_version() {
// A manifest with four updates; two valid, one which exceeds the max
// version, and one which is for the opposite target architecture. This test
// forces a downgrade to 1.13.0, instead of the 1.15.0 chosen in the above
// test, test_multiple.
let path = format!("tests/data/multiple_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.10.0").unwrap();
let forced = Version::parse("1.13.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
let result = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
Some(forced),
)
.unwrap();
assert!(result.is_some(), "Updog failed to find an update");
if let Some(u) = result {
assert!(
u.version == Version::parse("1.13.0").unwrap(),
"Incorrect version: {}, should be forced to 1.13.0",
u.version
);
}
}
#[test]
fn bad_bound() {
// This manifest has an invalid key for one of the update's waves
assert!(
serde_json::from_str::<Manifest>(include_str!("../tests/data/bad-bound.json")).is_err()
);
}
#[test]
fn duplicate_bound() {
// This manifest has two waves with a bound id of 0
assert!(serde_json::from_str::<Manifest>(include_str!(
"../tests/data/duplicate-bound.json"
))
.is_err());
}
#[test]
fn serialize_metadata() {
// A basic manifest with a single update
let path = "tests/data/example_2.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(serde_json::to_string_pretty(&manifest)
.context(error::UpdateSerializeSnafu)
.is_ok());
}
#[test]
/// Make sure that `update_required()` doesn't return an update unless the
/// client's wave is also ready.
fn check_update_waves() {
let mut manifest = Manifest::default();
let mut update = Update {
variant: String::from("aws-k8s-1.15"),
arch: String::from(TARGET_ARCH),
version: Version::parse("1.1.1").unwrap(),
max_version: Version::parse("1.1.1").unwrap(),
waves: BTreeMap::new(),
images: Images {
boot: String::from("boot"),
root: String::from("boot"),
hash: String::from("boot"),
},
};
let current_version = Version::parse("1.0.0").unwrap();
let variant = String::from("aws-k8s-1.15");
let first_wave_seed = 0;
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: first_wave_seed,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
// Two waves; the 1st wave starts immediately, and the final wave starts in one hour
let time = Utc::now();
update.waves.insert(0, time);
update.waves.insert(1024, time + TestDuration::hours(1));
update.waves.insert(2048, time + TestDuration::hours(1));
manifest.updates.push(update);
assert!(
update_required(
&manifest,
¤t_version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap()
.is_some(),
"1st wave doesn't appear ready"
);
assert!(
update_required(
&manifest,
¤t_version,
&variant,
config.ignore_waves,
2000,
&config.version_lock,
None,
)
.unwrap()
.is_none(),
"Later wave incorrectly sees update"
);
}
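#[test]
/// Sketch of version-lock behavior: locking to the running version yields no
/// update, while locking to a published version selects exactly that version.
/// (Assumes FriendlyVersion accepts a plain "x.y.z" string, per the comment
/// in update_required about the optional 'v' prefix.)
fn check_version_lock() {
let mut manifest = Manifest::default();
manifest.updates.push(Update {
variant: String::from("aws-k8s-1.15"),
arch: String::from(TARGET_ARCH),
version: Version::parse("1.1.0").unwrap(),
max_version: Version::parse("1.1.0").unwrap(),
waves: BTreeMap::new(),
images: Images {
boot: String::from("boot"),
root: String::from("root"),
hash: String::from("hash"),
},
});
let current_version = Version::parse("1.0.0").unwrap();
let variant = String::from("aws-k8s-1.15");
// Lock to the version we are already running: no update should be offered.
assert!(
update_required(
&manifest,
&current_version,
&variant,
true, // ignore waves; this update defines none
0,
"1.0.0",
None,
)
.unwrap()
.is_none(),
"Updog offered an update despite the version lock"
);
// Lock to the published 1.1.0: exactly that update should be selected.
let locked = update_required(
&manifest,
&current_version,
&variant,
true,
0,
"1.1.0",
None,
)
.unwrap();
assert!(locked.is_some(), "Version lock failed to select 1.1.0");
assert!(locked.unwrap().version == Version::parse("1.1.0").unwrap());
}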
}
|
main.rs | #![warn(clippy::pedantic)]
mod error;
mod transport;
use crate::error::Result;
use crate::transport::{HttpQueryTransport, QueryParams};
use bottlerocket_release::BottlerocketRelease;
use chrono::Utc;
use log::debug;
use model::modeled_types::FriendlyVersion;
use semver::Version;
use serde::{Deserialize, Serialize};
use signal_hook::consts::SIGTERM;
use signal_hook::iterator::Signals;
use signpost::State;
use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger};
use snafu::{ErrorCompat, OptionExt, ResultExt};
use std::convert::{TryFrom, TryInto};
use std::fs::{self, File, OpenOptions};
use std::io;
use std::path::Path;
use std::process;
use std::str::FromStr;
use std::thread;
use tough::{Repository, RepositoryLoader};
use update_metadata::{find_migrations, Manifest, Update};
use url::Url;
#[cfg(target_arch = "x86_64")]
const TARGET_ARCH: &str = "x86_64";
#[cfg(target_arch = "aarch64")]
const TARGET_ARCH: &str = "aarch64";
/// The root.json file as required by TUF.
const TRUSTED_ROOT_PATH: &str = "/usr/share/updog/root.json";
/// This is where we store the TUF targets used by migrator after reboot.
const MIGRATION_PATH: &str = "/var/lib/bottlerocket-migrations";
/// This is where we store the TUF metadata used by migrator after reboot.
const METADATA_PATH: &str = "/var/cache/bottlerocket-metadata";
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
enum Command {
CheckUpdate,
Whats,
Prepare,
Update,
UpdateImage,
UpdateApply,
UpdateRevert,
}
#[derive(Debug, Deserialize)]
struct Config {
metadata_base_url: String,
targets_base_url: String,
seed: u32,
version_lock: String,
ignore_waves: bool,
https_proxy: Option<String>,
no_proxy: Option<Vec<String>>,
// TODO API sourced configuration, eg.
// blacklist: Option<Vec<Version>>,
// mode: Option<{Automatic, Managed, Disabled}>
}
/// Prints a more specific message before exiting through usage().
fn usage_msg<S: AsRef<str>>(msg: S) -> ! {
eprintln!("{}\n", msg.as_ref());
usage();
}
fn usage() -> ! {
#[rustfmt::skip]
eprintln!("\
USAGE:
updog <SUBCOMMAND> <OPTIONS>
SUBCOMMANDS:
check-update Show if an update is available
[ -a | --all ] Output all available updates, even if they're not upgrades
[ --ignore-waves ] Ignore release schedule when checking
for a new update
prepare Download update files and migration targets
update Perform an update if available
[ -i | --image version ] Update to a specific image version
[ -n | --now ] Update immediately, ignoring any release schedule
[ -r | --reboot ] Reboot into new update on success
update-image Download & write an update but do not update flags
[ -i | --image version ] Update to a specific image version
[ -n | --now ] Update immediately, ignoring wave limits
[ -t | --timestamp time ] The timestamp to execute an update from
update-apply Update boot flags (after having called update-image)
[ -r | --reboot ] Reboot after updating boot flags
update-revert Revert actions done by 'update-apply'
GLOBAL OPTIONS:
[ -j | --json ] JSON-formatted output
[ --log-level trace|debug|info|warn|error ] Set logging verbosity");
std::process::exit(1)
}
fn load_config() -> Result<Config> {
let path = "/etc/updog.toml";
let s = fs::read_to_string(path).context(error::ConfigReadSnafu { path })?;
let config: Config = toml::from_str(&s).context(error::ConfigParseSnafu { path })?;
Ok(config)
}
fn load_repository(transport: HttpQueryTransport, config: &Config) -> Result<Repository> {
fs::create_dir_all(METADATA_PATH).context(error::CreateMetadataCacheSnafu {
path: METADATA_PATH,
})?;
RepositoryLoader::new(
File::open(TRUSTED_ROOT_PATH).context(error::OpenRootSnafu {
path: TRUSTED_ROOT_PATH,
})?,
Url::parse(&config.metadata_base_url).context(error::UrlParseSnafu {
url: &config.metadata_base_url,
})?,
Url::parse(&config.targets_base_url).context(error::UrlParseSnafu {
url: &config.targets_base_url,
})?,
)
.transport(transport)
.load()
.context(error::MetadataSnafu)
}
fn applicable_updates<'a>(
manifest: &'a Manifest,
variant: &str,
ignore_waves: bool,
seed: u32,
) -> Vec<&'a Update> {
let mut updates: Vec<&Update> = manifest
.updates
.iter()
.filter(|u| {
u.variant == *variant
&& u.arch == TARGET_ARCH
&& u.version <= u.max_version
&& (ignore_waves || u.update_ready(seed, Utc::now()))
})
.collect();
// sort descending
updates.sort_unstable_by(|a, b| b.version.cmp(&a.version));
updates
}
// TODO use config if there is api-sourced configuration that could affect this
// TODO updog.toml may include settings that cause us to ignore/delay
// certain/any updates;
// Ignore Specific Target Version
// Ignore Any Target
// ...
fn update_required<'a>(
manifest: &'a Manifest,
version: &Version,
variant: &str,
ignore_waves: bool,
seed: u32,
version_lock: &str,
force_version: Option<Version>,
) -> Result<Option<&'a Update>> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if let Some(forced_version) = force_version {
return Ok(updates.into_iter().find(|u| u.version == forced_version));
}
if version_lock != "latest" {
// Make sure the version string from the config is a valid version string that might be prefixed with 'v'
let friendly_version_lock =
FriendlyVersion::try_from(version_lock).context(error::BadVersionConfigSnafu {
version_str: version_lock,
})?;
// Convert back to semver::Version
let semver_version_lock =
friendly_version_lock
.try_into()
.context(error::BadVersionSnafu {
version_str: version_lock,
})?;
// If the configured version-lock matches our current version, we won't update to the same version
return if semver_version_lock == *version {
Ok(None)
} else {
Ok(updates
.into_iter()
.find(|u| u.version == semver_version_lock))
};
}
for update in updates {
// If the current running version is greater than the max version ever published,
// or moves us to a valid version <= the maximum version, update.
if *version < update.version || *version > update.max_version {
return Ok(Some(update));
}
}
Ok(None)
}
fn write_target_to_disk<P: AsRef<Path>>(
repository: &Repository,
target: &str,
disk_path: P,
) -> Result<()> {
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
let reader = repository
.read_target(&target)
.context(error::MetadataSnafu)?
.context(error::TargetNotFoundSnafu {
target: target.raw(),
})?;
// Note: the file extension for the compression type we're using should be removed in
// retrieve_migrations below.
let mut reader = lz4::Decoder::new(reader).context(error::Lz4DecodeSnafu {
target: target.raw(),
})?;
let mut f = OpenOptions::new()
.write(true)
.create(true)
.open(disk_path.as_ref())
.context(error::OpenPartitionSnafu {
path: disk_path.as_ref(),
})?;
io::copy(&mut reader, &mut f).context(error::WriteUpdateSnafu)?;
Ok(())
}
/// Store required migrations for an update in persistent storage. All intermediate migrations
/// between the current version and the target version must be retrieved.
fn retrieve_migrations(
repository: &Repository,
query_params: &mut QueryParams,
manifest: &Manifest,
update: &Update,
current_version: &Version,
) -> Result<()> {
// the migrations required for foo to bar and bar to foo are
// the same; we can pretend we're always upgrading from foo to
// bar and use the same logic to obtain the migrations
let target = std::cmp::max(&update.version, current_version);
let start = std::cmp::min(&update.version, current_version);
let dir = Path::new(MIGRATION_PATH);
if !dir.exists() {
fs::create_dir(dir).context(error::DirCreateSnafu { path: &dir })?;
}
// find the list of migrations in the manifest based on our from and to versions.
let mut targets = find_migrations(start, target, manifest)?;
// we need to store the manifest so that migrator can independently and securely determine the
// migration list. this is true even if there are no migrations.
targets.push("manifest.json".to_owned());
repository
.cache(METADATA_PATH, MIGRATION_PATH, Some(&targets), true)
.context(error::RepoCacheMigrationsSnafu)?;
// Set a query parameter listing the required migrations
query_params.add("migrations", targets.join(","));
Ok(())
}
fn update_image(update: &Update, repository: &Repository) -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.clear_inactive();
// Write out the clearing of the inactive partition immediately, because we're about to
// overwrite the partition set with update data and don't want it to be used until we
// know we're done with all components.
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
let inactive = gpt_state.inactive_set();
// TODO Do we want to recover the inactive side on an error?
write_target_to_disk(repository, &update.images.root, &inactive.root)?;
write_target_to_disk(repository, &update.images.boot, &inactive.boot)?;
write_target_to_disk(repository, &update.images.hash, &inactive.hash)?;
gpt_state.mark_inactive_valid();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state
.upgrade_to_inactive()
.context(error::InactivePartitionUpgradeSnafu)?;
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn | () -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.cancel_upgrade();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
fn set_common_query_params(
query_params: &mut QueryParams,
current_version: &Version,
config: &Config,
) {
query_params.add("version", current_version.to_string());
query_params.add("seed", config.seed.to_string());
}
/// List any available update that matches the current variant
fn list_updates(
manifest: &Manifest,
variant: &str,
json: bool,
ignore_waves: bool,
seed: u32,
) -> Result<()> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if json {
println!(
"{}",
serde_json::to_string_pretty(&updates).context(error::UpdateSerializeSnafu)?
);
} else {
for u in updates {
eprintln!("{}", &fmt_full_version(u));
}
}
Ok(())
}
/// Struct to hold the specified command line argument values
#[allow(clippy::struct_excessive_bools)]
struct Arguments {
subcommand: String,
log_level: LevelFilter,
json: bool,
ignore_waves: bool,
force_version: Option<Version>,
all: bool,
reboot: bool,
variant: Option<String>,
}
/// Parse the command line arguments to get the user-specified values
fn parse_args(args: std::env::Args) -> Arguments {
let mut subcommand = None;
let mut log_level = None;
let mut update_version = None;
let mut ignore_waves = false;
let mut json = false;
let mut all = false;
let mut reboot = false;
let mut variant = None;
let mut iter = args.skip(1);
while let Some(arg) = iter.next() {
match arg.as_ref() {
"--log-level" => {
let log_level_str = iter
.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --log-level"));
log_level =
Some(LevelFilter::from_str(&log_level_str).unwrap_or_else(|_| {
usage_msg(format!("Invalid log level '{log_level_str}'"))
}));
}
"-i" | "--image" => match iter.next() {
Some(v) => match Version::parse(&v) {
Ok(v) => update_version = Some(v),
_ => usage(),
},
_ => usage(),
},
"--variant" => {
variant = Some(
iter.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --variant")),
);
}
"-n" | "--now" | "--ignore-waves" => {
ignore_waves = true;
}
"-j" | "--json" => {
json = true;
}
"-r" | "--reboot" => {
reboot = true;
}
"-a" | "--all" => {
all = true;
}
// Assume any arguments not prefixed with '-' is a subcommand
s if !s.starts_with('-') => {
if subcommand.is_some() {
usage();
}
subcommand = Some(s.to_string());
}
_ => usage(),
}
}
Arguments {
subcommand: subcommand.unwrap_or_else(|| usage()),
log_level: log_level.unwrap_or(LevelFilter::Info),
json,
ignore_waves,
force_version: update_version,
all,
reboot,
variant,
}
}
fn fmt_full_version(update: &Update) -> String {
format!("{} {}", update.variant, update.version)
}
fn output<T: Serialize>(json: bool, object: T, string: &str) -> Result<()> {
if json {
println!(
"{}",
serde_json::to_string_pretty(&object).context(error::UpdateSerializeSnafu)?
);
} else {
println!("{string}");
}
Ok(())
}
fn initiate_reboot() -> Result<()> {
// Set up signal handler for termination signals
let mut signals = Signals::new([SIGTERM]).context(error::SignalSnafu)?;
let signals_handle = signals.handle();
thread::spawn(move || {
for _sig in signals.forever() {
// Ignore termination signals in case updog gets terminated
// before getting to exit normally by itself after invoking
// `shutdown -r` to complete the update.
}
});
if let Err(err) = process::Command::new("shutdown")
.arg("-r")
.status()
.context(error::RebootFailureSnafu)
{
// Kill the signal handling thread
signals_handle.close();
return Err(err);
}
Ok(())
}
/// Our underlying HTTP client, reqwest, supports proxies by reading the `HTTPS_PROXY` and `NO_PROXY`
/// environment variables. Bottlerocket services can source proxy.env before running, but updog is
/// not a service, so we read these values from the config file and add them to the environment
/// here.
fn set_https_proxy_environment_variables(
https_proxy: &Option<String>,
no_proxy: &Option<Vec<String>>,
) {
let proxy = match https_proxy {
Some(s) if !s.is_empty() => s.clone(),
// without https_proxy, no_proxy does nothing, so we are done
_ => return,
};
std::env::set_var("HTTPS_PROXY", proxy);
if let Some(no_proxy) = no_proxy {
if !no_proxy.is_empty() {
let no_proxy_string = no_proxy.join(",");
debug!("setting NO_PROXY={}", no_proxy_string);
std::env::set_var("NO_PROXY", &no_proxy_string);
}
}
}
#[allow(clippy::too_many_lines)]
fn main_inner() -> Result<()> {
// Parse and store the arguments passed to the program
let arguments = parse_args(std::env::args());
// SimpleLogger will send errors to stderr and anything less to stdout.
SimpleLogger::init(arguments.log_level, LogConfig::default()).context(error::LoggerSnafu)?;
let command =
serde_plain::from_str::<Command>(&arguments.subcommand).unwrap_or_else(|_| usage());
let config = load_config()?;
set_https_proxy_environment_variables(&config.https_proxy, &config.no_proxy);
let current_release = BottlerocketRelease::new().context(error::ReleaseVersionSnafu)?;
let variant = arguments.variant.unwrap_or(current_release.variant_id);
let transport = HttpQueryTransport::new();
// get a shared pointer to the transport's query_params so we can add metrics information to
// the transport's HTTP calls.
let mut query_params = transport.query_params();
set_common_query_params(&mut query_params, ¤t_release.version_id, &config);
let repository = load_repository(transport, &config)?;
let manifest = load_manifest(&repository)?;
let ignore_waves = arguments.ignore_waves || config.ignore_waves;
match command {
Command::CheckUpdate | Command::Whats => {
if arguments.all {
return list_updates(
&manifest,
&variant,
arguments.json,
ignore_waves,
config.seed,
);
}
let update = update_required(
&manifest,
¤t_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)?
.context(error::UpdateNotAvailableSnafu)?;
output(arguments.json, update, &fmt_full_version(update))?;
}
Command::Update | Command::UpdateImage => {
if let Some(u) = update_required(
&manifest,
¤t_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)? {
eprintln!("Starting update to {}", u.version);
query_params.add("target", u.version.to_string());
retrieve_migrations(
&repository,
&mut query_params,
&manifest,
u,
¤t_release.version_id,
)?;
update_image(u, &repository)?;
if command == Command::Update {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
output(
arguments.json,
u,
&format!("Update applied: {}", fmt_full_version(u)),
)?;
} else {
eprintln!("No update required");
}
}
Command::UpdateApply => {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
Command::UpdateRevert => {
revert_update_flags()?;
}
Command::Prepare => {
// TODO unimplemented
}
}
Ok(())
}
fn load_manifest(repository: &tough::Repository) -> Result<Manifest> {
let target = "manifest.json";
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
Manifest::from_json(
repository
.read_target(&target)
.context(error::ManifestLoadSnafu)?
.context(error::ManifestNotFoundSnafu)?,
)
.context(error::ManifestParseSnafu)
}
fn main() -> ! {
std::process::exit(match main_inner() {
Ok(()) => 0,
Err(err) => {
eprintln!("{err}");
if let Some(var) = std::env::var_os("RUST_BACKTRACE") {
if var != "0" {
if let Some(backtrace) = err.backtrace() {
eprintln!("\n{backtrace:?}");
}
}
}
1
}
})
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration as TestDuration;
use std::collections::BTreeMap;
use update_metadata::Images;
#[test]
fn test_manifest_json() {
// Loads a general example of a manifest that includes an update with waves,
// a set of migrations, and some datastore mappings.
// This tests checks that it parses and the following properties are correct:
// - the (1.0, 1.1) migrations exist with the migration "migrate_1.1_foo"
// - the image:datastore mappings exist
// - there is a mapping between 1.11.0 and 1.0
let path = "tests/data/example.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(
!manifest.updates.is_empty(),
"Failed to parse update manifest"
);
assert!(
!manifest.migrations.is_empty(),
"Failed to parse migrations"
);
let from = Version::parse("1.11.0").unwrap();
let to = Version::parse("1.12.0").unwrap();
assert!(manifest
.migrations
.contains_key(&(from.clone(), to.clone())));
let migration = manifest.migrations.get(&(from, to)).unwrap();
assert!(migration[0] == "migrate_1.12.0_foo");
}
#[test]
fn test_serde_reader() {
// A basic manifest with a single update, no migrations, and two
// image:datastore mappings
let path = "tests/data/example_2.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(!manifest.updates.is_empty());
}
#[test]
fn test_versions() {
// A manifest with a single update whose version exceeds the max version.
// update in manifest has
// - version: 1.25.0
// - max_version: 1.20.0
let path = "tests/data/regret.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.18.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
assert!(
update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None
)
.unwrap()
.is_none(),
"Updog tried to exceed max_version"
);
}
#[test]
fn older_versions() {
// A manifest with two updates, both less than 0.1.3.
// Use a architecture specific JSON payload, otherwise updog will ignore the update
let path = format!("tests/data/example_3_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 1487,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("0.1.3").unwrap();
let variant = String::from("aws-k8s-1.15");
let update = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap();
assert!(update.is_some(), "Updog ignored max version");
assert!(
update.unwrap().version == Version::parse("0.1.2").unwrap(),
"Updog didn't choose the most recent valid version"
);
}
#[test]
fn test_multiple() {
// A manifest with four updates; two valid, one which exceeds the max
// version, and one which is for the opposite target architecture. This asserts that
// upgrading from the version 1.10.0 results in updating to 1.15.0
// instead of 1.13.0 (lower), 1.25.0 (too high), or 1.16.0 (wrong arch).
let path = format!("tests/data/multiple_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.10.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
let result = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap();
assert!(result.is_some(), "Updog failed to find an update");
if let Some(u) = result {
assert!(
u.version == Version::parse("1.15.0").unwrap(),
"Incorrect version: {}, should be 1.15.0",
u.version
);
}
}
#[test]
fn force_update_version() {
// A manifest with four updates; two valid, one which exceeds the max
// version, and one which is for the opposite target architecture. This tests forces
// a downgrade to 1.13.0, instead of 1.15.0 like it would be in the
// above test, test_multiple.
let path = format!("tests/data/multiple_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.10.0").unwrap();
let forced = Version::parse("1.13.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
let result = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
Some(forced),
)
.unwrap();
assert!(result.is_some(), "Updog failed to find an update");
if let Some(u) = result {
assert!(
u.version == Version::parse("1.13.0").unwrap(),
"Incorrect version: {}, should be forced to 1.13.0",
u.version
);
}
}
#[test]
fn bad_bound() {
// This manifest has an invalid key for one of the update's waves
assert!(
serde_json::from_str::<Manifest>(include_str!("../tests/data/bad-bound.json")).is_err()
);
}
#[test]
fn duplicate_bound() {
// This manifest has two waves with a bound id of 0
assert!(serde_json::from_str::<Manifest>(include_str!(
"../tests/data/duplicate-bound.json"
))
.is_err());
}
#[test]
fn serialize_metadata() {
// A basic manifest with a single update
let path = "tests/data/example_2.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(serde_json::to_string_pretty(&manifest)
.context(error::UpdateSerializeSnafu)
.is_ok());
}
#[test]
/// Make sure that `update_required()` doesn't return true unless the client's
/// wave is also ready.
fn check_update_waves() {
let mut manifest = Manifest::default();
let mut update = Update {
variant: String::from("aws-k8s-1.15"),
arch: String::from(TARGET_ARCH),
version: Version::parse("1.1.1").unwrap(),
max_version: Version::parse("1.1.1").unwrap(),
waves: BTreeMap::new(),
images: Images {
boot: String::from("boot"),
root: String::from("boot"),
hash: String::from("boot"),
},
};
let current_version = Version::parse("1.0.0").unwrap();
let variant = String::from("aws-k8s-1.15");
let first_wave_seed = 0;
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: first_wave_seed,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
// Two waves; the 1st wave that starts immediately, and the final wave which starts in one hour
let time = Utc::now();
update.waves.insert(0, time);
update.waves.insert(1024, time + TestDuration::hours(1));
update.waves.insert(2048, time + TestDuration::hours(1));
manifest.updates.push(update);
assert!(
update_required(
&manifest,
¤t_version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap()
.is_some(),
"1st wave doesn't appear ready"
);
assert!(
update_required(
&manifest,
¤t_version,
&variant,
config.ignore_waves,
2000,
&config.version_lock,
None,
)
.unwrap()
.is_none(),
"Later wave incorrectly sees update"
);
}
}
| revert_update_flags | identifier_name |
main.rs | #![warn(clippy::pedantic)]
mod error;
mod transport;
use crate::error::Result;
use crate::transport::{HttpQueryTransport, QueryParams};
use bottlerocket_release::BottlerocketRelease;
use chrono::Utc;
use log::debug;
use model::modeled_types::FriendlyVersion;
use semver::Version;
use serde::{Deserialize, Serialize};
use signal_hook::consts::SIGTERM;
use signal_hook::iterator::Signals;
use signpost::State;
use simplelog::{Config as LogConfig, LevelFilter, SimpleLogger};
use snafu::{ErrorCompat, OptionExt, ResultExt};
use std::convert::{TryFrom, TryInto};
use std::fs::{self, File, OpenOptions};
use std::io;
use std::path::Path;
use std::process;
use std::str::FromStr;
use std::thread;
use tough::{Repository, RepositoryLoader};
use update_metadata::{find_migrations, Manifest, Update};
use url::Url;
#[cfg(target_arch = "x86_64")]
const TARGET_ARCH: &str = "x86_64";
#[cfg(target_arch = "aarch64")]
const TARGET_ARCH: &str = "aarch64";
/// The root.json file as required by TUF.
const TRUSTED_ROOT_PATH: &str = "/usr/share/updog/root.json";
/// This is where we store the TUF targets used by migrator after reboot.
const MIGRATION_PATH: &str = "/var/lib/bottlerocket-migrations";
/// This is where we store the TUF metadata used by migrator after reboot.
const METADATA_PATH: &str = "/var/cache/bottlerocket-metadata";
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
enum Command {
CheckUpdate,
Whats,
Prepare,
Update,
UpdateImage,
UpdateApply,
UpdateRevert,
}
#[derive(Debug, Deserialize)]
struct Config {
metadata_base_url: String,
targets_base_url: String,
seed: u32,
version_lock: String,
ignore_waves: bool,
https_proxy: Option<String>,
no_proxy: Option<Vec<String>>,
// TODO API sourced configuration, eg.
// blacklist: Option<Vec<Version>>,
// mode: Option<{Automatic, Managed, Disabled}>
}
/// Prints a more specific message before exiting through usage().
fn usage_msg<S: AsRef<str>>(msg: S) -> ! {
eprintln!("{}\n", msg.as_ref());
usage();
}
fn usage() -> ! {
#[rustfmt::skip]
eprintln!("\
USAGE:
updog <SUBCOMMAND> <OPTIONS>
SUBCOMMANDS:
check-update Show if an update is available
[ -a | --all ] Output all available updates, even if they're not upgrades
[ --ignore-waves ] Ignore release schedule when checking
for a new update
prepare Download update files and migration targets
update Perform an update if available
[ -i | --image version ] Update to a specific image version
[ -n | --now ] Update immediately, ignoring any release schedule
[ -r | --reboot ] Reboot into new update on success
update-image Download & write an update but do not update flags
[ -i | --image version ] Update to a specific image version
[ -n | --now ] Update immediately, ignoring wave limits
[ -t | --timestamp time ] The timestamp to execute an update from
update-apply Update boot flags (after having called update-image)
[ -r | --reboot ] Reboot after updating boot flags
update-revert Revert actions done by 'update-apply'
GLOBAL OPTIONS:
[ -j | --json ] JSON-formatted output
[ --log-level trace|debug|info|warn|error ] Set logging verbosity");
std::process::exit(1)
}
fn load_config() -> Result<Config> {
let path = "/etc/updog.toml";
let s = fs::read_to_string(path).context(error::ConfigReadSnafu { path })?;
let config: Config = toml::from_str(&s).context(error::ConfigParseSnafu { path })?;
Ok(config)
}
fn load_repository(transport: HttpQueryTransport, config: &Config) -> Result<Repository> {
fs::create_dir_all(METADATA_PATH).context(error::CreateMetadataCacheSnafu {
path: METADATA_PATH,
})?;
RepositoryLoader::new(
File::open(TRUSTED_ROOT_PATH).context(error::OpenRootSnafu {
path: TRUSTED_ROOT_PATH,
})?,
Url::parse(&config.metadata_base_url).context(error::UrlParseSnafu {
url: &config.metadata_base_url,
})?,
Url::parse(&config.targets_base_url).context(error::UrlParseSnafu {
url: &config.targets_base_url,
})?,
)
.transport(transport)
.load()
.context(error::MetadataSnafu)
}
fn applicable_updates<'a>(
manifest: &'a Manifest,
variant: &str,
ignore_waves: bool,
seed: u32,
) -> Vec<&'a Update> {
let mut updates: Vec<&Update> = manifest
.updates
.iter()
.filter(|u| {
u.variant == *variant
&& u.arch == TARGET_ARCH
&& u.version <= u.max_version
&& (ignore_waves || u.update_ready(seed, Utc::now()))
})
.collect();
// sort descending
updates.sort_unstable_by(|a, b| b.version.cmp(&a.version));
updates
}
// TODO use config if there is api-sourced configuration that could affect this
// TODO updog.toml may include settings that cause us to ignore/delay
// certain/any updates;
// Ignore Specific Target Version
// Ignore Any Target
// ...
fn update_required<'a>(
manifest: &'a Manifest,
version: &Version,
variant: &str,
ignore_waves: bool,
seed: u32,
version_lock: &str,
force_version: Option<Version>,
) -> Result<Option<&'a Update>> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if let Some(forced_version) = force_version {
return Ok(updates.into_iter().find(|u| u.version == forced_version));
}
if version_lock != "latest" {
// Make sure the version string from the config is a valid version string that might be prefixed with 'v'
let friendly_version_lock =
FriendlyVersion::try_from(version_lock).context(error::BadVersionConfigSnafu {
version_str: version_lock,
})?;
// Convert back to semver::Version
let semver_version_lock =
friendly_version_lock
.try_into()
.context(error::BadVersionSnafu {
version_str: version_lock,
})?;
// If the configured version-lock matches our current version, we won't update to the same version
return if semver_version_lock == *version {
Ok(None)
} else {
Ok(updates
.into_iter()
.find(|u| u.version == semver_version_lock))
};
}
for update in updates {
// If the current running version is greater than the max version ever published,
// or moves us to a valid version <= the maximum version, update.
if *version < update.version || *version > update.max_version {
return Ok(Some(update));
}
}
Ok(None)
}
fn write_target_to_disk<P: AsRef<Path>>(
repository: &Repository,
target: &str,
disk_path: P,
) -> Result<()> {
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
let reader = repository
.read_target(&target)
.context(error::MetadataSnafu)?
.context(error::TargetNotFoundSnafu {
target: target.raw(),
})?;
// Note: the file extension for the compression type we're using should be removed in
// retrieve_migrations below.
let mut reader = lz4::Decoder::new(reader).context(error::Lz4DecodeSnafu {
target: target.raw(),
})?;
let mut f = OpenOptions::new()
.write(true)
.create(true)
.open(disk_path.as_ref())
.context(error::OpenPartitionSnafu {
path: disk_path.as_ref(),
})?;
io::copy(&mut reader, &mut f).context(error::WriteUpdateSnafu)?;
Ok(())
}
/// Store required migrations for an update in persistent storage. All intermediate migrations
/// between the current version and the target version must be retrieved.
fn retrieve_migrations(
repository: &Repository,
query_params: &mut QueryParams,
manifest: &Manifest,
update: &Update,
current_version: &Version,
) -> Result<()> {
// the migrations required for foo to bar and bar to foo are
// the same; we can pretend we're always upgrading from foo to
// bar and use the same logic to obtain the migrations
let target = std::cmp::max(&update.version, current_version);
let start = std::cmp::min(&update.version, current_version);
let dir = Path::new(MIGRATION_PATH);
if !dir.exists() {
fs::create_dir(dir).context(error::DirCreateSnafu { path: &dir })?;
}
// find the list of migrations in the manifest based on our from and to versions.
let mut targets = find_migrations(start, target, manifest)?;
// we need to store the manifest so that migrator can independently and securely determine the
// migration list. this is true even if there are no migrations.
targets.push("manifest.json".to_owned());
repository
.cache(METADATA_PATH, MIGRATION_PATH, Some(&targets), true)
.context(error::RepoCacheMigrationsSnafu)?;
// Set a query parameter listing the required migrations
query_params.add("migrations", targets.join(","));
Ok(())
}
fn update_image(update: &Update, repository: &Repository) -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.clear_inactive();
// Write out the clearing of the inactive partition immediately, because we're about to
// overwrite the partition set with update data and don't want it to be used until we
// know we're done with all components.
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
let inactive = gpt_state.inactive_set();
// TODO Do we want to recover the inactive side on an error?
write_target_to_disk(repository, &update.images.root, &inactive.root)?;
write_target_to_disk(repository, &update.images.boot, &inactive.boot)?;
write_target_to_disk(repository, &update.images.hash, &inactive.hash)?;
gpt_state.mark_inactive_valid();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
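/// Promote the inactive partition set so the bootloader tries it on the next boot.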
fn update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state
.upgrade_to_inactive()
.context(error::InactivePartitionUpgradeSnafu)?;
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
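/// Cancel a pending partition upgrade so the currently active set keeps booting.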
fn revert_update_flags() -> Result<()> {
let mut gpt_state = State::load().context(error::PartitionTableReadSnafu)?;
gpt_state.cancel_upgrade();
gpt_state.write().context(error::PartitionTableWriteSnafu)?;
Ok(())
}
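/// Add the current version and wave seed to the query parameters sent with every repository request.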
fn set_common_query_params(
query_params: &mut QueryParams,
current_version: &Version,
config: &Config,
) {
query_params.add("version", current_version.to_string());
query_params.add("seed", config.seed.to_string());
}
/// List any available update that matches the current variant
fn list_updates(
manifest: &Manifest,
variant: &str,
json: bool,
ignore_waves: bool,
seed: u32,
) -> Result<()> {
let updates = applicable_updates(manifest, variant, ignore_waves, seed);
if json {
println!(
"{}",
serde_json::to_string_pretty(&updates).context(error::UpdateSerializeSnafu)?
);
} else {
for u in updates {
eprintln!("{}", &fmt_full_version(u));
}
}
Ok(())
}
/// Struct to hold the specified command line argument values
#[allow(clippy::struct_excessive_bools)]
struct Arguments {
subcommand: String,
log_level: LevelFilter,
json: bool,
ignore_waves: bool,
force_version: Option<Version>,
all: bool,
reboot: bool,
variant: Option<String>,
}
/// Parse the command line arguments to get the user-specified values
fn parse_args(args: std::env::Args) -> Arguments {
let mut subcommand = None;
let mut log_level = None;
let mut update_version = None;
let mut ignore_waves = false;
let mut json = false;
let mut all = false;
let mut reboot = false;
let mut variant = None;
let mut iter = args.skip(1);
while let Some(arg) = iter.next() {
match arg.as_ref() {
"--log-level" => {
let log_level_str = iter
.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --log-level"));
log_level =
Some(LevelFilter::from_str(&log_level_str).unwrap_or_else(|_| {
usage_msg(format!("Invalid log level '{log_level_str}'"))
}));
}
"-i" | "--image" => match iter.next() {
Some(v) => match Version::parse(&v) {
Ok(v) => update_version = Some(v),
_ => usage(),
},
_ => usage(),
},
"--variant" => {
variant = Some(
iter.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --variant")),
);
}
"-n" | "--now" | "--ignore-waves" => {
ignore_waves = true;
}
"-j" | "--json" => {
json = true;
}
"-r" | "--reboot" => {
reboot = true;
}
"-a" | "--all" => {
all = true;
}
            // Assume any argument not prefixed with '-' is the subcommand
s if !s.starts_with('-') => {
if subcommand.is_some() {
usage();
}
subcommand = Some(s.to_string());
}
_ => usage(),
}
}
Arguments {
subcommand: subcommand.unwrap_or_else(|| usage()),
log_level: log_level.unwrap_or(LevelFilter::Info),
json,
ignore_waves,
force_version: update_version,
all,
reboot,
variant,
}
}
fn fmt_full_version(update: &Update) -> String {
format!("{} {}", update.variant, update.version)
}
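/// Print `object` as pretty JSON when --json was passed; otherwise print `string`.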
fn output<T: Serialize>(json: bool, object: T, string: &str) -> Result<()> {
if json {
println!(
"{}",
serde_json::to_string_pretty(&object).context(error::UpdateSerializeSnafu)?
);
} else {
println!("{string}");
}
Ok(())
}
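/// Invoke `shutdown -r` to reboot, ignoring SIGTERM so updog isn't killed before the command completes.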
fn initiate_reboot() -> Result<()> {
// Set up signal handler for termination signals
let mut signals = Signals::new([SIGTERM]).context(error::SignalSnafu)?;
let signals_handle = signals.handle();
thread::spawn(move || {
for _sig in signals.forever() {
// Ignore termination signals in case updog gets terminated
// before getting to exit normally by itself after invoking
// `shutdown -r` to complete the update.
}
});
if let Err(err) = process::Command::new("shutdown")
.arg("-r")
.status()
.context(error::RebootFailureSnafu)
{
// Kill the signal handling thread
signals_handle.close();
return Err(err);
}
Ok(())
}
/// Our underlying HTTP client, reqwest, supports proxies by reading the `HTTPS_PROXY` and `NO_PROXY`
/// environment variables. Bottlerocket services can source proxy.env before running, but updog is
/// not a service, so we read these values from the config file and add them to the environment
/// here.
fn set_https_proxy_environment_variables(
https_proxy: &Option<String>,
no_proxy: &Option<Vec<String>>,
) {
let proxy = match https_proxy {
Some(s) if !s.is_empty() => s.clone(),
// without https_proxy, no_proxy does nothing, so we are done
_ => return,
};
std::env::set_var("HTTPS_PROXY", proxy);
if let Some(no_proxy) = no_proxy {
if !no_proxy.is_empty() {
let no_proxy_string = no_proxy.join(",");
debug!("setting NO_PROXY={}", no_proxy_string);
std::env::set_var("NO_PROXY", &no_proxy_string);
}
}
}
#[allow(clippy::too_many_lines)]
fn main_inner() -> Result<()> {
// Parse and store the arguments passed to the program
let arguments = parse_args(std::env::args());
// SimpleLogger will send errors to stderr and anything less to stdout.
SimpleLogger::init(arguments.log_level, LogConfig::default()).context(error::LoggerSnafu)?;
let command =
serde_plain::from_str::<Command>(&arguments.subcommand).unwrap_or_else(|_| usage());
let config = load_config()?;
set_https_proxy_environment_variables(&config.https_proxy, &config.no_proxy);
let current_release = BottlerocketRelease::new().context(error::ReleaseVersionSnafu)?;
let variant = arguments.variant.unwrap_or(current_release.variant_id);
let transport = HttpQueryTransport::new();
// get a shared pointer to the transport's query_params so we can add metrics information to
// the transport's HTTP calls.
let mut query_params = transport.query_params();
set_common_query_params(&mut query_params, ¤t_release.version_id, &config);
let repository = load_repository(transport, &config)?;
let manifest = load_manifest(&repository)?;
let ignore_waves = arguments.ignore_waves || config.ignore_waves;
match command {
Command::CheckUpdate | Command::Whats => {
if arguments.all {
return list_updates(
&manifest,
&variant,
arguments.json,
ignore_waves,
config.seed,
);
}
let update = update_required(
&manifest,
¤t_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)?
.context(error::UpdateNotAvailableSnafu)?;
output(arguments.json, update, &fmt_full_version(update))?;
}
Command::Update | Command::UpdateImage => {
if let Some(u) = update_required(
&manifest,
¤t_release.version_id,
&variant,
ignore_waves,
config.seed,
&config.version_lock,
arguments.force_version,
)? {
eprintln!("Starting update to {}", u.version);
query_params.add("target", u.version.to_string());
retrieve_migrations(
&repository,
&mut query_params,
&manifest,
u,
¤t_release.version_id,
)?;
update_image(u, &repository)?;
if command == Command::Update {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
output(
arguments.json,
u,
&format!("Update applied: {}", fmt_full_version(u)),
)?;
} else {
eprintln!("No update required");
}
}
Command::UpdateApply => {
update_flags()?;
if arguments.reboot {
initiate_reboot()?;
}
}
Command::UpdateRevert => {
revert_update_flags()?;
}
Command::Prepare => {
// TODO unimplemented
}
}
Ok(())
}
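/// Read and parse the update manifest ("manifest.json") from the TUF repository.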
fn load_manifest(repository: &tough::Repository) -> Result<Manifest> {
let target = "manifest.json";
let target = target
.try_into()
.context(error::TargetNameSnafu { target })?;
Manifest::from_json(
repository
.read_target(&target)
.context(error::ManifestLoadSnafu)?
.context(error::ManifestNotFoundSnafu)?,
)
.context(error::ManifestParseSnafu)
}
fn main() -> ! {
std::process::exit(match main_inner() {
Ok(()) => 0,
Err(err) => {
eprintln!("{err}");
if let Some(var) = std::env::var_os("RUST_BACKTRACE") {
if var != "0" {
if let Some(backtrace) = err.backtrace() {
eprintln!("\n{backtrace:?}");
}
}
}
1
}
})
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration as TestDuration;
use std::collections::BTreeMap;
use update_metadata::Images;
#[test]
fn test_manifest_json() {
// Loads a general example of a manifest that includes an update with waves,
// a set of migrations, and some datastore mappings.
        // This test checks that it parses and the following properties hold:
        // - the update list and migration list are non-empty
        // - the migration entry for (1.11.0, 1.12.0) exists and its first
        //   migration is "migrate_1.12.0_foo"
let path = "tests/data/example.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(
!manifest.updates.is_empty(),
"Failed to parse update manifest"
);
assert!(
!manifest.migrations.is_empty(),
"Failed to parse migrations"
);
let from = Version::parse("1.11.0").unwrap();
let to = Version::parse("1.12.0").unwrap();
assert!(manifest
.migrations
.contains_key(&(from.clone(), to.clone())));
let migration = manifest.migrations.get(&(from, to)).unwrap();
assert!(migration[0] == "migrate_1.12.0_foo");
}
#[test]
fn test_serde_reader() {
// A basic manifest with a single update, no migrations, and two
// image:datastore mappings
let path = "tests/data/example_2.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(!manifest.updates.is_empty());
}
#[test]
    fn test_versions() {
        // A manifest with a single update whose version exceeds the max version.
        // update in manifest has
        // - version: 1.25.0
        // - max_version: 1.20.0
        let path = "tests/data/regret.json";
        let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
        let config = Config {
            metadata_base_url: String::from("foo"),
            targets_base_url: String::from("bar"),
            seed: 123,
            version_lock: "latest".to_string(),
            ignore_waves: false,
            https_proxy: None,
            no_proxy: None,
        };
        let version = Version::parse("1.18.0").unwrap();
        let variant = String::from("bottlerocket-aws-eks");
        assert!(
            update_required(
                &manifest,
                &version,
                &variant,
                config.ignore_waves,
                config.seed,
                &config.version_lock,
                None
            )
            .unwrap()
            .is_none(),
            "Updog tried to exceed max_version"
        );
    }
#[test]
fn older_versions() {
// A manifest with two updates, both less than 0.1.3.
        // Use an architecture-specific JSON payload, otherwise updog will ignore the update
let path = format!("tests/data/example_3_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 1487,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("0.1.3").unwrap();
let variant = String::from("aws-k8s-1.15");
let update = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap();
assert!(update.is_some(), "Updog ignored max version");
assert!(
update.unwrap().version == Version::parse("0.1.2").unwrap(),
"Updog didn't choose the most recent valid version"
);
}
#[test]
fn test_multiple() {
// A manifest with four updates; two valid, one which exceeds the max
// version, and one which is for the opposite target architecture. This asserts that
// upgrading from the version 1.10.0 results in updating to 1.15.0
// instead of 1.13.0 (lower), 1.25.0 (too high), or 1.16.0 (wrong arch).
let path = format!("tests/data/multiple_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.10.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
let result = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap();
assert!(result.is_some(), "Updog failed to find an update");
if let Some(u) = result {
assert!(
u.version == Version::parse("1.15.0").unwrap(),
"Incorrect version: {}, should be 1.15.0",
u.version
);
}
}
#[test]
fn force_update_version() {
// A manifest with four updates; two valid, one which exceeds the max
        // version, and one which is for the opposite target architecture. This test
        // forces a downgrade to 1.13.0 instead of the 1.15.0 that test_multiple
        // above would select.
let path = format!("tests/data/multiple_{TARGET_ARCH}.json");
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: 123,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
let version = Version::parse("1.10.0").unwrap();
let forced = Version::parse("1.13.0").unwrap();
let variant = String::from("bottlerocket-aws-eks");
let result = update_required(
&manifest,
&version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
Some(forced),
)
.unwrap();
assert!(result.is_some(), "Updog failed to find an update");
if let Some(u) = result {
assert!(
u.version == Version::parse("1.13.0").unwrap(),
"Incorrect version: {}, should be forced to 1.13.0",
u.version
);
}
}
#[test]
fn bad_bound() {
// This manifest has an invalid key for one of the update's waves
assert!(
serde_json::from_str::<Manifest>(include_str!("../tests/data/bad-bound.json")).is_err()
);
}
#[test]
fn duplicate_bound() {
// This manifest has two waves with a bound id of 0
assert!(serde_json::from_str::<Manifest>(include_str!(
"../tests/data/duplicate-bound.json"
))
.is_err());
}
#[test]
fn serialize_metadata() {
// A basic manifest with a single update
let path = "tests/data/example_2.json";
let manifest: Manifest = serde_json::from_reader(File::open(path).unwrap()).unwrap();
assert!(serde_json::to_string_pretty(&manifest)
.context(error::UpdateSerializeSnafu)
.is_ok());
}
#[test]
    /// Make sure that `update_required()` doesn't return an update unless the
    /// client's wave is also ready.
fn check_update_waves() {
let mut manifest = Manifest::default();
let mut update = Update {
variant: String::from("aws-k8s-1.15"),
arch: String::from(TARGET_ARCH),
version: Version::parse("1.1.1").unwrap(),
max_version: Version::parse("1.1.1").unwrap(),
waves: BTreeMap::new(),
images: Images {
boot: String::from("boot"),
root: String::from("boot"),
hash: String::from("boot"),
},
};
let current_version = Version::parse("1.0.0").unwrap();
let variant = String::from("aws-k8s-1.15");
let first_wave_seed = 0;
let config = Config {
metadata_base_url: String::from("foo"),
targets_base_url: String::from("bar"),
seed: first_wave_seed,
version_lock: "latest".to_string(),
ignore_waves: false,
https_proxy: None,
no_proxy: None,
};
        // Three wave bounds: the first wave starts immediately, and the later waves start in one hour
let time = Utc::now();
update.waves.insert(0, time);
update.waves.insert(1024, time + TestDuration::hours(1));
update.waves.insert(2048, time + TestDuration::hours(1));
manifest.updates.push(update);
assert!(
update_required(
&manifest,
¤t_version,
&variant,
config.ignore_waves,
config.seed,
&config.version_lock,
None,
)
.unwrap()
.is_some(),
"1st wave doesn't appear ready"
);
assert!(
update_required(
&manifest,
¤t_version,
&variant,
config.ignore_waves,
2000,
&config.version_lock,
None,
)
.unwrap()
.is_none(),
"Later wave incorrectly sees update"
);
}
}
main.go

package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"time"
"github.com/shirou/gopsutil/cpu"
//"os/exec"
)
var (
index = 0
index2 = 0
Times [1000]string
Values [1000]string
Times2 [1000]string
Values2 [1000]string
)
type Strumemoria struct {
	Total int `json:"total"` // attribute names must start uppercase so they are exported for JSON
Libre int `json:"libre"`
Tiempos string `json:"tiempos"`
Valores string `json:"valores"`
Index int `json:"index"`
}
type Strucpu struct {
	Total int `json:"total"` // attribute names must start uppercase so they are exported for JSON
Libre int `json:"libre"`
Tiempos string `json:"tiempos"`
Valores string `json:"valores"`
Index int `json:"index"`
}
type CPU struct {
Cores int32 `json:"cores"`
Vendor string `json:"vendor"`
Family string `json:"family"`
Model string `json:"model"`
Speed string `json:"speed"`
Read float64 `json:"read"`
}
type prueba struct {
valor int
}
// Adding the main-page data (processes)
type PROCESOS struct{
Ejecucion int `json:"ejecucion"`
Suspendidos int `json:"suspendidos"`
Detenidos int `json:"detenidos"`
Zombie int `json:"zombie"`
Total int `json:"total"`
Datos string `json:"datos"`
Arbol string `json:"arbol"`
}
type hijo struct{
Nombre string
pid string
}
type Proc struct{
Nombre string
Hijos []hijo
}
var procesos []Proc;
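// obtenerram serves the RAM usage history as JSON, parsed from a meminfo-style mock file.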
func obtenerram(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE")
	/* fmt.Fprintf(w, "Welcome to the HomePage!") // PRINTS TO THE WEB PAGE
	fmt.Println("Endpoint Hit: homePage")*/ // PRINT TO THE CONSOLE
b, err := ioutil.ReadFile("mockram.json")
	if err != nil { // nil is Go's equivalent of null, so to speak
		fmt.Println("primer error return")
		return // bail out if there was an error
}
str := string(b)
	listainfo := strings.Split(str, "\n") // split on newlines
memoriatotal := strings.Replace((listainfo[0])[10:24], " ", "", -1)
memorialibre := strings.Replace((listainfo[2])[15:24], " ", "", -1)
fmt.Println("LA DISPONIBLE ES ", memorialibre)
ramtotal, err1 := strconv.Atoi(memoriatotal)
ramlibre, err2 := strconv.Atoi(memorialibre)
if err1 == nil && err2 == nil {
ramtotalmb := ramtotal / 1024
ramlibremb := ramlibre / 1024
porcentajeram := ((ramtotalmb - ramlibremb) * 100) / ramtotalmb
		// grab the current time
v1 := time.Now().Format("01-02-2006 15:04:05")
parte := strings.Split(v1, " ")
Times[index] = parte[1]
Values[index] = strconv.Itoa(porcentajeram)
//fmt.Println("EL VALUES ES ", Values[index])
/* if index == 60 {
var Tiempos2 [1000]string
var Valores2 [1000]string
for j := 1; j <= 60; j++ {
var i = 0
Tiempos2[i] = Times[j]
Valores2[i] = Values[j]
i = i + 1
}
//index = index - 1
Times = Tiempos2
Values = Valores2
fmt.Println("la posicion 1 es ", Times[0])
fmt.Println("La posicion ultima es ", Times[60])
}
*/
respuestamem := Strumemoria{Total: ramtotalmb, Libre: ramlibremb, Tiempos: strings.Join(Times[:], ","), Valores: strings.Join(Values[:], ","), Index: index}
// fmt.Println("O SEA SI ENTRO")
crearj, errorjson := json.Marshal(respuestamem)
index = index + 1
/*if index < 60 {
index = index + 1
} else {
index = 60
}*/
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
// convertir_a_cadena := string(jsonResponse)
// fmt.Println(convertir_a_cadena)
//fmt.Println("LLEGUE AL FINAL A RETORNAR JSON", jsonResponse)
} else {
return
}
}
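// obtenercpu serves the CPU usage history as JSON, sampling usage with gopsutil.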
func obtenercpu(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE")
	/* fmt.Fprintf(w, "Welcome to the HomePage!") // PRINTS TO THE WEB PAGE
	fmt.Println("Endpoint Hit: homePage")*/ // PRINT TO THE CONSOLE
b, err := ioutil.ReadFile("/proc/meminfo")
	if err != nil { // nil is Go's equivalent of null, so to speak
		fmt.Println("primer error return")
		return // bail out if there was an error
}
str := string(b)
	listainfo := strings.Split(str, "\n") // split on newlines
	// CPU usage
	//cpuStat, err := cpu.Info()
	percentage, err := cpu.Percent(0, true)
	if err != nil || len(percentage) == 0 { // guard against a failed or empty reading before indexing
		fmt.Println("Error leyendo el uso del CPU:", err)
		return
	}
	//fmt.Println("la info del cpu es ", cpuStat)
	fmt.Println("EL PORCENTAJE DE USO ES ", percentage[0])
memoriatotal := strings.Replace((listainfo[0])[10:24], " ", "", -1)
memorialibre := strings.Replace((listainfo[2])[15:24], " ", "", -1)
// fmt.Println("LA DISPONIBLE ES ", memorialibre)
ramtotal, err1 := strconv.Atoi(memoriatotal)
ramlibre, err2 := strconv.Atoi(memorialibre)
if err1 == nil && err2 == nil {
ramtotalmb := ramtotal / 1024
ramlibremb := ramlibre / 1024
// porcentajeram := ((ramtotalmb - ramlibremb) * 100) / ramtotalmb
		// grab the current time
v1 := time.Now().Format("01-02-2006 15:04:05")
parte := strings.Split(v1, " ")
Times2[index2] = parte[1]
Values2[index2] = strconv.FormatFloat(percentage[0], 'f', 5, 64)
respuestamem := Strumemoria{Total: ramtotalmb, Libre: ramlibremb, Tiempos: strings.Join(Times2[:], ","), Valores: strings.Join(Values2[:], ","), Index: index2}
// fmt.Println("O SEA SI ENTRO")
crearj, errorjson := json.Marshal(respuestamem)
index2 = index2 + 1
/*if index < 60 {
index = index + 1
} else {
index = 60
}*/
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
// fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
// convertir_a_cadena := string(jsonResponse)
// fmt.Println(convertir_a_cadena)
//fmt.Println("LLEGUE AL FINAL A RETORNAR JSON", jsonResponse)
} else {
return
}
}
// getprocesosarchivo reads the "procesos" file and returns its raw contents.
func getprocesosarchivo(w http.ResponseWriter, r *http.Request) {
	fmt.Println("ENTRE A LEER PROCESOS")
	b, err := ioutil.ReadFile("procesos")
	if err != nil { // nil is Go's equivalent of null, so to speak
		fmt.Println("Error abriendo procesos")
		return // bail out if there was an error
	}
	w.Write(b)
}
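// obtenerprincipal walks /proc, classifies every process by state, builds the
// parent/child tree, and serves the summary as JSON.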
func obtenerprincipal(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE a PRINCIPAL")
archivos, err := ioutil.ReadDir("/proc")
if err != nil {
log.Fatal(err)
}
procejecucion:=0;
procsuspendidos:=0;
procdetenidos:=0;
proczombies:=0;
contador:=0;
textocompleto:="";
//totalram:=0.0;
//totalrammegas:=0.0;
//pruebaram:=0.0;
procesos=nil;
for _, archivo := range archivos {
		if archivo.Name()[0] >= '0' && archivo.Name()[0] <= '9' {
			// every directory under /proc whose name starts with a digit is a process
nombreArchivo:= "/proc/";
nombreArchivo+= archivo.Name();
nombreArchivo+="/status";
bytesLeidos, err := ioutil.ReadFile(nombreArchivo)
if err != nil {
fmt.Printf("Error leyendo archivo: %v", err)
}
contenido := string(bytesLeidos)
splitsalto:=strings.Split(contenido,"\n");
hayram:=true;
nombre:="";
for _,salto:= range splitsalto{
splitpuntos:=strings.Split(salto,":");
if(splitpuntos[0]=="Name"){
//fmt.Printf("El nombre del proceso con id: %s es: %s\n",nombreArchivo,splitpuntos[1])
textocompleto+=archivo.Name()+",";
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
nombre=aux;
textocompleto+=aux+",";
}else if(splitpuntos[0]=="Uid"){
					// user UID; still pending: map it to the actual user name
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
textocompleto+=aux+",";
}else if(splitpuntos[0]=="State"){
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
textocompleto+=aux+",";
if(aux=="R(running)"){
procejecucion+=1;
}else if(aux=="S(sleeping)"){
procsuspendidos+=1;
}else if(aux=="I(idle)"){
procdetenidos+=1;
}else if(aux=="Z(zombie)"){
proczombies+=1;
}else{
						fmt.Printf("Proceso en estado: %s\n", aux)
}
					// PPid identifies the parent process
}else if(splitpuntos[0]=="PPid"){
var hj hijo;
hj.Nombre=nombre;
hj.pid=archivo.Name();
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
//fmt.Printf("Llamando metodo de meter hijo")
meterhijo(hj,aux);
}else if(splitpuntos[0]=="VmRSS"){
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
aux=strings.ReplaceAll(aux,"kB","");
numero, err:= strconv.Atoi(aux);
if err != nil {
fmt.Printf("Error convirtiendo a numero la ram: %v", err)
numero=0;
}
//fmt.Printf("Numero convertido: %d",numero);
dec:=1.565;
dec=float64(numero)/80611.80;
//totalram+=dec;
//dec=dec/8000;
//aux=strconv.Itoa(dec);
aux= fmt.Sprintf("%f", dec)
textocompleto+=aux;
textocompleto+="%\n";
hayram=false;
}
}
if(hayram){
				// no RAM (VmRSS) entry was found for this process
textocompleto+="0.0%\n";
}
contador++;
}
}
	// finished reading every process
respuestamem := PROCESOS{Ejecucion: procejecucion, Suspendidos: procsuspendidos, Detenidos: procdetenidos, Zombie: proczombies, Total: contador,Datos:textocompleto, Arbol: getprocesos()}
crearj, errorjson := json.Marshal(respuestamem)
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//fmt.Println("Mandando json: %s",string(crearj))
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
// fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
fmt.Println("TERMINO CORRECTAMENTE EL PROCESO")
//imprimirprocesos();
}
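// imprimirprocesos is a debug helper that dumps the parent/child tree to stdout.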
func imprimirprocesos(){
for i:=0;i<len(procesos);i++{
fmt.Printf("Padre: %s{\n",procesos[i].Nombre);
for j:=0;j<len(procesos[i].Hijos);j++{
fmt.Printf("\tid: %s,",procesos[i].Hijos[j].pid);
fmt.Printf("\tNombre: %s\n",procesos[i].Hijos[j].Nombre);
}
fmt.Printf("}\n");
}
}
func meterhijo(proceso hijo,nombre string){
	// proceso: the child process to insert
	// procesos: the list of parent processes tracked so far
	// nombre: the pid of the parent to look for
if(nombre=="0"){
return
}
for i:=0;i<len(procesos);i++ {
if(nombre== procesos[i].Nombre){
			// found the parent
procesos[i].Hijos=append(procesos[i].Hijos,proceso);
return;
}
}
var aux Proc;
aux.Nombre=nombre;
aux.Hijos=append(aux.Hijos,proceso);
procesos=append(procesos,aux);
}
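// getprocesos serializes the process tree: one line per parent followed by
// comma-separated pid:name pairs for its children.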
func getprocesos() string {
texto:="";
for i:=0;i<len(procesos);i++{
texto+=procesos[i].Nombre;
for j:=0;j<len(procesos[i].Hijos);j++{
texto+=","+procesos[i].Hijos[j].pid;
texto+=":"+procesos[i].Hijos[j].Nombre;
}
texto+="\n"
}
return texto;
}
func reply(w http.ResponseWriter, r *http.Request) {
fmt.Println("==============================")
fmt.Println("ENTRE A REPLY")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT")
if err := r.ParseForm(); err != nil {
fmt.Println(err);
}
//this is my first impulse. It makes the most sense to me.
//fmt.Println(r.PostForm); //out -> `map[]` would be `map[string]string` I think
//fmt.Println(r.PostForm["hat"]); //out -> `[]` would be `fez` or `["fez"]`
// fmt.Println(r.Body); //out -> `&{0xc82000e780 <nil> <nil> false true {0 0} false false false}`
type Hat struct {
Hat string
}
type Datos struct{
Nombre string `json:"Nombre"`
Departamento string `json:"Departamento"`
Edad int `json:"Edad"`
Forma string `json:"Forma de contagio"`
Estado string `json:"Estado"`
}
//this is the way the linked SO post above said should work. I don't see how the r.Body could be decoded.
decoder := json.NewDecoder(r.Body)
var t Datos
err := decoder.Decode(&t)
if err != nil {
fmt.Println(err);
}
	if t.Nombre != "" {
		fmt.Println("Se recibio nombre: ", t.Nombre)
		// The kill command is disabled for now; re-enable the block below to use it.
		//app := "kill"
		//cmd := exec.Command(app, t.Nombre)
		//stdout, err := cmd.Output()
		//if err != nil {
		//	fmt.Println("Hubo un error!!!------------")
		//	fmt.Println(err)
		//} else {
		//	fmt.Println("TODO BINE!!!!")
		//	fmt.Println(stdout)
		//}
}else{
fmt.Println("Vino vacio");
}
}
func main() {
	// RAM endpoint
	http.HandleFunc("/", obtenerram)
	// CPU info endpoint
	http.HandleFunc("/cpu", obtenercpu)
	http.HandleFunc("/principal", obtenerprincipal)
	http.HandleFunc("/post", reply)
	log.Fatal(http.ListenAndServe(":3030", nil)) // log.Fatal keeps the server listening and reports the reason if it ever stops
}
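// Example (assumed) usage once the server is running on port 3030:
//   curl http://localhost:3030/          -> RAM history JSON
//   curl http://localhost:3030/cpu       -> CPU history JSON
//   curl http://localhost:3030/principal -> process summary JSON
//   curl -X POST -d '{"Nombre":"bash"}' http://localhost:3030/post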
main.go | package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"time"
"github.com/shirou/gopsutil/cpu"
//"os/exec"
)
var (
index = 0
index2 = 0
Times [1000]string
Values [1000]string
Times2 [1000]string
Values2 [1000]string
)
type Strumemoria struct {
Total int `json:"total"` //poner nombre de atributos en mayuscula
Libre int `json:"libre"`
Tiempos string `json:"tiempos"`
Valores string `json:"valores"`
Index int `json:"index"`
}
type Strucpu struct {
Total int `json:"total"` //poner nombre de atributos en mayuscula
Libre int `json:"libre"`
Tiempos string `json:"tiempos"`
Valores string `json:"valores"`
Index int `json:"index"`
}
type CPU struct {
Cores int32 `json:"cores"`
Vendor string `json:"vendor"`
Family string `json:"family"`
Model string `json:"model"`
Speed string `json:"speed"`
Read float64 `json:"read"`
}
type prueba struct {
valor int
}
//AGREGANDO LO DE LA PAGINA PRINCIPAL(PROCESOS)
type PROCESOS struct{
Ejecucion int `json:"ejecucion"`
Suspendidos int `json:"suspendidos"`
Detenidos int `json:"detenidos"`
Zombie int `json:"zombie"`
Total int `json:"total"`
Datos string `json:"datos"`
Arbol string `json:"arbol"`
}
type hijo struct{
Nombre string
pid string
}
type Proc struct{
Nombre string
Hijos []hijo
}
var procesos []Proc;
func obtenerram(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE")
/* fmt.Fprintf(w, "Welcome to the HomePage!") //IMPRIME EN LA WEB
fmt.Println("Endpoint Hit: homePage")*/ //IMPRIMIR EN CONSOLA AL
b, err := ioutil.ReadFile("mockram.json")
if err != nil { //nil es contrario a null por asi decirlo
fmt.Println("primer error return")
return ///si hay error se sale
}
str := string(b)
listainfo := strings.Split(string(str), "\n") //split por salto de linea
memoriatotal := strings.Replace((listainfo[0])[10:24], " ", "", -1)
memorialibre := strings.Replace((listainfo[2])[15:24], " ", "", -1)
fmt.Println("LA DISPONIBLE ES ", memorialibre)
ramtotal, err1 := strconv.Atoi(memoriatotal)
ramlibre, err2 := strconv.Atoi(memorialibre)
if err1 == nil && err2 == nil {
ramtotalmb := ramtotal / 1024
ramlibremb := ramlibre / 1024
porcentajeram := ((ramtotalmb - ramlibremb) * 100) / ramtotalmb
//obtengo hora
v1 := time.Now().Format("01-02-2006 15:04:05")
parte := strings.Split(v1, " ")
Times[index] = parte[1]
Values[index] = strconv.Itoa(porcentajeram)
//fmt.Println("EL VALUES ES ", Values[index])
/* if index == 60 {
var Tiempos2 [1000]string
var Valores2 [1000]string
for j := 1; j <= 60; j++ {
var i = 0
Tiempos2[i] = Times[j]
Valores2[i] = Values[j]
i = i + 1
}
//index = index - 1
Times = Tiempos2
Values = Valores2
fmt.Println("la posicion 1 es ", Times[0])
fmt.Println("La posicion ultima es ", Times[60])
}
*/
respuestamem := Strumemoria{Total: ramtotalmb, Libre: ramlibremb, Tiempos: strings.Join(Times[:], ","), Valores: strings.Join(Values[:], ","), Index: index}
// fmt.Println("O SEA SI ENTRO")
crearj, errorjson := json.Marshal(respuestamem)
index = index + 1
/*if index < 60 {
index = index + 1
} else {
index = 60
}*/
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
// convertir_a_cadena := string(jsonResponse)
// fmt.Println(convertir_a_cadena)
//fmt.Println("LLEGUE AL FINAL A RETORNAR JSON", jsonResponse)
} else {
return
}
}
func obtenercpu(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE")
/* fmt.Fprintf(w, "Welcome to the HomePage!") //IMPRIME EN LA WEB
fmt.Println("Endpoint Hit: homePage")*/ //IMPRIMIR EN CONSOLA AL
b, err := ioutil.ReadFile("/proc/meminfo")
if err != nil { //nil es contrario a null por asi decirlo
fmt.Println("primer error return")
return ///si hay error se sale
}
str := string(b)
listainfo := strings.Split(string(str), "\n") //split por salto de linea
//para el cpu
//cpuStat, err := cpu.Info()
percentage, err := cpu.Percent(0, true)
//fmt.Println("la info del cpu es ", cpuStat)
fmt.Println("EL PORCENTAJE DE USO ES ", percentage[0])
memoriatotal := strings.Replace((listainfo[0])[10:24], " ", "", -1)
memorialibre := strings.Replace((listainfo[2])[15:24], " ", "", -1)
// fmt.Println("LA DISPONIBLE ES ", memorialibre)
ramtotal, err1 := strconv.Atoi(memoriatotal)
ramlibre, err2 := strconv.Atoi(memorialibre)
if err1 == nil && err2 == nil {
ramtotalmb := ramtotal / 1024
ramlibremb := ramlibre / 1024
// porcentajeram := ((ramtotalmb - ramlibremb) * 100) / ramtotalmb
//obtengo hora
v1 := time.Now().Format("01-02-2006 15:04:05")
parte := strings.Split(v1, " ")
Times2[index2] = parte[1]
Values2[index2] = strconv.FormatFloat(percentage[0], 'f', 5, 64)
respuestamem := Strumemoria{Total: ramtotalmb, Libre: ramlibremb, Tiempos: strings.Join(Times2[:], ","), Valores: strings.Join(Values2[:], ","), Index: index2}
// fmt.Println("O SEA SI ENTRO")
crearj, errorjson := json.Marshal(respuestamem)
index2 = index2 + 1
/*if index < 60 {
index = index + 1
} else {
index = 60
}*/
if errorjson != nil |
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
// fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
// convertir_a_cadena := string(jsonResponse)
// fmt.Println(convertir_a_cadena)
//fmt.Println("LLEGUE AL FINAL A RETORNAR JSON", jsonResponse)
} else {
return
}
}
func getprocesos(w http.ResponseWriter, r *http.Request){
fmt.Println("ENTRE A LEER PROCESOS");
b, err := ioutil.ReadFile("procesos")
if err != nil { //nil es contrario a null por asi decirlo
fmt.Println("Error abriendo procesos")
return ///si hay error se sale
}
}
func obtenerprincipal(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE a PRINCIPAL")
archivos, err := ioutil.ReadDir("/proc")
if err != nil {
log.Fatal(err)
}
procejecucion:=0;
procsuspendidos:=0;
procdetenidos:=0;
proczombies:=0;
contador:=0;
textocompleto:="";
//totalram:=0.0;
//totalrammegas:=0.0;
//pruebaram:=0.0;
procesos=nil;
for _, archivo := range archivos {
if(archivo.Name()[0]>48 && archivo.Name()[0]<58){
//TODOS ESTOS SON PROCESOS
nombreArchivo:= "/proc/";
nombreArchivo+= archivo.Name();
nombreArchivo+="/status";
bytesLeidos, err := ioutil.ReadFile(nombreArchivo)
if err != nil {
fmt.Printf("Error leyendo archivo: %v", err)
}
contenido := string(bytesLeidos)
splitsalto:=strings.Split(contenido,"\n");
hayram:=true;
nombre:="";
for _,salto:= range splitsalto{
splitpuntos:=strings.Split(salto,":");
if(splitpuntos[0]=="Name"){
//fmt.Printf("El nombre del proceso con id: %s es: %s\n",nombreArchivo,splitpuntos[1])
textocompleto+=archivo.Name()+",";
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
nombre=aux;
textocompleto+=aux+",";
}else if(splitpuntos[0]=="Uid"){
//USUARIO, falta cambiar por nombre del usuario
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
textocompleto+=aux+",";
}else if(splitpuntos[0]=="State"){
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
textocompleto+=aux+",";
if(aux=="R(running)"){
procejecucion+=1;
}else if(aux=="S(sleeping)"){
procsuspendidos+=1;
}else if(aux=="I(idle)"){
procdetenidos+=1;
}else if(aux=="Z(zombie)"){
proczombies+=1;
}else{
fmt.Println("Proceso en estado: %s",aux)
}
//PPID es el padre
}else if(splitpuntos[0]=="PPid"){
var hj hijo;
hj.Nombre=nombre;
hj.pid=archivo.Name();
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
//fmt.Printf("Llamando metodo de meter hijo")
meterhijo(hj,aux);
}else if(splitpuntos[0]=="VmRSS"){
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
aux=strings.ReplaceAll(aux,"kB","");
numero, err:= strconv.Atoi(aux);
if err != nil {
fmt.Printf("Error convirtiendo a numero la ram: %v", err)
numero=0;
}
//fmt.Printf("Numero convertido: %d",numero);
dec:=1.565;
dec=float64(numero)/80611.80;
//totalram+=dec;
//dec=dec/8000;
//aux=strconv.Itoa(dec);
aux= fmt.Sprintf("%f", dec)
textocompleto+=aux;
textocompleto+="%\n";
hayram=false;
}
}
if(hayram){
//SI NO ENCONTRO EL APARTADO DE RAM
textocompleto+="0.0%\n";
}
contador++;
}
}
//TERMINO DE LEER TODOS LOS PROCESOS
respuestamem := PROCESOS{Ejecucion: procejecucion, Suspendidos: procsuspendidos, Detenidos: procdetenidos, Zombie: proczombies, Total: contador,Datos:textocompleto, Arbol: getprocesos()}
crearj, errorjson := json.Marshal(respuestamem)
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//fmt.Println("Mandando json: %s",string(crearj))
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
// fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
fmt.Println("TERMINO CORRECTAMENTE EL PROCESO")
//imprimirprocesos();
}
func imprimirprocesos(){
for i:=0;i<len(procesos);i++{
fmt.Printf("Padre: %s{\n",procesos[i].Nombre);
for j:=0;j<len(procesos[i].Hijos);j++{
fmt.Printf("\tid: %s,",procesos[i].Hijos[j].pid);
fmt.Printf("\tNombre: %s\n",procesos[i].Hijos[j].Nombre);
}
fmt.Printf("}\n");
}
}
func meterhijo(proceso hijo,nombre string){
//proceso es el proceso a meter en hijos
//procesos la lista de procesos que voy manejando(los padres)
//nombre el id del padre a buscar
if(nombre=="0"){
return
}
for i:=0;i<len(procesos);i++ {
if(nombre== procesos[i].Nombre){
//SI ES EL PADRE
procesos[i].Hijos=append(procesos[i].Hijos,proceso);
return;
}
}
var aux Proc;
aux.Nombre=nombre;
aux.Hijos=append(aux.Hijos,proceso);
procesos=append(procesos,aux);
}
func getprocesos() string {
texto:="";
for i:=0;i<len(procesos);i++{
texto+=procesos[i].Nombre;
for j:=0;j<len(procesos[i].Hijos);j++{
texto+=","+procesos[i].Hijos[j].pid;
texto+=":"+procesos[i].Hijos[j].Nombre;
}
texto+="\n"
}
return texto;
}
func reply(w http.ResponseWriter, r *http.Request) {
fmt.Println("==============================")
fmt.Println("ENTRE A REPLY")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT")
if err := r.ParseForm(); err != nil {
fmt.Println(err);
}
//this is my first impulse. It makes the most sense to me.
//fmt.Println(r.PostForm); //out -> `map[]` would be `map[string]string` I think
//fmt.Println(r.PostForm["hat"]); //out -> `[]` would be `fez` or `["fez"]`
// fmt.Println(r.Body); //out -> `&{0xc82000e780 <nil> <nil> false true {0 0} false false false}`
type Hat struct {
Hat string
}
type Datos struct{
Nombre string `json:"Nombre"`
Departamento string `json:"Departamento"`
Edad int `json:"Edad"`
Forma string `json:"Forma de contagio"`
Estado string `json:"Estado"`
}
//this is the way the linked SO post above said should work. I don't see how the r.Body could be decoded.
decoder := json.NewDecoder(r.Body)
var t Datos
err := decoder.Decode(&t)
if err != nil {
fmt.Println(err);
}
if(t.Hat !=""){
fmt.Println("Se recibio nombre: ",t.Nombre);
//app := "kill"
//cmd:=exec.Command(app,t.Hat);
//stdout, err := cmd.Output()
if err != nil{
fmt.Println("Hubo un error!!!------------");
fmt.Println(err);
}else{
fmt.Println("TODO BINE!!!!");
fmt.Println(stdout);
}
}else{
fmt.Println("Vino vacio");
}
}
func main() {
//obtener ram
http.HandleFunc("/", obtenerram)
//obtener info del cpu
http.HandleFunc("/cpu", obtenercpu)
http.HandleFunc("/principal", obtenerprincipal)
http.HandleFunc("/post", reply)
log.Fatal(http.ListenAndServe(":3030", nil)) //log.fatal como que lo mantiene a la escucha y permite pararlo
}
| {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
} | conditional_block |
main.go | package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"time"
"github.com/shirou/gopsutil/cpu"
//"os/exec"
)
var (
index = 0
index2 = 0
Times [1000]string
Values [1000]string
Times2 [1000]string
Values2 [1000]string
)
type Strumemoria struct {
Total int `json:"total"` //poner nombre de atributos en mayuscula
Libre int `json:"libre"`
Tiempos string `json:"tiempos"`
Valores string `json:"valores"`
Index int `json:"index"`
}
type Strucpu struct {
Total int `json:"total"` //poner nombre de atributos en mayuscula
Libre int `json:"libre"`
Tiempos string `json:"tiempos"`
Valores string `json:"valores"`
Index int `json:"index"`
}
type CPU struct {
Cores int32 `json:"cores"`
Vendor string `json:"vendor"`
Family string `json:"family"`
Model string `json:"model"`
Speed string `json:"speed"`
Read float64 `json:"read"`
}
type prueba struct {
valor int
}
//AGREGANDO LO DE LA PAGINA PRINCIPAL(PROCESOS)
type PROCESOS struct{
Ejecucion int `json:"ejecucion"`
Suspendidos int `json:"suspendidos"`
Detenidos int `json:"detenidos"`
Zombie int `json:"zombie"`
Total int `json:"total"`
Datos string `json:"datos"`
Arbol string `json:"arbol"`
}
type hijo struct{
Nombre string
pid string
}
type Proc struct{
Nombre string
Hijos []hijo
}
var procesos []Proc;
func obtenerram(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE")
/* fmt.Fprintf(w, "Welcome to the HomePage!") //IMPRIME EN LA WEB
fmt.Println("Endpoint Hit: homePage")*/ //IMPRIMIR EN CONSOLA AL
b, err := ioutil.ReadFile("mockram.json")
if err != nil { //nil es contrario a null por asi decirlo
fmt.Println("primer error return")
return ///si hay error se sale
}
str := string(b)
listainfo := strings.Split(string(str), "\n") //split por salto de linea
memoriatotal := strings.Replace((listainfo[0])[10:24], " ", "", -1)
memorialibre := strings.Replace((listainfo[2])[15:24], " ", "", -1)
fmt.Println("LA DISPONIBLE ES ", memorialibre)
ramtotal, err1 := strconv.Atoi(memoriatotal)
ramlibre, err2 := strconv.Atoi(memorialibre)
if err1 == nil && err2 == nil {
ramtotalmb := ramtotal / 1024
ramlibremb := ramlibre / 1024
porcentajeram := ((ramtotalmb - ramlibremb) * 100) / ramtotalmb
//obtengo hora
v1 := time.Now().Format("01-02-2006 15:04:05")
parte := strings.Split(v1, " ")
Times[index] = parte[1]
Values[index] = strconv.Itoa(porcentajeram)
//fmt.Println("EL VALUES ES ", Values[index])
/* if index == 60 {
var Tiempos2 [1000]string
var Valores2 [1000]string
for j := 1; j <= 60; j++ {
var i = 0
Tiempos2[i] = Times[j]
Valores2[i] = Values[j]
i = i + 1
}
//index = index - 1
Times = Tiempos2
Values = Valores2
fmt.Println("la posicion 1 es ", Times[0])
fmt.Println("La posicion ultima es ", Times[60])
}
*/
respuestamem := Strumemoria{Total: ramtotalmb, Libre: ramlibremb, Tiempos: strings.Join(Times[:], ","), Valores: strings.Join(Values[:], ","), Index: index}
// fmt.Println("O SEA SI ENTRO")
crearj, errorjson := json.Marshal(respuestamem)
index = index + 1
/*if index < 60 {
index = index + 1
} else {
index = 60
}*/
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
// convertir_a_cadena := string(jsonResponse)
// fmt.Println(convertir_a_cadena)
//fmt.Println("LLEGUE AL FINAL A RETORNAR JSON", jsonResponse)
} else {
return
}
}
func obtenercpu(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE")
/* fmt.Fprintf(w, "Welcome to the HomePage!") //IMPRIME EN LA WEB
fmt.Println("Endpoint Hit: homePage")*/ //IMPRIMIR EN CONSOLA AL
b, err := ioutil.ReadFile("/proc/meminfo")
if err != nil { //nil es contrario a null por asi decirlo
fmt.Println("primer error return")
return ///si hay error se sale
}
str := string(b)
listainfo := strings.Split(string(str), "\n") //split por salto de linea
//para el cpu
//cpuStat, err := cpu.Info()
percentage, err := cpu.Percent(0, true)
//fmt.Println("la info del cpu es ", cpuStat)
fmt.Println("EL PORCENTAJE DE USO ES ", percentage[0])
memoriatotal := strings.Replace((listainfo[0])[10:24], " ", "", -1)
memorialibre := strings.Replace((listainfo[2])[15:24], " ", "", -1)
// fmt.Println("LA DISPONIBLE ES ", memorialibre)
ramtotal, err1 := strconv.Atoi(memoriatotal)
ramlibre, err2 := strconv.Atoi(memorialibre)
if err1 == nil && err2 == nil {
ramtotalmb := ramtotal / 1024
ramlibremb := ramlibre / 1024
// porcentajeram := ((ramtotalmb - ramlibremb) * 100) / ramtotalmb
//obtengo hora
v1 := time.Now().Format("01-02-2006 15:04:05")
parte := strings.Split(v1, " ")
Times2[index2] = parte[1]
Values2[index2] = strconv.FormatFloat(percentage[0], 'f', 5, 64)
respuestamem := Strumemoria{Total: ramtotalmb, Libre: ramlibremb, Tiempos: strings.Join(Times2[:], ","), Valores: strings.Join(Values2[:], ","), Index: index2}
// fmt.Println("O SEA SI ENTRO")
crearj, errorjson := json.Marshal(respuestamem)
index2 = index2 + 1
/*if index < 60 {
index = index + 1
} else {
index = 60
}*/
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
// fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
// convertir_a_cadena := string(jsonResponse)
// fmt.Println(convertir_a_cadena)
//fmt.Println("LLEGUE AL FINAL A RETORNAR JSON", jsonResponse)
} else {
return
}
}
func getprocesos(w http.ResponseWriter, r *http.Request){
fmt.Println("ENTRE A LEER PROCESOS");
b, err := ioutil.ReadFile("procesos")
if err != nil { //nil es contrario a null por asi decirlo
fmt.Println("Error abriendo procesos")
return ///si hay error se sale
}
}
func obtenerprincipal(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE a PRINCIPAL")
archivos, err := ioutil.ReadDir("/proc")
if err != nil {
log.Fatal(err)
}
procejecucion:=0;
procsuspendidos:=0;
procdetenidos:=0;
proczombies:=0;
contador:=0;
textocompleto:="";
//totalram:=0.0;
//totalrammegas:=0.0;
//pruebaram:=0.0;
procesos=nil;
for _, archivo := range archivos {
if(archivo.Name()[0]>48 && archivo.Name()[0]<58){
//TODOS ESTOS SON PROCESOS
nombreArchivo:= "/proc/";
nombreArchivo+= archivo.Name();
nombreArchivo+="/status";
bytesLeidos, err := ioutil.ReadFile(nombreArchivo)
if err != nil {
fmt.Printf("Error leyendo archivo: %v", err)
}
contenido := string(bytesLeidos)
splitsalto:=strings.Split(contenido,"\n");
hayram:=true;
nombre:="";
for _,salto:= range splitsalto{
splitpuntos:=strings.Split(salto,":");
if(splitpuntos[0]=="Name"){
//fmt.Printf("El nombre del proceso con id: %s es: %s\n",nombreArchivo,splitpuntos[1])
textocompleto+=archivo.Name()+",";
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
nombre=aux;
textocompleto+=aux+",";
}else if(splitpuntos[0]=="Uid"){
//USUARIO, falta cambiar por nombre del usuario
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
textocompleto+=aux+",";
}else if(splitpuntos[0]=="State"){
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
textocompleto+=aux+",";
if(aux=="R(running)"){
procejecucion+=1;
}else if(aux=="S(sleeping)"){
procsuspendidos+=1;
}else if(aux=="I(idle)"){
procdetenidos+=1;
}else if(aux=="Z(zombie)"){
proczombies+=1;
}else{
fmt.Println("Proceso en estado: %s",aux)
}
//PPID es el padre
}else if(splitpuntos[0]=="PPid"){
var hj hijo;
hj.Nombre=nombre;
hj.pid=archivo.Name();
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
//fmt.Printf("Llamando metodo de meter hijo")
meterhijo(hj,aux);
}else if(splitpuntos[0]=="VmRSS"){
aux:=strings.ReplaceAll(splitpuntos[1],"\t","");
aux=strings.ReplaceAll(aux," ","");
aux=strings.ReplaceAll(aux,"kB","");
numero, err:= strconv.Atoi(aux);
if err != nil {
fmt.Printf("Error convirtiendo a numero la ram: %v", err)
numero=0;
}
//fmt.Printf("Numero convertido: %d",numero);
dec:=1.565;
dec=float64(numero)/80611.80;
//totalram+=dec;
//dec=dec/8000;
//aux=strconv.Itoa(dec);
aux= fmt.Sprintf("%f", dec)
textocompleto+=aux;
textocompleto+="%\n";
hayram=false;
}
}
if(hayram){
//SI NO ENCONTRO EL APARTADO DE RAM
textocompleto+="0.0%\n";
}
contador++;
}
}
//TERMINO DE LEER TODOS LOS PROCESOS
respuestamem := PROCESOS{Ejecucion: procejecucion, Suspendidos: procsuspendidos, Detenidos: procdetenidos, Zombie: proczombies, Total: contador,Datos:textocompleto, Arbol: getprocesos()}
crearj, errorjson := json.Marshal(respuestamem)
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//fmt.Println("Mandando json: %s",string(crearj))
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
// fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
fmt.Println("TERMINO CORRECTAMENTE EL PROCESO")
//imprimirprocesos();
}
func imprimirprocesos(){
for i:=0;i<len(procesos);i++{
fmt.Printf("Padre: %s{\n",procesos[i].Nombre);
for j:=0;j<len(procesos[i].Hijos);j++{
fmt.Printf("\tid: %s,",procesos[i].Hijos[j].pid);
fmt.Printf("\tNombre: %s\n",procesos[i].Hijos[j].Nombre);
}
fmt.Printf("}\n");
}
}
func meterhijo(proceso hijo,nombre string){
//proceso es el proceso a meter en hijos
//procesos la lista de procesos que voy manejando(los padres)
//nombre el id del padre a buscar
if(nombre=="0"){
return
}
for i:=0;i<len(procesos);i++ {
if(nombre== procesos[i].Nombre){
//SI ES EL PADRE
procesos[i].Hijos=append(procesos[i].Hijos,proceso);
return;
}
}
var aux Proc;
aux.Nombre=nombre;
aux.Hijos=append(aux.Hijos,proceso);
procesos=append(procesos,aux);
}
func getprocesos() string {
texto:="";
for i:=0;i<len(procesos);i++{
texto+=procesos[i].Nombre;
for j:=0;j<len(procesos[i].Hijos);j++{
texto+=","+procesos[i].Hijos[j].pid;
texto+=":"+procesos[i].Hijos[j].Nombre;
}
texto+="\n"
}
return texto;
}
func reply(w http.ResponseWriter, r *http.Request) |
func main() {
//obtener ram
http.HandleFunc("/", obtenerram)
//obtener info del cpu
http.HandleFunc("/cpu", obtenercpu)
http.HandleFunc("/principal", obtenerprincipal)
http.HandleFunc("/post", reply)
log.Fatal(http.ListenAndServe(":3030", nil)) //log.fatal como que lo mantiene a la escucha y permite pararlo
}
| {
fmt.Println("==============================")
fmt.Println("ENTRE A REPLY")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT")
if err := r.ParseForm(); err != nil {
fmt.Println(err);
}
//this is my first impulse. It makes the most sense to me.
//fmt.Println(r.PostForm); //out -> `map[]` would be `map[string]string` I think
//fmt.Println(r.PostForm["hat"]); //out -> `[]` would be `fez` or `["fez"]`
// fmt.Println(r.Body); //out -> `&{0xc82000e780 <nil> <nil> false true {0 0} false false false}`
type Hat struct {
Hat string
}
type Datos struct{
Nombre string `json:"Nombre"`
Departamento string `json:"Departamento"`
Edad int `json:"Edad"`
Forma string `json:"Forma de contagio"`
Estado string `json:"Estado"`
}
//this is the way the linked SO post above said should work. I don't see how the r.Body could be decoded.
decoder := json.NewDecoder(r.Body)
var t Datos
err := decoder.Decode(&t)
if err != nil {
fmt.Println(err);
}
if(t.Hat !=""){
fmt.Println("Se recibio nombre: ",t.Nombre);
//app := "kill"
//cmd:=exec.Command(app,t.Hat);
//stdout, err := cmd.Output()
if err != nil{
fmt.Println("Hubo un error!!!------------");
fmt.Println(err);
}else{
fmt.Println("TODO BINE!!!!");
fmt.Println(stdout);
}
}else{
fmt.Println("Vino vacio");
}
} | identifier_body |
main.go | package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"time"
"github.com/shirou/gopsutil/cpu"
//"os/exec"
)
var (
index = 0
index2 = 0
Times [1000]string
Values [1000]string
Times2 [1000]string
Values2 [1000]string
)
type Strumemoria struct {
Total int `json:"total"` //poner nombre de atributos en mayuscula
Libre int `json:"libre"`
Tiempos string `json:"tiempos"`
Valores string `json:"valores"`
Index int `json:"index"`
}
type Strucpu struct {
Total int `json:"total"` //poner nombre de atributos en mayuscula
Libre int `json:"libre"`
Tiempos string `json:"tiempos"`
Valores string `json:"valores"`
Index int `json:"index"`
}
type CPU struct {
Cores int32 `json:"cores"`
Vendor string `json:"vendor"`
Family string `json:"family"`
Model string `json:"model"`
Speed string `json:"speed"`
Read float64 `json:"read"`
}
type prueba struct {
valor int
}
//AGREGANDO LO DE LA PAGINA PRINCIPAL(PROCESOS)
type PROCESOS struct{
Ejecucion int `json:"ejecucion"`
Suspendidos int `json:"suspendidos"`
Detenidos int `json:"detenidos"`
Zombie int `json:"zombie"`
Total int `json:"total"`
Datos string `json:"datos"`
Arbol string `json:"arbol"`
}
type hijo struct{
Nombre string
pid string
}
type Proc struct{
Nombre string
Hijos []hijo
}
var procesos []Proc;
func obtenerram(w http.ResponseWriter, r *http.Request) {
fmt.Println("ENTRE")
/* fmt.Fprintf(w, "Welcome to the HomePage!") //IMPRIME EN LA WEB
fmt.Println("Endpoint Hit: homePage")*/ //IMPRIMIR EN CONSOLA AL
b, err := ioutil.ReadFile("mockram.json")
if err != nil { //nil es contrario a null por asi decirlo
fmt.Println("primer error return")
return ///si hay error se sale
}
str := string(b)
listainfo := strings.Split(string(str), "\n") //split por salto de linea
memoriatotal := strings.Replace((listainfo[0])[10:24], " ", "", -1)
memorialibre := strings.Replace((listainfo[2])[15:24], " ", "", -1)
fmt.Println("LA DISPONIBLE ES ", memorialibre)
ramtotal, err1 := strconv.Atoi(memoriatotal)
ramlibre, err2 := strconv.Atoi(memorialibre)
if err1 == nil && err2 == nil {
ramtotalmb := ramtotal / 1024
ramlibremb := ramlibre / 1024
porcentajeram := ((ramtotalmb - ramlibremb) * 100) / ramtotalmb
//obtengo hora
v1 := time.Now().Format("01-02-2006 15:04:05")
parte := strings.Split(v1, " ")
Times[index] = parte[1]
Values[index] = strconv.Itoa(porcentajeram)
//fmt.Println("EL VALUES ES ", Values[index])
/* if index == 60 {
var Tiempos2 [1000]string
var Valores2 [1000]string
for j := 1; j <= 60; j++ {
var i = 0
Tiempos2[i] = Times[j]
Valores2[i] = Values[j]
i = i + 1
}
//index = index - 1
Times = Tiempos2
Values = Valores2
fmt.Println("la posicion 1 es ", Times[0])
fmt.Println("La posicion ultima es ", Times[60])
}
*/
respuestamem := Strumemoria{Total: ramtotalmb, Libre: ramlibremb, Tiempos: strings.Join(Times[:], ","), Valores: strings.Join(Values[:], ","), Index: index}
// fmt.Println("O SEA SI ENTRO")
crearj, errorjson := json.Marshal(respuestamem)
index = index + 1
/*if index < 60 {
index = index + 1
} else {
index = 60
}*/
if errorjson != nil {
fmt.Println("HAY UN ERROR")
http.Error(w, errorjson.Error(), http.StatusInternalServerError)
return
}
//c fmt.Println("la memoria libre es ", respuestamem)
//conver := string(crearj)
fmt.Println("EL indice es ", index)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(http.StatusOK)
w.Write(crearj)
// convertir_a_cadena := string(jsonResponse)
// fmt.Println(convertir_a_cadena)
//fmt.Println("LLEGUE AL FINAL A RETORNAR JSON", jsonResponse)
} else {
return
}
}
func obtenercpu(w http.ResponseWriter, r *http.Request) {
    fmt.Println("ENTERED /cpu")
    b, err := ioutil.ReadFile("/proc/meminfo")
    if err != nil { // bail out if /proc/meminfo cannot be read
        fmt.Println("first error return")
        return
    }
    str := string(b)
    listainfo := strings.Split(str, "\n") // split on newlines
    // per-CPU usage via gopsutil; element 0 is the first logical CPU
    percentage, err := cpu.Percent(0, true)
    if err != nil || len(percentage) == 0 {
        return // CPU sampling failed
    }
    fmt.Println("CPU USAGE PERCENTAGE IS ", percentage[0])
    memoriatotal := strings.Replace((listainfo[0])[10:24], " ", "", -1)
    memorialibre := strings.Replace((listainfo[2])[15:24], " ", "", -1)
    ramtotal, err1 := strconv.Atoi(memoriatotal)
    ramlibre, err2 := strconv.Atoi(memorialibre)
    if err1 == nil && err2 == nil {
        ramtotalmb := ramtotal / 1024
        ramlibremb := ramlibre / 1024
        // grab the current time and keep only the HH:MM:SS part
        v1 := time.Now().Format("01-02-2006 15:04:05")
        parte := strings.Split(v1, " ")
        Times2[index2] = parte[1]
        Values2[index2] = strconv.FormatFloat(percentage[0], 'f', 5, 64)
        respuestamem := Strumemoria{Total: ramtotalmb, Libre: ramlibremb, Tiempos: strings.Join(Times2[:], ","), Valores: strings.Join(Values2[:], ","), Index: index2}
        crearj, errorjson := json.Marshal(respuestamem)
        index2 = index2 + 1
        if errorjson != nil {
            fmt.Println("MARSHAL ERROR")
            http.Error(w, errorjson.Error(), http.StatusInternalServerError)
            return
        }
        w.Header().Set("Content-Type", "application/json")
        w.Header().Set("Access-Control-Allow-Origin", "*")
        w.WriteHeader(http.StatusOK)
        w.Write(crearj)
    } else {
        return // bail out if either Atoi failed
    }
}
// getprocesosfile serves the raw "procesos" file; the name avoids colliding
// with the tree serializer getprocesos further down.
func getprocesosfile(w http.ResponseWriter, r *http.Request) {
    fmt.Println("ENTERED READ PROCESSES")
    b, err := ioutil.ReadFile("procesos")
    if err != nil {
        fmt.Println("Error opening procesos")
        return // bail out on error
    }
    w.Write(b) // plausible completion: the original read the bytes but never used them
}
func obtenerprincipal(w http.ResponseWriter, r *http.Request) {
    fmt.Println("ENTERED /principal")
    archivos, err := ioutil.ReadDir("/proc")
    if err != nil {
        log.Fatal(err)
    }
    procejecucion := 0
    procsuspendidos := 0
    procdetenidos := 0
    proczombies := 0
    contador := 0
    textocompleto := ""
    procesos = nil
    for _, archivo := range archivos {
        if archivo.Name()[0] >= '1' && archivo.Name()[0] <= '9' {
            // numeric entries under /proc are process directories
            nombreArchivo := "/proc/" + archivo.Name() + "/status"
            bytesLeidos, err := ioutil.ReadFile(nombreArchivo)
            if err != nil {
                fmt.Printf("Error reading file: %v", err)
            }
            contenido := string(bytesLeidos)
            splitsalto := strings.Split(contenido, "\n")
            hayram := true
            nombre := ""
            for _, salto := range splitsalto {
                splitpuntos := strings.Split(salto, ":")
                if splitpuntos[0] == "Name" {
                    textocompleto += archivo.Name() + ","
                    aux := strings.ReplaceAll(splitpuntos[1], "\t", "")
                    aux = strings.ReplaceAll(aux, " ", "")
                    nombre = aux
                    textocompleto += aux + ","
                } else if splitpuntos[0] == "Uid" {
                    // user id; still pending: map it to the user name
                    aux := strings.ReplaceAll(splitpuntos[1], "\t", "")
                    aux = strings.ReplaceAll(aux, " ", "")
                    textocompleto += aux + ","
                } else if splitpuntos[0] == "State" {
                    aux := strings.ReplaceAll(splitpuntos[1], "\t", "")
                    aux = strings.ReplaceAll(aux, " ", "")
                    textocompleto += aux + ","
                    if aux == "R(running)" {
                        procejecucion += 1
                    } else if aux == "S(sleeping)" {
                        procsuspendidos += 1
                    } else if aux == "I(idle)" {
                        procdetenidos += 1
                    } else if aux == "Z(zombie)" {
                        proczombies += 1
                    } else {
                        fmt.Printf("Process in state: %s\n", aux)
                    }
                } else if splitpuntos[0] == "PPid" {
                    // PPid is the parent pid
                    var hj hijo
                    hj.Nombre = nombre
                    hj.pid = archivo.Name()
                    aux := strings.ReplaceAll(splitpuntos[1], "\t", "")
                    aux = strings.ReplaceAll(aux, " ", "")
                    meterhijo(hj, aux)
                } else if splitpuntos[0] == "VmRSS" {
                    aux := strings.ReplaceAll(splitpuntos[1], "\t", "")
                    aux = strings.ReplaceAll(aux, " ", "")
                    aux = strings.ReplaceAll(aux, "kB", "")
                    numero, err := strconv.Atoi(aux)
                    if err != nil {
                        fmt.Printf("Error converting the RAM value: %v", err)
                        numero = 0
                    }
                    // share of total RAM; 80611.80 kB is the machine total, hard-coded
                    dec := float64(numero) / 80611.80
                    aux = fmt.Sprintf("%f", dec)
                    textocompleto += aux
                    textocompleto += "%\n"
                    hayram = false
                }
            }
            if hayram {
                // the status file had no VmRSS entry
                textocompleto += "0.0%\n"
            }
            contador++
        }
    }
    // done reading every process
    respuestamem := PROCESOS{Ejecucion: procejecucion, Suspendidos: procsuspendidos, Detenidos: procdetenidos, Zombie: proczombies, Total: contador, Datos: textocompleto, Arbol: getprocesos()}
    crearj, errorjson := json.Marshal(respuestamem)
    if errorjson != nil {
        fmt.Println("MARSHAL ERROR")
        http.Error(w, errorjson.Error(), http.StatusInternalServerError)
        return
    }
    w.Header().Set("Content-Type", "application/json")
    w.Header().Set("Access-Control-Allow-Origin", "*")
    w.WriteHeader(http.StatusOK)
    w.Write(crearj)
    fmt.Println("FINISHED /principal SUCCESSFULLY")
}
func imprimirprocesos() {
    // debug helper: pretty-print the parent/child table
    for i := 0; i < len(procesos); i++ {
        fmt.Printf("Parent: %s{\n", procesos[i].Nombre)
        for j := 0; j < len(procesos[i].Hijos); j++ {
            fmt.Printf("\tid: %s,", procesos[i].Hijos[j].pid)
            fmt.Printf("\tName: %s\n", procesos[i].Hijos[j].Nombre)
        }
        fmt.Printf("}\n")
    }
}
func meterhijo(proceso hijo, nombre string) {
    // proceso: the child process to insert
    // procesos: the running list of parents
    // nombre: the parent pid to look for
    if nombre == "0" {
        return // pid 0 gets no entry of its own
    }
    for i := 0; i < len(procesos); i++ {
        if nombre == procesos[i].Nombre {
            // found the parent
            procesos[i].Hijos = append(procesos[i].Hijos, proceso)
            return
        }
    }
    // parent not seen yet: create it
    var aux Proc
    aux.Nombre = nombre
    aux.Hijos = append(aux.Hijos, proceso)
    procesos = append(procesos, aux)
}
func getprocesos() string {
    // serialize the process tree, one parent per line (format sketch below)
    texto := ""
    for i := 0; i < len(procesos); i++ {
        texto += procesos[i].Nombre
        for j := 0; j < len(procesos[i].Hijos); j++ {
            texto += "," + procesos[i].Hijos[j].pid
            texto += ":" + procesos[i].Hijos[j].Nombre
        }
        texto += "\n"
    }
    return texto
}
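
// Example of getprocesos() output (pids and names hypothetical): the parent
// pid first, then each child appended as ",pid:name", one parent per line:
//   1,245:systemd-journald,310:sshd
//   310,1204:bash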
func reply(w http.ResponseWriter, r *http.Request) {
    fmt.Println("==============================")
    fmt.Println("ENTERED REPLY")
    w.Header().Set("Access-Control-Allow-Origin", "*")
    w.Header().Set("Access-Control-Allow-Credentials", "true")
    w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
    w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT")
    if err := r.ParseForm(); err != nil {
        fmt.Println(err)
    }
    type Datos struct {
        Nombre       string `json:"Nombre"`
        Departamento string `json:"Departamento"`
        Edad         int    `json:"Edad"`
        Forma        string `json:"Forma de contagio"`
        Estado       string `json:"Estado"`
    }
    // decode the JSON request body straight into Datos
    decoder := json.NewDecoder(r.Body)
    var t Datos
    err := decoder.Decode(&t)
    if err != nil {
        fmt.Println(err)
    }
    if t.Nombre != "" { // the original tested t.Hat, a field Datos does not have
        fmt.Println("Received name: ", t.Nombre)
        // earlier kill-process experiment, kept commented out:
        // app := "kill"
        // cmd := exec.Command(app, t.Nombre)
        // stdout, err := cmd.Output()
        // if err != nil {
        //     fmt.Println("There was an error!", err)
        // } else {
        //     fmt.Println("ALL GOOD!", stdout)
        // }
    } else {
        fmt.Println("The body came in empty")
    }
}
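
// Sketch of the JSON body reply expects (field names from the Datos struct
// tags above; values hypothetical):
//   {"Nombre":"Juan","Departamento":"Guatemala","Edad":30,
//    "Forma de contagio":"contacto","Estado":"estable"}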
func main() {
    // RAM info
    http.HandleFunc("/", obtenerram)
    // CPU info
    http.HandleFunc("/cpu", obtenercpu)
    http.HandleFunc("/principal", obtenerprincipal)
    http.HandleFunc("/post", reply)
    log.Fatal(http.ListenAndServe(":3030", nil)) // blocks serving requests; log.Fatal reports if the server ever stops
}

PIL_ext.py
# -*- coding: utf-8 -*-
import itertools
import pathlib
import numpy as np
import numpy.linalg as LA
from PIL import Image

def tovector(image, k=None):
    # image -> flat vector; k picks a single channel
    data = np.asarray(image, dtype=np.float64)
    if k is not None:  # a plain `if k:` would wrongly skip channel 0
        return data[:, :, k].flatten()
    else:
        return data.flatten()

def tomatrix(images, way='row'):
    # images -> matrix, one image per row (or per column)
    if way in {'r', 'row'}:
        return np.row_stack([tovector(image) for image in images])
    elif way in {'c', 'col', 'column'}:
        return np.column_stack([tovector(image) for image in images])

def toimage(vector, size, mode='RGB'):
    # vector -> image
    if mode == 'RGB':
        if len(size) == 2:
            size += (3,)
        return Image.fromarray(vector.reshape(size).astype('uint8')).convert(mode)
    else:
        return Image.fromarray(vector.reshape(size).astype('uint8')).convert(mode)
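
# A quick round-trip sketch (file name hypothetical; assumes a 100x100 RGB image):
#   im = Image.open('photo.jpg').resize((100, 100))
#   v = tovector(im)               # flat array of length 100*100*3
#   im2 = toimage(v, (100, 100))   # back to an RGB Image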
# from sklearn.preprocessing import FunctionTransformer
# class FiltTransformer(FunctionTransformer):
# '''Transform images to vectors
# '''
# def __init__(self, shape, channels, *args, **kwargs):
# def func(X):
# return np.column_stack([np.row_stack([filt(x.reshape(shape), channel).flatten() for x in X])
# for channel in channels])
# super(FiltTransformer, self).__init__(func=func, *args, **kwargs)
# def fit(self, X):
# """
# Transform images to vectors
# Arguments:
# X {list|array} -- images or a folder where images are stored
# """
# if isinstance(X, pathlib.Path):
# images = []
# for f in X.iterdir():
# try:
# img = Image.open(f).resize((200, 200))
# except IOError:
# print("Warning: 没有找到文件 <%s> 或读取文件失败"%f)
# else:
# images.append(img)
# else:
# images = X
# size = images[0].size
# assert np.all(im.size==size for im in X)
# return size
def get_images(files=None, folder=None, op=None, exts=('.jpg', '.jpeg', '.png')):
    """Collect PIL images from explicit files and/or a folder.

    Keyword Arguments:
        files {List[Path]} -- image files (default: {None})
        folder {Path} -- a folder of images (default: {None})
        op {Function} -- applied to each image (default: {None})
        exts {tuple[str]} -- accepted extensions (default: {('.jpg', '.jpeg', '.png')})
    Returns:
        List[Image] -- list of images
    Raises:
        Exception -- neither files nor a folder was provided
        LookupError -- a file name is invalid
    """
    images = []
    if files:
        if folder:
            files += [f for f in pathlib.Path(folder).iterdir()]
    elif folder:
        files = pathlib.Path(folder).iterdir()
    else:
        raise Exception('Must provide files or a folder')
    for f in files:
        if isinstance(f, str):
            f = pathlib.Path(f)
        if f.suffix == '':
            # no extension given: try each accepted one in turn
            for ext in exts:
                f = pathlib.Path(f).with_suffix(ext)
                if f.exists():
                    images.append(Image.open(f))
                    break
        elif f.exists() and f.suffix in exts:
            im = Image.open(f)
            images.append(im)
        else:
            raise LookupError('Invalid file name %s' % f)
    if op:
        images = [op(image) for image in images]
    return images
def lrmerge(im1, im2, loc=None):
    '''Merge the left part of `im1` and the right part of `im2`.

    Example
    -------
    im1 = Image.open(imagepath / 'go1.jpg')
    im2 = Image.open(imagepath / 'go2.jpg')
    im = lrmerge(im1, im2)
    '''
    xsize1, ysize1 = im1.size
    xsize2, ysize2 = im2.size
    if loc is None:
        loc = xsize1 // 2
    elif loc < 1:
        # a fraction: interpret it as a proportion of the width
        loc = int(xsize1 * loc)
    box1 = (0, 0, loc, ysize1)
    im1 = im1.crop(box1)
    im2.paste(im1, box1)
    return im2

def tbmerge(im1, im2, loc=None):
    '''Merge the top part of `im1` and the bottom part of `im2`.

    See also lrmerge
    '''
    xsize1, ysize1 = im1.size
    xsize2, ysize2 = im2.size
    if loc is None:
        loc = ysize1 // 2
    elif loc < 1:
        loc = int(ysize1 * loc)
    box1 = (0, 0, xsize1, loc)
    im1 = im1.crop(box1)
    im2.paste(im1, box1)
    return im2
def resize_as(im1, im2):
    # Image.resize returns a new image, so hand it back
    return im1.resize(im2.size)

def cover(im1, im2, w=10):
    '''Stack im1 over im2 with a gap of w pixels.

    w: width of the gap; assumes im1.size == im2.size
    '''
    xsize1, ysize1 = im1.size
    xsize2, ysize2 = im2.size
    im = Image.new(im2.mode, (xsize2, ysize1 + w))
    box2 = (0, ysize1 + w - ysize2, xsize2, ysize1 + w)
    im.paste(im2, box2)
    box1 = (0, 0, xsize1, ysize1)
    im.paste(im1, box1)
    return im

def stackup(images, size=None, w=200):
    # stack up images with `cover`
    if size is None:
        size = images[0].size
    im0 = images[0]
    for im in images[1:]:
        im = im.resize(size)
        im0 = cover(im0, im, w=w)
    return im0

def scale(image, k, l=None):
    # scale width by k and height by l (PIL sizes must be ints)
    if l is None:
        l = k
    s = (int(image.size[0] * k), int(image.size[1] * l))
    return image.resize(s)

def scale_w(image, width):
    # scale an image to a fixed width, keeping the aspect ratio
    w, h = image.size
    h = int(h * width / w)
    return image.resize((width, h))

def scale_h(image, height):
    # scale an image to a fixed height, keeping the aspect ratio
    w, h = image.size
    w = int(w * height / h)
    return image.resize((w, height))
def hstack(images, height=None):
    '''Stack images horizontally.

    Arguments:
        images {[Image]} -- list of images
    Keyword Arguments:
        height {Int} -- the common height of the images (default: {None})
    Returns:
        Image -- the result of image stacking
    '''
    if height is None:
        height = images[0].size[1]
    images = [scale_h(im, height) for im in images]
    stack = Image.new(images[0].mode, (sum(im.size[0] for im in images), height))
    stack.paste(images[0])
    shift = images[0].size[0]
    for im in images[1:]:
        stack.paste(im, (shift, 0))
        shift += im.size[0]
    return stack

def vstack(images, width=None):
    # see also hstack
    if width is None:
        width = images[0].size[0]
    images = [scale_w(im, width) for im in images]
    stack = Image.new(images[0].mode, (width, sum(im.size[1] for im in images)))
    stack.paste(images[0])
    shift = images[0].size[1]
    for im in images[1:]:
        stack.paste(im, (0, shift))
        shift += im.size[1]
    return stack

def tile(layout, vh=True):
    # vh: vstack each column first, then hstack the results
    if vh:
        imageList = [vstack(images) for images in layout]
        return hstack(imageList)
    else:
        imageList = [hstack(images) for images in layout]
        return vstack(imageList)

def sqstack(images, n=None, *args, **kwargs):
    # lay the images out on a roughly square grid
    N = len(images)
    if N == 1:
        return images[0]
    if n is None:
        n = int(np.ceil(np.sqrt(N)))
    layout = []
    k = 0
    while True:
        if k + n < N:
            layout.append(images[k:k + n])
        elif k + n >= N:
            layout.append(images[k:])
            break
        k += n
    return tile(layout)
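
# Layout sketch (a, b, c, d and imgs are hypothetical PIL images):
#   tile([[a, b], [c, d]]) gives explicit 2x2 control, while
#   sqstack(imgs) picks n = ceil(sqrt(len(imgs))) columns automatically.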
def palace(images):
    # arrange exactly 9 images as a 3x3 grid
    assert len(images) == 9, 'exactly 9 images'
    return tile([images[:3], images[3:6], images[6:9]])

def center_paste(image, other):
    # paste `other` onto the center of `image`
    width1, height1 = image.size
    width2, height2 = other.size
    image.paste(other, ((width1 - width2) // 2, (height1 - height2) // 2))
    return image

def fill_image(image):
    width, height = image.size
    # use the larger of width/height as the side of a new square canvas
    a = max(width, height)
    new_image = Image.new(image.mode, (a, a), color='white')
    # paste the original image onto the new canvas, centered
    if width > height:
        new_image.paste(image, (0, (a - height) // 2))
    else:
        new_image.paste(image, ((a - width) // 2, 0))
    return new_image

def cut_image(image):
    # cut the image into a 9-palace (3x3 grid of tiles)
    width, height = image.size
    item_width = width // 3
    box_list = [(j * item_width, i * item_width, (j + 1) * item_width, (i + 1) * item_width)
                for i in range(3) for j in range(3)]
    image_list = [image.crop(box) for box in box_list]
    return image_list
def replace(image, small, box=None):
    if box is None:
        box = (0, 0, *small.size)
    elif len(box) == 2:
        box += small.size
    image.paste(small, box)

def replaceOp(image, op, box):
    """Operate on only part of the image.

    Arguments:
        image {Image} -- the image
        op {function} -- operation on images
        box {tuple} -- a rectangular area of the image
    """
    small = op(image.crop(box))
    replace(image, small, box)

def save_images(image_list, name=''):
    for index, image in enumerate(image_list, 1):
        image.save('%s%d.png' % (name, index), 'PNG')
class Background:
    '''The background on which you paint.

    You can paint with pictures instead of pixels; see the sketch
    after the class.
    '''

    def __init__(self, nrow, ncol, size=(50, 50), mode='RGB', *args, **kwargs):
        '''
        Arguments:
            nrow {int} -- the number of rows
            ncol {int} -- the number of columns
        Keyword Arguments:
            size {tuple|int} -- the size of the pictures (default: {(50, 50)})
            mode {str} -- mode of the image (default: {'RGB'})
        '''
        if isinstance(size, int):
            size = (size, size)
        self.image = Image.new(mode, (ncol * size[0], nrow * size[1]))
        self.nrow = nrow
        self.ncol = ncol
        self.size = size
        self.mode = mode

    def paste(self, img, coord=(0, 0), scale=None):
        '''Embed an image into the background.

        Embed an image `img` into the background at coordinate `coord`,
        like plotting a point on a canvas.

        Arguments:
            img {Image} -- a small picture
        Keyword Arguments:
            coord {tuple} -- coordinate (default: {(0, 0)})
            scale {int} -- scaling of the small image
        '''
        x, y = coord
        if scale:
            size = self.size[0] * scale, self.size[1] * scale
            img = img.resize(size)
        else:
            img = img.resize(self.size)
        self.image.paste(img, (x * self.size[0], y * self.size[1]))

    def save(self, *args, **kwargs):
        self.image.save(*args, **kwargs)

    def draw(self, imgs, coords):
        '''Embed several images into the background.

        imgs[k] will be located at coords[k]; the images are cycled if
        there are fewer images than coordinates.

        Arguments:
            imgs {[Image]} -- list of images
            coords {[tuple]} -- list of coordinates
        '''
        for img, coord in zip(itertools.cycle(imgs), coords):
            self.paste(img, coord)
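
# Sketch: paint a 3x3 board by cycling two hypothetical tiles.
#   bg = Background(3, 3, size=50)
#   bg.draw([tile_a, tile_b], [(j, i) for i in range(3) for j in range(3)])
#   bg.save('board.png')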
def patch(imgs, big):
    """Repaint a big image with small images (a photomosaic).

    Each pixel of `big` is replaced by the small image whose mean color
    is nearest to that pixel.

    Arguments:
        imgs {List[Image]} -- list of images (assumed to all be RGB)
        big {Image} -- big image
    Returns:
        an image
    """
    bg = Background(big.size[1], big.size[0], size=imgs[0].size)
    a = np.asarray(big, dtype=np.float64)
    means = [np.asarray(img, dtype=np.float64).mean(axis=(0, 1)) for img in imgs if img.mode == 'RGB']
    for i in range(big.size[1]):
        for j in range(big.size[0]):
            p = a[i, j, :]
            k = np.argmin([LA.norm(p - m) for m in means])
            bg.paste(imgs[k], (j, i))
    return bg.image

def rgbimage(a, b, c):
    # combine three equal-shaped arrays into the R, G, B channels
    assert a.shape == b.shape == c.shape
    d = np.zeros(a.shape + (3,))
    d[:, :, 0] = a
    d[:, :, 1] = b
    d[:, :, 2] = c
    return Image.fromarray(d.astype('uint8')).convert('RGB')
class imageOp:
    """Decorator for operations on images.

    Examples:
        @imageOp(mode='RGB')
        def f ...
        @imageOp
        def f ...
    """

    def __new__(cls, f=None, mode=None):
        obj = super(imageOp, cls).__new__(cls)
        if mode is None and f is not None:
            # used directly as @imageOp: wrap f right away
            obj.mode = None
            return obj(f)
        else:
            obj.mode = mode
            return obj

    def __call__(self, f):
        def ff(image, *args, **kwargs):
            array = np.asarray(image, dtype=np.float64)
            if self.mode is None:
                mode = image.mode
            else:
                mode = self.mode
            # convert with the computed mode (the original passed self.mode,
            # which is None for the bare-@imageOp form)
            return Image.fromarray(f(array, *args, **kwargs).astype('uint8')).convert(mode)
        return ff
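
# Sketch of use: an inversion filter via the decorator (names hypothetical).
#   @imageOp
#   def invert(arr):
#       return 255.0 - arr
#   inverted = invert(Image.open('photo.jpg'))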
def cross(image1, image2, func):
    # like imageOp, but for a binary function of two images
    a1 = np.asarray(image1, dtype=np.float64)
    a2 = np.asarray(image2, dtype=np.float64)
    return Image.fromarray(func(a1, a2).astype('uint8')).convert(image1.mode)

def filt(array, channel='a'):
    # one level of 2D wavelet-packet filtering, channel by channel
    import pywt
    if array.ndim == 3:
        abc = []
        for k in range(3):
            wp = pywt.WaveletPacket2D(data=array[:, :, k], wavelet='db1', mode='symmetric')
            abc.append(wp[channel].data)
        return np.stack(abc, axis=2)
    else:
        wp = pywt.WaveletPacket2D(data=array, wavelet='db1', mode='symmetric')
        return wp[channel].data
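
# Wavelet-packet node names follow pywt conventions: 'a' is the approximation
# subband; 'h', 'v', 'd' are horizontal/vertical/diagonal detail, e.g.
#   filt(np.asarray(im, dtype=np.float64), 'd')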
# folder=pathlib.Path('/Users/william/Programming/Python/toy/faces/eigen/')
# images=get_images(folder=folder)
# image=sqstack(images[:6])
# image.save(folder / '1.jpg')
# image=sqstack(images[6:])
# image.save(folder / '2.jpg')
#
|
app.py
from flask import Flask, request, render_template, jsonify, redirect, url_for, session, flash
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, TextAreaField, IntegerField, FloatField, SelectField
from wtforms.validators import InputRequired, Email, Length, Optional
from werkzeug.security import generate_password_hash, check_password_hash
import views

# initializations
app = Flask(__name__)
app.config['SECRET_KEY'] = 'estoessecretoXD!'
Bootstrap(app)

# PostgreSQL connection
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:password@localhost/postgres'
# silence modification-tracking warnings (optional)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# MySQL connection (kept for reference)
#app.config['MYSQL_HOST'] = 'localhost'
#app.config['MYSQL_USER'] = 'root'
#app.config['MYSQL_PASSWORD'] = 'password'
#app.config['MYSQL_DB'] = 'flaskcontacts'
#mysql = MySQL(app)

# Postgres db instance
db = SQLAlchemy(app)  # import this object to reuse the db elsewhere
# marshmallow instance
ma = Marshmallow(app)
class Usuario(db.Model):
    # the ORM requires an id primary-key column
    id = db.Column(db.Integer, primary_key=True)
    nombre = db.Column(db.String(40), unique=True)
    contra = db.Column(db.String(1000))
    email = db.Column(db.String(100), unique=True)
    telefono = db.Column(db.String(9))

    # constructor, run for every instance of the class
    def __init__(self, nombre, contra, email, telefono):
        self.nombre = nombre
        self.contra = contra
        self.email = email
        self.telefono = telefono

class Producto(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    nombreProd = db.Column(db.String(40))
    precio = db.Column(db.Float)
    cantidad = db.Column(db.Integer)
    categoria = db.Column(db.String(20))
    descripcion = db.Column(db.String(100))
    imagen = db.Column(db.String(50))

    def __init__(self, nombreProd, precio, cantidad, categoria, descripcion, imagen):
        self.nombreProd = nombreProd
        self.precio = precio
        self.cantidad = cantidad
        self.categoria = categoria
        self.descripcion = descripcion
        self.imagen = imagen

class Pedido(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    producto_id = db.Column(db.Integer, db.ForeignKey('producto.id'))
    usuario_id = db.Column(db.Integer, db.ForeignKey('usuario.id'))
    cantidad = db.Column(db.Integer)
    precio_uni = db.Column(db.Float)
    precio_total = db.Column(db.Float)
    estado = db.Column(db.String(20))

    def __init__(self, producto_id, usuario_id, cantidad, precio_uni, precio_total, estado):
        self.producto_id = producto_id
        self.usuario_id = usuario_id
        self.cantidad = cantidad
        self.precio_uni = precio_uni
        self.precio_total = precio_total
        self.estado = estado
# create all tables
db.create_all()

# schema for Usuario
class UsuarioSchema(ma.Schema):
    class Meta:
        # the fields exposed whenever we go through the schema
        fields = ('id', 'nombre', 'contra', 'email', 'telefono')

usuario_schema = UsuarioSchema()  # serializes one user at a time
usuarios_schema = UsuarioSchema(many=True)  # serializes several

class ProductoSchema(ma.Schema):
    class Meta:
        fields = ("id", "nombreProd", "precio", "cantidad", "categoria", "descripcion", "imagen")

producto_schema = ProductoSchema()
productos_schema = ProductoSchema(many=True)

class PedidoSchema(ma.Schema):
    class Meta:
        fields = ("id", "producto_id", "usuario_id", "cantidad", "precio_uni", "precio_total", "estado")

pedido_schema = PedidoSchema()
pedidos_schema = PedidoSchema(many=True)
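
# Serialization sketch (objects hypothetical):
#   usuario_schema.dump(user)    -> one dict with the fields listed above
#   usuarios_schema.dump(users)  -> a list of such dicts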
# THE DATABASE DEFINITION ENDS HERE

@app.route('/')
def Index():
    return render_template("index.html")

#///////////////////////////////////////
# USER OPERATIONS - START
#///////////////////////////////////////

# URL to create users
@app.route('/crearUsuario', methods=['POST'])
def create_user():
    nombre = request.json['nombre']
    contra = request.json['contra']
    email = request.json['email']
    telefono = request.json['telefono']
    contra_cifrada = generate_password_hash(contra)
    new_user = Usuario(nombre, contra_cifrada, email, telefono)  # build the user
    db.session.add(new_user)  # stage it in the session
    db.session.commit()  # finish the transaction
    return usuario_schema.jsonify(new_user)  # return the created user to the front end
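
# Example request (host/port and values hypothetical):
#   curl -X POST http://localhost:5000/crearUsuario \
#        -H 'Content-Type: application/json' \
#        -d '{"nombre": "ana", "contra": "s3cret", "email": "ana@mail.com", "telefono": "123456789"}'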
# URL to list users
@app.route("/listarUsuarios", methods=["GET"])
def get_users():
    all_users = Usuario.query.all()  # fetch every user
    return render_template('ListarUsuariosAdmin.html', lista=all_users)

# URL to look up one specific user
@app.route("/listarUsuarios/<id>", methods=["GET"])
def get_user(id):
    user = Usuario.query.get_or_404(id)  # answers 404 when the id does not exist
    return "Usuario: %s / Email: %s / Telefono: %s" % (user.nombre, user.email, user.telefono)
# URL to update the logged-in user (POST kept as a fallback for the front end)
@app.route("/actualizarUsuario", methods=["GET", "POST"])
def update_user():
    form = RegisterForm()  # RegisterForm is expected to be defined elsewhere in this module
    id = session["id_user"]
    # fetch the user
    user = Usuario.query.get(id)
    if request.method == "GET":
        return render_template('ActualizarDatos.html', nombre=user.nombre, email=user.email, telefono=user.telefono, form=form)
    else:
        if form.validate_on_submit():
            if user:
                contrase = form.contra.data
                contra_cifrada = generate_password_hash(contrase)
                user.nombre = form.nombre.data
                user.contra = contra_cifrada
                user.email = form.email.data
                user.telefono = form.telefono.data
                session["user"] = form.nombre.data
                db.session.commit()  # persist the changes
                return render_template('GestionarCuenta.html', nombre=user.nombre, email=user.email, telefono=user.telefono)
        return "ERROR"
@app.route("/eliminarUsuario", methods=["POST"])
def delete_user():
id = session["id_user"]
user = Usuario.query.get(id) #busca al usuario
print(user)
db.session.delete(user) #lo elimina
db.session.commit() #guarda cambios
#if "user" in session:
session.pop("user")
session.pop("id_user")
return render_template('index.html')
#return usuario_schema.jsonify(user) #devuelve el usuario eliminado
# ///////////////////////////////////////
# USER OPERATIONS - END
# ///////////////////////////////////////
# -----------------------------------------------------------------------------------------------------------------
# ///////////////////////////////////////
# PRODUCT OPERATIONS - START
# ///////////////////////////////////////
class ProductForm(FlaskForm):  # product registration form
    nombreProd = StringField('nombreProd', validators=[InputRequired(), Length(min=1, max=30)])
    precio = FloatField('precio', validators=[InputRequired()])
    cantidad = IntegerField('cantidad', validators=[InputRequired()])
    categoria = SelectField('categoria', validators=[InputRequired()], choices=[("LA", "Lácteos"), ("EN", "Enlatados"), ("CE", "Carnes y embutidos"), ("PL", "Productos de limpieza"), ("FV", "Frutas y Verduras")])
    descripcion = TextAreaField('descripcion', validators=[Optional(), Length(min=2, max=100)])
    imagen = StringField('imagen', validators=[Optional(), Length(min=2, max=50)])
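# Illustrative note: each SelectField choice is a (stored_code, display_label)
# pair, so form.categoria.data yields the two-letter code that
# Producto.categoria stores (sketch only, values made up):
#
#   form = ProductForm(categoria="LA")
#   form.categoria.data   # -> "LA" (shown to the user as "Lácteos")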
#@app.route("/admin",methods=["GET"])
#def indexAdmin():
# return render_template("Listarproductos.html")
#LISTAR PRODUCTOS POR CATEGORIA (LISTA TODOS POR DEFAULT)
@app.route("/admin/", methods=["GET"])
@app.route("/admin/<cat>", methods=["GET"])
def get_products_by_cat(cat="ALL"):
products = Producto.query.all() #devuelve una lista
p_filtrados = [] #lista vacia
cat = request.args.get('cat')
opciones=["LA","EN","CE","PL","FV"]
if (cat in opciones):
for p in products:
if(cat == p.categoria):
p_filtrados.append(p)
else:
p_filtrados = products
#res = productos_schema.dump(p_filtrados) #convierte la lista en un esquema de productos
#return jsonify(res) #devuelve el esquema convertido a json
return render_template('Listarproductos.html',listaProd = p_filtrados)
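# Illustrative requests (host/port assume the app.run(...) call at the bottom):
#
#   GET http://localhost:8000/admin/          -> all products
#   GET http://localhost:8000/admin/LA        -> only "LA" (Lácteos)
#   GET http://localhost:8000/admin/?cat=EN   -> only "EN" (Enlatados)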
@app.route('/crearProducto', methods=['GET', 'POST'])
def create_product():
    form = ProductForm()
    if request.method == "GET":
        return render_template("AgregarProducto.html", form=form)
    if form.validate_on_submit():
        nuevo_producto = Producto(nombreProd=form.nombreProd.data, precio=form.precio.data, cantidad=form.cantidad.data, categoria=form.categoria.data, descripcion=form.descripcion.data, imagen=form.imagen.data)
        db.session.add(nuevo_producto)  # stage it in the session
        db.session.commit()             # persist the change
    return redirect(url_for("get_products_by_cat"))
    # Earlier JSON-based variant of this route, kept for reference:
    '''
    nombreProd = request.json['nombreProd']
    precio = request.json['precio']
    cantidad = request.json['cantidad']
    categoria = request.json["categoria"]
    descripcion = request.json["descripcion"]
    imagen = request.json["imagen"]
    new_prod = Producto(nombreProd, precio, cantidad, categoria, descripcion, imagen)
    db.session.add(new_prod)
    db.session.commit()
    return producto_schema.jsonify(new_prod)'''
@app.route("/listarProductos", methods=["GET"])
def get_products():
all |
@app.route("/actualizarProducto/<id>", methods=["PUT"])
def update_product(id):
#recupera al producto
prod = Producto.query.get(id)
#recupera los campos del request
nombreProd = request.json['nombreProd']
precio = request.json['precio']
cantidad = request.json['cantidad']
categoria = request.json["categoria"]
descripcion = request.json["descripcion"]
imagen = request.json["imagen"]
#actualiza los campos
prod.nombreProd = nombreProd
prod.precio = precio
prod.cantidad = cantidad
prod.categoria = categoria
prod.descripcion = descripcion
prod.imagen = imagen
#guarda los cambios
db.session.commit()
return producto_schema.jsonify(prod)
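# Illustrative request for the route above (the id and values are made up):
#
#   curl -X PUT http://localhost:8000/actualizarProducto/3 \
#        -H "Content-Type: application/json" \
#        -d '{"nombreProd": "Leche entera", "precio": 4.5, "cantidad": 20,
#             "categoria": "LA", "descripcion": "Caja de 1L", "imagen": "leche.png"}'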
@app.route("/eliminarProducto/<id>", methods=["POST"])
def delete_product(id):
prod = Producto.query.get(id)
db.session.delete(prod)
db.session.commit()
return redirect(url_for("get_products_by_cat"))
# ///////////////////////////////////////
# PRODUCT OPERATIONS - END
# ///////////////////////////////////////
# -----------------------------------------------------------------------------------------------------------------
# ///////////////////////////////////////
# LOGIN - START (user session handling)
# ///////////////////////////////////////
# -----------------------------------------------------------------------------------------------------------------
class LoginForm(FlaskForm):
    email = StringField('Email', validators=[InputRequired(), Length(min=4, max=30)])
    contra = PasswordField('Contraseña', validators=[InputRequired(), Length(min=4, max=30)])
@app.route("/login", methods=['GET','POST'])
def login():
form = LoginForm()
#if "user" in session:
# print("segundo" +session["user"])
# return render_template('index.html')
# print("GAAAAAA")
if form.validate_on_submit():
#print("primer" + session["user"])
user=Usuario.query.filter_by(email=form.email.data).first()
if user:
if check_password_hash(user.contra,form.contra.data):
session["user"] = user.nombre
session["id_user"]= user.id
print(session["user"] )
print(session["id_user"])
#success_message = 'Bienvenido {}'.format(user.nombre)
#flash(success_message)
print("LOGGGEADOOOOO")
#return render_template('Categorizacion.html')
return redirect(url_for('see_products')) #va el nombre de la funcion, no de la ruta
error_message = "Usuario o contraseña incorrectos"
flash(error_message)
return render_template('signin.html', form=form)
return render_template('signin.html', form=form)
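# Illustrative session contents after a successful login (values made up):
#
#   session["user"]     # -> "ana"  (display name, read by the templates)
#   session["id_user"]  # -> 7      (primary key, used by /miCuenta, /actualizarUsuario, /eliminarUsuario)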
@app.route('/verProductos', methods=['GET'])
def see_products():
return render_template('Categorizacion.html')
@app.route("/logout", methods=['GET','POST'])
def logout():
if "user" in session:
session.pop("user")
if "id_user" in session:
session.pop("id_user")
return render_template('index.html')
class RegisterForm(FlaskForm):  # user registration form
    email = StringField('email', validators=[InputRequired(), Length(min=4, max=30)])
    nombre = StringField('nombre', validators=[InputRequired(), Length(min=4, max=30)])
    contra = PasswordField('contra', validators=[InputRequired(), Length(min=8, max=30)])
    telefono = StringField('telefono', validators=[InputRequired(), Length(min=4, max=30)])
@app.route("/registrarse", methods=['GET','POST'])
def registro():
form = RegisterForm()
if form.validate_on_submit():
contrase=form.contra.data
contra_cifrada=generate_password_hash(contrase)
nuevo_usuario=Usuario(nombre=form.nombre.data, contra=contra_cifrada, email=form.email.data, telefono=form.telefono.data)
session["user"] = form.nombre.data
print(contra_cifrada)
db.session.add(nuevo_usuario) #lo cargo a la BD
db.session.commit() #termino la operacion
user=Usuario.query.filter_by(nombre=(session["user"])).first()
session["id_user"]= user.id
return render_template('Registradoconexito.html')
#nombre contra email telf------> Atributos del Usuario
return render_template('Registrate.html', form=form)
@app.route("/miCuenta", methods=['GET','POST'])
def revisarMiCuenta():
id = session["id_user"]
user = Usuario.query.get(id)
print(user.email)
print(user.id)
return render_template('GestionarCuenta.html', nombre=user.nombre, email=user.email, telefono=user.telefono)
@app.route("/verPedidos",methods=["GET"])
def ver_Pedidos():
return render_template("AdministrarPedido.html")
@app.errorhandler(404)
def not_found(error=None):
    msg = jsonify({
        "holas": "it may be obvious, but... something went wrong",
        "mensaje": "Resource not found: " + request.url,
        "status": 404
    })
    return msg, 404  # return the real 404 status code instead of the default 200
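# Illustrative response for an unknown URL (host/port are assumptions):
#
#   GET http://localhost:8000/nope -> HTTP 404 with body
#   {"holas": "...", "mensaje": "Resource not found: http://localhost:8000/nope", "status": 404}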
# main entry point (generated with the "main" + TAB editor snippet)
if __name__ == "__main__":
    app.run(debug=True, port=8000)
Python3_original.rs | #[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct Python3_original {
support_level: SupportLevel,
data: DataHolder,
code: String,
imports: String,
interpreter: String,
main_file_path: String,
plugin_root: String,
cache_dir: String,
venv: Option<String>,
}
impl Python3_original {
fn fetch_imports(&mut self) -> Result<(), SniprunError> {
if self.support_level < SupportLevel::Import {
return Ok(());
}
let mut v = vec![];
let mut errored = true;
if let Some(real_nvim_instance) = self.data.nvim_instance.clone() {
info!("got real nvim isntance");
let mut rvi = real_nvim_instance.lock().unwrap();
if let Ok(buffer) = rvi.get_current_buf() {
info!("got buffer");
if let Ok(buf_lines) = buffer.get_lines(&mut rvi, 0, -1, false) {
info!("got lines in buffer");
v = buf_lines;
errored = false;
}
}
}
if errored {
return Err(SniprunError::FetchCodeError);
}
info!("lines are : {:?}", v);
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
{
self.code = self.data.current_bloc.clone();
}
for line in v.iter() {
// info!("lines are : {}", line);
if (line.trim().starts_with("import ") || line.trim().starts_with("from ")) //basic selection
&& !line.trim().starts_with('#')
&& self.module_used(line, &self.code)
{
                // imports are wrapped in a try/except block later, in case an unneeded module is unavailable
self.imports = self.imports.clone() + "\n" + line;
}
}
info!("import founds : {:?}", self.imports);
Ok(())
}
fn | (&self, line: &str, code: &str) -> bool {
info!(
"checking for python module usage: line {} in code {}",
line, code
);
if line.contains('*') {
return true;
}
if line.contains(" as ") {
if let Some(name) = line.split(' ').last() {
return code.contains(name);
}
}
for name in line
.replace(",", " ")
.replace("from", " ")
.replace("import ", " ")
.split(' ')
.filter(|&x| !x.is_empty())
{
if code.contains(name.trim()) {
return true;
}
}
false
}
    fn fetch_config(&mut self) {
        // Assign the default first so that a user-configured interpreter,
        // set just below, is not overwritten afterwards.
        self.interpreter = String::from("python3");
        if let Some(used_compiler) = Python3_original::get_interpreter_option(&self.get_data(), "interpreter") {
            if let Some(compiler_string) = used_compiler.as_str() {
                info!("Using custom interpreter: {}", compiler_string);
                self.interpreter = compiler_string.to_string();
            }
        }
if let Ok(path) = env::current_dir() {
if let Some(venv_array_config) = Python3_original::get_interpreter_option(&self.get_data(), "venv") {
if let Some(actual_vec_of_venv) = venv_array_config.as_array() {
for possible_venv in actual_vec_of_venv.iter() {
if let Some(possible_venv_str) = possible_venv.as_str() {
let venv_abs_path = path.to_str().unwrap().to_owned()
+ "/"
+ possible_venv_str
+ "/bin/activate_this.py";
if std::path::Path::new(&venv_abs_path).exists() {
self.venv = Some(venv_abs_path);
break;
}
}
}
}
}
}
}
}
impl Interpreter for Python3_original {
fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Python3_original> {
//create a subfolder in the cache folder
let rwd = data.work_dir.clone() + "/python3_original";
let mut builder = DirBuilder::new();
builder.recursive(true);
builder
.create(&rwd)
.expect("Could not create directory for python3-original");
//pre-create string pointing to main file's and binary's path
let mfp = rwd.clone() + "/main.py";
let pgr = data.sniprun_root_dir.clone();
Box::new(Python3_original {
data,
support_level: level,
code: String::from(""),
imports: String::from(""),
main_file_path: mfp,
plugin_root: pgr,
cache_dir: rwd,
interpreter: String::new(),
venv: None,
})
}
fn check_cli_args(&self) -> Result<(), SniprunError> {
// All cli arguments are sendable to python
// Though they will be ignored in REPL mode
Ok(())
}
fn get_name() -> String {
String::from("Python3_original")
}
fn behave_repl_like_default() -> bool {
false
}
fn has_repl_capability() -> bool {
true
}
fn default_for_filetype() -> bool {
true
}
fn get_supported_languages() -> Vec<String> {
vec![
String::from("Python 3"),
String::from("python"),
String::from("python3"),
String::from("py"),
]
}
fn get_current_level(&self) -> SupportLevel {
self.support_level
}
fn set_current_level(&mut self, level: SupportLevel) {
self.support_level = level;
}
fn get_data(&self) -> DataHolder {
self.data.clone()
}
fn get_max_support_level() -> SupportLevel {
SupportLevel::Import
}
fn fetch_code(&mut self) -> Result<(), SniprunError> {
self.fetch_config();
self.fetch_imports()?;
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
&& self.get_current_level() >= SupportLevel::Bloc
{
self.code = self.data.current_bloc.clone();
} else if !self.data.current_line.replace(" ", "").is_empty()
&& self.get_current_level() >= SupportLevel::Line
{
self.code = self.data.current_line.clone();
} else {
self.code = String::from("");
}
Ok(())
}
fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
if !self.imports.is_empty() {
let mut indented_imports = String::new();
for import in self.imports.lines() {
indented_imports = indented_imports + "\t" + import + "\n";
}
self.imports = String::from("\ntry:\n") + &indented_imports + "\nexcept:\n\tpass\n";
}
let mut source_venv = String::new();
if let Some(venv_path) = &self.venv {
info!("loading venv: {}", venv_path);
source_venv = source_venv + "\n" + "activate_this_file = \"" + venv_path + "\"";
source_venv += "\nexec(compile(open(activate_this_file, \"rb\").read(), activate_this_file, 'exec'), dict(__file__=activate_this_file))\n";
}
self.code = source_venv
+ &self.imports.clone()
+ &unindent(&format!("{}{}", "\n", self.code.as_str()));
Ok(())
}
fn build(&mut self) -> Result<(), SniprunError> {
// info!("python code:\n {}", self.code);
write(&self.main_file_path, &self.code)
.expect("Unable to write to file for python3_original");
Ok(())
}
fn execute(&mut self) -> Result<String, SniprunError> {
let output = Command::new(&self.interpreter)
.arg(&self.main_file_path)
.args(&self.get_data().cli_args)
.output()
.expect("Unable to start process");
if output.status.success() {
Ok(String::from_utf8(output.stdout).unwrap())
} else {
return Err(SniprunError::RuntimeError(
String::from_utf8(output.stderr.clone())
.unwrap()
.lines()
.last()
.unwrap_or(&String::from_utf8(output.stderr).unwrap())
.to_owned(),
));
}
}
}
impl ReplLikeInterpreter for Python3_original {
fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
self.fetch_code()
}
fn build_repl(&mut self) -> Result<(), SniprunError> {
self.build()
}
fn execute_repl(&mut self) -> Result<String, SniprunError> {
self.execute()
}
fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
info!("begins add boilerplate repl");
//load save & load functions
let mut path_to_python_functions = self.plugin_root.clone();
path_to_python_functions.push_str("/src/interpreters/Python3_original/saveload.py");
let python_functions = std::fs::read_to_string(&path_to_python_functions).unwrap();
let klepto_memo = String::from("'") + &self.cache_dir.clone() + "/" + "memo" + "'";
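        // Path of the memo cache used by the sniprun142859_save/_load helpers
        // (klepto, judging by the variable name) to persist variables between runs.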
let mut final_code = self.imports.clone();
final_code.push('\n');
final_code.push_str(&python_functions);
final_code.push('\n');
if self.read_previous_code().is_empty() {
//first run
self.save_code("Not the first run anymore".to_string());
} else {
//not the first run, should load old variables
{
final_code.push_str("sniprun142859_load(");
final_code.push_str(&klepto_memo);
final_code.push(')');
}
final_code.push('\n');
}
final_code.push_str(&unindent(&format!("{}{}", "\n", self.code.as_str())));
final_code.push('\n');
{
final_code.push_str("sniprun142859_save("); // if the run has not failed, save new variables
final_code.push_str(&klepto_memo);
final_code.push(')');
}
self.code = final_code.clone();
// info!("---{}---", &final_code);
Ok(())
}
}
#[cfg(test)]
mod test_python3_original {
use super::*;
use crate::*;
use crate::test_main::*;
#[test]
fn simple_print() {
let mut data = DataHolder::new();
data.current_bloc = String::from("print(\"lol\",1);");
let mut interpreter = Python3_original::new(data);
let res = interpreter.run_at_level(SupportLevel::Bloc);
// should panic if not an Ok()
let string_result = res.unwrap();
assert_eq!(string_result, "lol 1\n");
}
fn test_repl() {
let mut event_handler = fake_event();
event_handler.fill_data(&fake_msgpack());
event_handler.data.filetype = String::from("python");
event_handler.data.current_bloc = String::from("a=1");
event_handler.data.repl_enabled = vec![String::from("Python3_original")];
event_handler.data.sniprun_root_dir = String::from(".");
        //run the launcher (which selects, inits and runs an interpreter)
let launcher = launcher::Launcher::new(event_handler.data.clone());
let _result = launcher.select_and_run();
event_handler.data.current_bloc = String::from("print(a)");
let launcher = launcher::Launcher::new(event_handler.data.clone());
let result = launcher.select_and_run();
assert!(result.is_ok());
}
}
| module_used | identifier_name |
Python3_original.rs | #[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct Python3_original {
support_level: SupportLevel,
data: DataHolder,
code: String,
imports: String,
interpreter: String,
main_file_path: String,
plugin_root: String,
cache_dir: String,
venv: Option<String>,
}
impl Python3_original {
fn fetch_imports(&mut self) -> Result<(), SniprunError> {
if self.support_level < SupportLevel::Import {
return Ok(());
}
let mut v = vec![];
let mut errored = true;
if let Some(real_nvim_instance) = self.data.nvim_instance.clone() {
info!("got real nvim isntance");
let mut rvi = real_nvim_instance.lock().unwrap();
if let Ok(buffer) = rvi.get_current_buf() {
info!("got buffer");
if let Ok(buf_lines) = buffer.get_lines(&mut rvi, 0, -1, false) {
info!("got lines in buffer");
v = buf_lines;
errored = false;
}
}
}
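        // Imports are gathered from the whole buffer, not just the selection,
        // so `import`/`from` lines anywhere in the file are candidates.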
if errored {
return Err(SniprunError::FetchCodeError);
}
info!("lines are : {:?}", v);
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
{
self.code = self.data.current_bloc.clone();
}
for line in v.iter() {
// info!("lines are : {}", line);
if (line.trim().starts_with("import ") || line.trim().starts_with("from ")) //basic selection
&& !line.trim().starts_with('#')
&& self.module_used(line, &self.code)
{
                // wrap in a try/except block in case an unneeded module is unavailable
self.imports = self.imports.clone() + "\n" + line;
}
}
info!("import founds : {:?}", self.imports);
Ok(())
}
fn module_used(&self, line: &str, code: &str) -> bool {
info!(
"checking for python module usage: line {} in code {}",
line, code
);
if line.contains('*') {
return true;
}
if line.contains(" as ") {
if let Some(name) = line.split(' ').last() {
return code.contains(name);
}
}
for name in line
.replace(",", " ")
.replace("from", " ")
.replace("import ", " ")
.split(' ')
.filter(|&x| !x.is_empty())
{
if code.contains(name.trim()) {
return true;
}
}
false
}
fn fetch_config(&mut self) {
        let default_compiler = String::from("python3");
        self.interpreter = default_compiler;
        if let Some(used_compiler) = Python3_original::get_interpreter_option(&self.get_data(), "interpreter") {
            if let Some(compiler_string) = used_compiler.as_str() {
                info!("Using custom compiler: {}", compiler_string);
                self.interpreter = compiler_string.to_string();
            }
        }
if let Ok(path) = env::current_dir() |
}
}
impl Interpreter for Python3_original {
fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Python3_original> {
//create a subfolder in the cache folder
let rwd = data.work_dir.clone() + "/python3_original";
let mut builder = DirBuilder::new();
builder.recursive(true);
builder
.create(&rwd)
.expect("Could not create directory for python3-original");
//pre-create string pointing to main file's and binary's path
let mfp = rwd.clone() + "/main.py";
let pgr = data.sniprun_root_dir.clone();
Box::new(Python3_original {
data,
support_level: level,
code: String::from(""),
imports: String::from(""),
main_file_path: mfp,
plugin_root: pgr,
cache_dir: rwd,
interpreter: String::new(),
venv: None,
})
}
fn check_cli_args(&self) -> Result<(), SniprunError> {
// All cli arguments are sendable to python
// Though they will be ignored in REPL mode
Ok(())
}
fn get_name() -> String {
String::from("Python3_original")
}
fn behave_repl_like_default() -> bool {
false
}
fn has_repl_capability() -> bool {
true
}
fn default_for_filetype() -> bool {
true
}
fn get_supported_languages() -> Vec<String> {
vec![
String::from("Python 3"),
String::from("python"),
String::from("python3"),
String::from("py"),
]
}
fn get_current_level(&self) -> SupportLevel {
self.support_level
}
fn set_current_level(&mut self, level: SupportLevel) {
self.support_level = level;
}
fn get_data(&self) -> DataHolder {
self.data.clone()
}
fn get_max_support_level() -> SupportLevel {
SupportLevel::Import
}
fn fetch_code(&mut self) -> Result<(), SniprunError> {
self.fetch_config();
self.fetch_imports()?;
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
&& self.get_current_level() >= SupportLevel::Bloc
{
self.code = self.data.current_bloc.clone();
} else if !self.data.current_line.replace(" ", "").is_empty()
&& self.get_current_level() >= SupportLevel::Line
{
self.code = self.data.current_line.clone();
} else {
self.code = String::from("");
}
Ok(())
}
fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
if !self.imports.is_empty() {
let mut indented_imports = String::new();
for import in self.imports.lines() {
indented_imports = indented_imports + "\t" + import + "\n";
}
self.imports = String::from("\ntry:\n") + &indented_imports + "\nexcept:\n\tpass\n";
}
let mut source_venv = String::new();
if let Some(venv_path) = &self.venv {
info!("loading venv: {}", venv_path);
source_venv = source_venv + "\n" + "activate_this_file = \"" + venv_path + "\"";
source_venv += "\nexec(compile(open(activate_this_file, \"rb\").read(), activate_this_file, 'exec'), dict(__file__=activate_this_file))\n";
}
self.code = source_venv
+ &self.imports.clone()
+ &unindent(&format!("{}{}", "\n", self.code.as_str()));
Ok(())
}
fn build(&mut self) -> Result<(), SniprunError> {
// info!("python code:\n {}", self.code);
write(&self.main_file_path, &self.code)
.expect("Unable to write to file for python3_original");
Ok(())
}
fn execute(&mut self) -> Result<String, SniprunError> {
let output = Command::new(&self.interpreter)
.arg(&self.main_file_path)
.args(&self.get_data().cli_args)
.output()
.expect("Unable to start process");
if output.status.success() {
Ok(String::from_utf8(output.stdout).unwrap())
} else {
return Err(SniprunError::RuntimeError(
String::from_utf8(output.stderr.clone())
.unwrap()
.lines()
.last()
.unwrap_or(&String::from_utf8(output.stderr).unwrap())
.to_owned(),
));
}
}
}
impl ReplLikeInterpreter for Python3_original {
fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
self.fetch_code()
}
fn build_repl(&mut self) -> Result<(), SniprunError> {
self.build()
}
fn execute_repl(&mut self) -> Result<String, SniprunError> {
self.execute()
}
fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
info!("begins add boilerplate repl");
//load save & load functions
let mut path_to_python_functions = self.plugin_root.clone();
path_to_python_functions.push_str("/src/interpreters/Python3_original/saveload.py");
let python_functions = std::fs::read_to_string(&path_to_python_functions).unwrap();
let klepto_memo = String::from("'") + &self.cache_dir.clone() + "/" + "memo" + "'";
let mut final_code = self.imports.clone();
final_code.push('\n');
final_code.push_str(&python_functions);
final_code.push('\n');
if self.read_previous_code().is_empty() {
//first run
self.save_code("Not the first run anymore".to_string());
} else {
//not the first run, should load old variables
{
final_code.push_str("sniprun142859_load(");
final_code.push_str(&klepto_memo);
final_code.push(')');
}
final_code.push('\n');
}
final_code.push_str(&unindent(&format!("{}{}", "\n", self.code.as_str())));
final_code.push('\n');
{
final_code.push_str("sniprun142859_save("); // if the run has not failed, save new variables
final_code.push_str(&klepto_memo);
final_code.push(')');
}
self.code = final_code.clone();
// info!("---{}---", &final_code);
Ok(())
}
}
#[cfg(test)]
mod test_python3_original {
use super::*;
use crate::*;
use crate::test_main::*;
#[test]
fn simple_print() {
let mut data = DataHolder::new();
data.current_bloc = String::from("print(\"lol\",1);");
let mut interpreter = Python3_original::new(data);
let res = interpreter.run_at_level(SupportLevel::Bloc);
// should panic if not an Ok()
let string_result = res.unwrap();
assert_eq!(string_result, "lol 1\n");
}
fn test_repl() {
let mut event_handler = fake_event();
event_handler.fill_data(&fake_msgpack());
event_handler.data.filetype = String::from("python");
event_handler.data.current_bloc = String::from("a=1");
event_handler.data.repl_enabled = vec![String::from("Python3_original")];
event_handler.data.sniprun_root_dir = String::from(".");
        //run the launcher (which selects, inits and runs an interpreter)
let launcher = launcher::Launcher::new(event_handler.data.clone());
let _result = launcher.select_and_run();
event_handler.data.current_bloc = String::from("print(a)");
let launcher = launcher::Launcher::new(event_handler.data.clone());
let result = launcher.select_and_run();
assert!(result.is_ok());
}
}
| {
if let Some(venv_array_config) = Python3_original::get_interpreter_option(&self.get_data(), "venv") {
if let Some(actual_vec_of_venv) = venv_array_config.as_array() {
for possible_venv in actual_vec_of_venv.iter() {
if let Some(possible_venv_str) = possible_venv.as_str() {
let venv_abs_path = path.to_str().unwrap().to_owned()
+ "/"
+ possible_venv_str
+ "/bin/activate_this.py";
if std::path::Path::new(&venv_abs_path).exists() {
self.venv = Some(venv_abs_path);
break;
}
}
}
}
}
} | conditional_block |
Python3_original.rs | #[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct Python3_original {
support_level: SupportLevel,
data: DataHolder,
code: String,
imports: String,
interpreter: String,
main_file_path: String,
plugin_root: String,
cache_dir: String,
venv: Option<String>,
}
impl Python3_original {
fn fetch_imports(&mut self) -> Result<(), SniprunError> {
if self.support_level < SupportLevel::Import {
return Ok(());
}
let mut v = vec![];
let mut errored = true;
if let Some(real_nvim_instance) = self.data.nvim_instance.clone() {
info!("got real nvim isntance");
let mut rvi = real_nvim_instance.lock().unwrap();
if let Ok(buffer) = rvi.get_current_buf() {
info!("got buffer");
if let Ok(buf_lines) = buffer.get_lines(&mut rvi, 0, -1, false) {
info!("got lines in buffer");
v = buf_lines;
errored = false;
}
}
}
if errored {
return Err(SniprunError::FetchCodeError);
}
info!("lines are : {:?}", v);
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
{
self.code = self.data.current_bloc.clone();
}
for line in v.iter() {
// info!("lines are : {}", line);
if (line.trim().starts_with("import ") || line.trim().starts_with("from ")) //basic selection
&& !line.trim().starts_with('#')
&& self.module_used(line, &self.code)
{
                // wrap in a try/except block in case an unneeded module is unavailable
self.imports = self.imports.clone() + "\n" + line;
}
}
info!("import founds : {:?}", self.imports);
Ok(())
}
fn module_used(&self, line: &str, code: &str) -> bool {
info!(
"checking for python module usage: line {} in code {}",
line, code
);
if line.contains('*') {
return true;
}
if line.contains(" as ") {
if let Some(name) = line.split(' ').last() {
return code.contains(name);
}
}
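        // For aliased imports (`import numpy as np`) only the alias, i.e. the
        // last whitespace-separated token, is matched against the code.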
for name in line
.replace(",", " ")
.replace("from", " ")
.replace("import ", " ")
.split(' ')
.filter(|&x| !x.is_empty())
{
if code.contains(name.trim()) {
return true;
}
}
false
}
fn fetch_config(&mut self) {
        let default_compiler = String::from("python3");
        self.interpreter = default_compiler;
        if let Some(used_compiler) = Python3_original::get_interpreter_option(&self.get_data(), "interpreter") {
            if let Some(compiler_string) = used_compiler.as_str() {
                info!("Using custom compiler: {}", compiler_string);
                self.interpreter = compiler_string.to_string();
            }
        }
if let Ok(path) = env::current_dir() {
if let Some(venv_array_config) = Python3_original::get_interpreter_option(&self.get_data(), "venv") {
if let Some(actual_vec_of_venv) = venv_array_config.as_array() {
for possible_venv in actual_vec_of_venv.iter() {
if let Some(possible_venv_str) = possible_venv.as_str() {
let venv_abs_path = path.to_str().unwrap().to_owned()
+ "/"
+ possible_venv_str
+ "/bin/activate_this.py";
if std::path::Path::new(&venv_abs_path).exists() {
self.venv = Some(venv_abs_path);
break;
}
}
}
}
}
}
}
}
impl Interpreter for Python3_original {
fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Python3_original> {
//create a subfolder in the cache folder
let rwd = data.work_dir.clone() + "/python3_original";
let mut builder = DirBuilder::new();
builder.recursive(true);
builder
.create(&rwd)
.expect("Could not create directory for python3-original");
//pre-create string pointing to main file's and binary's path
let mfp = rwd.clone() + "/main.py";
let pgr = data.sniprun_root_dir.clone();
Box::new(Python3_original {
data,
support_level: level,
code: String::from(""),
imports: String::from(""),
main_file_path: mfp,
plugin_root: pgr,
cache_dir: rwd,
interpreter: String::new(),
venv: None,
})
}
fn check_cli_args(&self) -> Result<(), SniprunError> {
// All cli arguments are sendable to python
// Though they will be ignored in REPL mode
Ok(())
}
fn get_name() -> String {
String::from("Python3_original")
}
fn behave_repl_like_default() -> bool {
false
}
fn has_repl_capability() -> bool {
true
}
fn default_for_filetype() -> bool {
true
}
fn get_supported_languages() -> Vec<String> {
vec![
String::from("Python 3"),
String::from("python"),
String::from("python3"),
String::from("py"),
]
}
fn get_current_level(&self) -> SupportLevel {
self.support_level
}
fn set_current_level(&mut self, level: SupportLevel) {
self.support_level = level;
}
fn get_data(&self) -> DataHolder {
self.data.clone()
}
fn get_max_support_level() -> SupportLevel {
SupportLevel::Import
}
fn fetch_code(&mut self) -> Result<(), SniprunError> {
self.fetch_config();
self.fetch_imports()?;
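        // Code selection priority: non-blank visual bloc first, then the
        // current line, otherwise run nothing.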
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
&& self.get_current_level() >= SupportLevel::Bloc
{
self.code = self.data.current_bloc.clone(); | self.code = self.data.current_line.clone();
} else {
self.code = String::from("");
}
Ok(())
}
fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
if !self.imports.is_empty() {
let mut indented_imports = String::new();
for import in self.imports.lines() {
indented_imports = indented_imports + "\t" + import + "\n";
}
self.imports = String::from("\ntry:\n") + &indented_imports + "\nexcept:\n\tpass\n";
}
let mut source_venv = String::new();
if let Some(venv_path) = &self.venv {
info!("loading venv: {}", venv_path);
source_venv = source_venv + "\n" + "activate_this_file = \"" + venv_path + "\"";
source_venv += "\nexec(compile(open(activate_this_file, \"rb\").read(), activate_this_file, 'exec'), dict(__file__=activate_this_file))\n";
}
self.code = source_venv
+ &self.imports.clone()
+ &unindent(&format!("{}{}", "\n", self.code.as_str()));
Ok(())
}
fn build(&mut self) -> Result<(), SniprunError> {
// info!("python code:\n {}", self.code);
write(&self.main_file_path, &self.code)
.expect("Unable to write to file for python3_original");
Ok(())
}
fn execute(&mut self) -> Result<String, SniprunError> {
let output = Command::new(&self.interpreter)
.arg(&self.main_file_path)
.args(&self.get_data().cli_args)
.output()
.expect("Unable to start process");
if output.status.success() {
Ok(String::from_utf8(output.stdout).unwrap())
} else {
return Err(SniprunError::RuntimeError(
String::from_utf8(output.stderr.clone())
.unwrap()
.lines()
.last()
.unwrap_or(&String::from_utf8(output.stderr).unwrap())
.to_owned(),
));
}
}
}
impl ReplLikeInterpreter for Python3_original {
fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
self.fetch_code()
}
fn build_repl(&mut self) -> Result<(), SniprunError> {
self.build()
}
fn execute_repl(&mut self) -> Result<String, SniprunError> {
self.execute()
}
fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
info!("begins add boilerplate repl");
//load save & load functions
let mut path_to_python_functions = self.plugin_root.clone();
path_to_python_functions.push_str("/src/interpreters/Python3_original/saveload.py");
let python_functions = std::fs::read_to_string(&path_to_python_functions).unwrap();
let klepto_memo = String::from("'") + &self.cache_dir.clone() + "/" + "memo" + "'";
let mut final_code = self.imports.clone();
final_code.push('\n');
final_code.push_str(&python_functions);
final_code.push('\n');
if self.read_previous_code().is_empty() {
//first run
self.save_code("Not the first run anymore".to_string());
} else {
//not the first run, should load old variables
{
final_code.push_str("sniprun142859_load(");
final_code.push_str(&klepto_memo);
final_code.push(')');
}
final_code.push('\n');
}
final_code.push_str(&unindent(&format!("{}{}", "\n", self.code.as_str())));
final_code.push('\n');
{
final_code.push_str("sniprun142859_save("); // if the run has not failed, save new variables
final_code.push_str(&klepto_memo);
final_code.push(')');
}
self.code = final_code.clone();
// info!("---{}---", &final_code);
Ok(())
}
}
#[cfg(test)]
mod test_python3_original {
use super::*;
use crate::*;
use crate::test_main::*;
#[test]
fn simple_print() {
let mut data = DataHolder::new();
data.current_bloc = String::from("print(\"lol\",1);");
let mut interpreter = Python3_original::new(data);
let res = interpreter.run_at_level(SupportLevel::Bloc);
// should panic if not an Ok()
let string_result = res.unwrap();
assert_eq!(string_result, "lol 1\n");
}
fn test_repl() {
let mut event_handler = fake_event();
event_handler.fill_data(&fake_msgpack());
event_handler.data.filetype = String::from("python");
event_handler.data.current_bloc = String::from("a=1");
event_handler.data.repl_enabled = vec![String::from("Python3_original")];
event_handler.data.sniprun_root_dir = String::from(".");
        //run the launcher (which selects, inits and runs an interpreter)
let launcher = launcher::Launcher::new(event_handler.data.clone());
let _result = launcher.select_and_run();
event_handler.data.current_bloc = String::from("print(a)");
let launcher = launcher::Launcher::new(event_handler.data.clone());
let result = launcher.select_and_run();
assert!(result.is_ok());
}
} | } else if !self.data.current_line.replace(" ", "").is_empty()
&& self.get_current_level() >= SupportLevel::Line
{ | random_line_split |
Python3_original.rs | #[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct Python3_original {
support_level: SupportLevel,
data: DataHolder,
code: String,
imports: String,
interpreter: String,
main_file_path: String,
plugin_root: String,
cache_dir: String,
venv: Option<String>,
}
impl Python3_original {
fn fetch_imports(&mut self) -> Result<(), SniprunError> {
if self.support_level < SupportLevel::Import {
return Ok(());
}
let mut v = vec![];
let mut errored = true;
if let Some(real_nvim_instance) = self.data.nvim_instance.clone() {
info!("got real nvim isntance");
let mut rvi = real_nvim_instance.lock().unwrap();
if let Ok(buffer) = rvi.get_current_buf() {
info!("got buffer");
if let Ok(buf_lines) = buffer.get_lines(&mut rvi, 0, -1, false) {
info!("got lines in buffer");
v = buf_lines;
errored = false;
}
}
}
if errored {
return Err(SniprunError::FetchCodeError);
}
info!("lines are : {:?}", v);
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
{
self.code = self.data.current_bloc.clone();
}
for line in v.iter() {
// info!("lines are : {}", line);
if (line.trim().starts_with("import ") || line.trim().starts_with("from ")) //basic selection
&& !line.trim().starts_with('#')
&& self.module_used(line, &self.code)
{
                // wrap in a try/except block in case an unneeded module is unavailable
self.imports = self.imports.clone() + "\n" + line;
}
}
info!("import founds : {:?}", self.imports);
Ok(())
}
fn module_used(&self, line: &str, code: &str) -> bool {
info!(
"checking for python module usage: line {} in code {}",
line, code
);
if line.contains('*') {
return true;
}
if line.contains(" as ") {
if let Some(name) = line.split(' ').last() {
return code.contains(name);
}
}
for name in line
.replace(",", " ")
.replace("from", " ")
.replace("import ", " ")
.split(' ')
.filter(|&x| !x.is_empty())
{
if code.contains(name.trim()) {
return true;
}
}
false
}
fn fetch_config(&mut self) {
        let default_compiler = String::from("python3");
        self.interpreter = default_compiler;
        if let Some(used_compiler) = Python3_original::get_interpreter_option(&self.get_data(), "interpreter") {
            if let Some(compiler_string) = used_compiler.as_str() {
                info!("Using custom compiler: {}", compiler_string);
                self.interpreter = compiler_string.to_string();
            }
        }
if let Ok(path) = env::current_dir() {
if let Some(venv_array_config) = Python3_original::get_interpreter_option(&self.get_data(), "venv") {
if let Some(actual_vec_of_venv) = venv_array_config.as_array() {
for possible_venv in actual_vec_of_venv.iter() {
if let Some(possible_venv_str) = possible_venv.as_str() {
let venv_abs_path = path.to_str().unwrap().to_owned()
+ "/"
+ possible_venv_str
+ "/bin/activate_this.py";
if std::path::Path::new(&venv_abs_path).exists() {
self.venv = Some(venv_abs_path);
break;
}
}
}
}
}
}
}
}
impl Interpreter for Python3_original {
fn new_with_level(data: DataHolder, level: SupportLevel) -> Box<Python3_original> {
//create a subfolder in the cache folder
let rwd = data.work_dir.clone() + "/python3_original";
let mut builder = DirBuilder::new();
builder.recursive(true);
builder
.create(&rwd)
.expect("Could not create directory for python3-original");
//pre-create string pointing to main file's and binary's path
let mfp = rwd.clone() + "/main.py";
let pgr = data.sniprun_root_dir.clone();
Box::new(Python3_original {
data,
support_level: level,
code: String::from(""),
imports: String::from(""),
main_file_path: mfp,
plugin_root: pgr,
cache_dir: rwd,
interpreter: String::new(),
venv: None,
})
}
fn check_cli_args(&self) -> Result<(), SniprunError> |
fn get_name() -> String {
String::from("Python3_original")
}
fn behave_repl_like_default() -> bool {
false
}
fn has_repl_capability() -> bool {
true
}
fn default_for_filetype() -> bool {
true
}
fn get_supported_languages() -> Vec<String> {
vec![
String::from("Python 3"),
String::from("python"),
String::from("python3"),
String::from("py"),
]
}
fn get_current_level(&self) -> SupportLevel {
self.support_level
}
fn set_current_level(&mut self, level: SupportLevel) {
self.support_level = level;
}
fn get_data(&self) -> DataHolder {
self.data.clone()
}
fn get_max_support_level() -> SupportLevel {
SupportLevel::Import
}
fn fetch_code(&mut self) -> Result<(), SniprunError> {
self.fetch_config();
self.fetch_imports()?;
if !self
.data
.current_bloc
.replace(&[' ', '\t', '\n', '\r'][..], "")
.is_empty()
&& self.get_current_level() >= SupportLevel::Bloc
{
self.code = self.data.current_bloc.clone();
} else if !self.data.current_line.replace(" ", "").is_empty()
&& self.get_current_level() >= SupportLevel::Line
{
self.code = self.data.current_line.clone();
} else {
self.code = String::from("");
}
Ok(())
}
fn add_boilerplate(&mut self) -> Result<(), SniprunError> {
if !self.imports.is_empty() {
let mut indented_imports = String::new();
for import in self.imports.lines() {
indented_imports = indented_imports + "\t" + import + "\n";
}
self.imports = String::from("\ntry:\n") + &indented_imports + "\nexcept:\n\tpass\n";
}
let mut source_venv = String::new();
if let Some(venv_path) = &self.venv {
info!("loading venv: {}", venv_path);
source_venv = source_venv + "\n" + "activate_this_file = \"" + venv_path + "\"";
source_venv += "\nexec(compile(open(activate_this_file, \"rb\").read(), activate_this_file, 'exec'), dict(__file__=activate_this_file))\n";
}
self.code = source_venv
+ &self.imports.clone()
+ &unindent(&format!("{}{}", "\n", self.code.as_str()));
Ok(())
}
fn build(&mut self) -> Result<(), SniprunError> {
// info!("python code:\n {}", self.code);
write(&self.main_file_path, &self.code)
.expect("Unable to write to file for python3_original");
Ok(())
}
fn execute(&mut self) -> Result<String, SniprunError> {
let output = Command::new(&self.interpreter)
.arg(&self.main_file_path)
.args(&self.get_data().cli_args)
.output()
.expect("Unable to start process");
if output.status.success() {
Ok(String::from_utf8(output.stdout).unwrap())
} else {
return Err(SniprunError::RuntimeError(
String::from_utf8(output.stderr.clone())
.unwrap()
.lines()
.last()
.unwrap_or(&String::from_utf8(output.stderr).unwrap())
.to_owned(),
));
}
}
}
impl ReplLikeInterpreter for Python3_original {
fn fetch_code_repl(&mut self) -> Result<(), SniprunError> {
self.fetch_code()
}
fn build_repl(&mut self) -> Result<(), SniprunError> {
self.build()
}
fn execute_repl(&mut self) -> Result<String, SniprunError> {
self.execute()
}
fn add_boilerplate_repl(&mut self) -> Result<(), SniprunError> {
info!("begins add boilerplate repl");
//load save & load functions
let mut path_to_python_functions = self.plugin_root.clone();
path_to_python_functions.push_str("/src/interpreters/Python3_original/saveload.py");
let python_functions = std::fs::read_to_string(&path_to_python_functions).unwrap();
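        // saveload.py ships with the plugin and defines the
        // sniprun142859_save/_load helpers used below; the odd prefix avoids
        // clashing with user-defined names.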
let klepto_memo = String::from("'") + &self.cache_dir.clone() + "/" + "memo" + "'";
let mut final_code = self.imports.clone();
final_code.push('\n');
final_code.push_str(&python_functions);
final_code.push('\n');
if self.read_previous_code().is_empty() {
//first run
self.save_code("Not the first run anymore".to_string());
} else {
//not the first run, should load old variables
{
final_code.push_str("sniprun142859_load(");
final_code.push_str(&klepto_memo);
final_code.push(')');
}
final_code.push('\n');
}
final_code.push_str(&unindent(&format!("{}{}", "\n", self.code.as_str())));
final_code.push('\n');
{
final_code.push_str("sniprun142859_save("); // if the run has not failed, save new variables
final_code.push_str(&klepto_memo);
final_code.push(')');
}
self.code = final_code.clone();
// info!("---{}---", &final_code);
Ok(())
}
}
#[cfg(test)]
mod test_python3_original {
use super::*;
use crate::*;
use crate::test_main::*;
#[test]
fn simple_print() {
let mut data = DataHolder::new();
data.current_bloc = String::from("print(\"lol\",1);");
let mut interpreter = Python3_original::new(data);
let res = interpreter.run_at_level(SupportLevel::Bloc);
// should panic if not an Ok()
let string_result = res.unwrap();
assert_eq!(string_result, "lol 1\n");
}
fn test_repl() {
let mut event_handler = fake_event();
event_handler.fill_data(&fake_msgpack());
event_handler.data.filetype = String::from("python");
event_handler.data.current_bloc = String::from("a=1");
event_handler.data.repl_enabled = vec![String::from("Python3_original")];
event_handler.data.sniprun_root_dir = String::from(".");
        //run the launcher (which selects, inits and runs an interpreter)
let launcher = launcher::Launcher::new(event_handler.data.clone());
let _result = launcher.select_and_run();
event_handler.data.current_bloc = String::from("print(a)");
let launcher = launcher::Launcher::new(event_handler.data.clone());
let result = launcher.select_and_run();
assert!(result.is_ok());
}
}
| {
// All cli arguments are sendable to python
// Though they will be ignored in REPL mode
Ok(())
} | identifier_body |
launch_fishbowl.py | from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QProgressBar, QComboBox, QDesktopWidget, \
QGridLayout, QSlider, QGroupBox, QVBoxLayout, QHBoxLayout, QStyle, QScrollBar, QMainWindow, QAction, QDialog
from PyQt5.QtCore import QDateTime, Qt, QTimer, QPoint, pyqtSignal, QLineF
from PyQt5.QtGui import QFont, QColor, QPainter, QBrush, QPen, QPalette
import time
import numpy as np
import threading
from collections import deque
from keras import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import random
class DynamicLabel(QLabel):
signal = pyqtSignal(object)
def __init__(self, base_text):
super().__init__()
self.font = QFont("Times", 30, QFont.Bold)
self.setFont(self.font)
self.base_text = base_text
self.setText(self.base_text + "0")
self.signal.connect(lambda x: self.setText(self.base_text + x))
class NPC():
allowed_dirs = np.array([[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]])
def __init__(self, diameter, fishbowl_diameter):
self.d = diameter / fishbowl_diameter
self.original_color = Qt.black
self.color = Qt.black
self.coords = np.array([0, 0])
self.v = np.array([0, 0])
self.pvdi = np.array([0, 0])
self.first = True
self.dead = False
def move(self):
if not self.dead:
self.coords = self.coords + self.v
if not self.inside_sphere(self.coords) or self.first:
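                # Pick a fresh heading on a wall hit (or on the very first
                # move); after a bounce the previous heading and its two
                # neighbours are excluded, so the NPC turns away from the wall.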
if not self.first:
forbidden_dirs = [self.pvdi - 1 if self.pvdi != 0 else len(self.allowed_dirs) - 1,
self.pvdi,
self.pvdi + 1 if self.pvdi != len(self.allowed_dirs) - 1 else 0]
available_dirs = np.delete(self.allowed_dirs, forbidden_dirs, axis=0)
chosen_dir = np.random.randint(low=0, high=len(available_dirs))
self.dir = available_dirs[chosen_dir, :]
self.pvdi = np.where(np.all(self.allowed_dirs == self.dir, axis=1))[0][0]
else:
chosen_dir = np.random.randint(low=0, high=len(self.allowed_dirs))
self.dir = self.allowed_dirs[chosen_dir, :]
self.pvdi = chosen_dir
self.first = False
self.v = self.dir * np.random.randint(low=40, high=100, size=2) * 0.00002
self.coords = self.spherical_clip(self.coords)
def check_killed_by(self, player):
p = player.coords
e = self.coords
dist = self.dist(e, p)
if dist < self.d:
self.dead = True
self.color = Qt.gray
@staticmethod
def dist(a, b):
return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def revive(self):
self.color = self.original_color
self.coords = np.array([0, 0])
self.v = np.array([0, 0])
self.first = True
self.dead = False
def spherical_clip(self, p, r=0.499):
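        # Radially project p back inside radius r, shrunk by the NPC's own
        # radius so the ball's edge stays within the bowl.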
r -= self.d/2
p = np.array(p)
dist = np.sqrt(np.sum(p ** 2))
if dist > r:
p = p * (r / dist)
return p
def inside_sphere(self, p, r=0.50):
|
class Enemy(NPC):
def __init__(self, diameter, fishbowl_diameter):
super().__init__(diameter, fishbowl_diameter)
self.original_color = Qt.red
self.color = Qt.red
class Player(NPC):
"""
https://keon.io/deep-q-learning/
"""
def __init__(self, diameter, fishbowl_diameter, state_size):
super().__init__(diameter, fishbowl_diameter)
self.original_color = Qt.blue
self.color = Qt.blue
# ----------------------------
self.state_size = state_size
self.action_size = 2 # we move in a 2D world
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.model = self._build_model()
    def move(self, enemies_coords):  # TODO apparently PEP8 does not let you change the args in an inherited method...
# build current state
state = np.concatenate([enemies_coords, np.reshape(self.coords, (1, -1))], axis=0)
state = np.reshape(state, (1, -1))
# choose an action
action = np.squeeze(self.model.predict(state))
# update player coords
self.coords = self.coords + (action - 1) * 0.002
if not self.inside_sphere(self.coords):
self.coords = self.spherical_clip(self.coords)
# compute reward
reward = 1 / np.min([self.dist(self.coords, x) for x in enemies_coords])
# print(reward)
# build next state
next_state = np.reshape(np.concatenate([enemies_coords, np.reshape(self.coords, (1, -1))], axis=0), (1, -1))
# store state to memory
self.remember(state, action, reward, next_state, False)
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='sigmoid'))
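        # NB: sigmoid keeps each output in [0, 1]; move() then applies
        # (action - 1) * 0.002, so every step lies in [-0.002, 0] per axis.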
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
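        # Vanilla DQN experience replay: regress Q(state) toward
        # reward + gamma * max Q(next_state) on a random minibatch, then
        # decay epsilon.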
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = reward + self.gamma * \
np.amax(self.model.predict(next_state)[0])
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
class Fishbowl(QWidget):
animation_emitter = pyqtSignal(object)
def __init__(self, n_games_signal):
super().__init__()
self.render_bowl = True
# connect signal from emitter to trigger the animation
self.animation_emitter.connect(lambda x: self._move_npcs(x))
self.fishbowl_color = Qt.black
self.fishbowl_size = 350
self.wheel_size = self.fishbowl_size * 1.15
self.wheel_width = self.fishbowl_size * 0.15
self.npc_size = self.fishbowl_size * 0.3
self.fishbowl_border_size = self.fishbowl_size * 0.004
self.fishbowl_thin_border_size = self.fishbowl_size * 0.002
self.nenemies = 10
self.enemies = [Enemy(self.npc_size, self.fishbowl_size) for _ in range(self.nenemies)]
self.player = Player(self.npc_size, self.fishbowl_size, (self.nenemies + 1) * 2)
self.start_flag = True
self.start_time = None
self.time_advantage = 2
self.n_games = 0
self.n_games_signal = n_games_signal
def scale_point(self, point):
original_max = 0.5
new_max = self.fishbowl_size
return ((p / original_max) * new_max for p in point)
def _move_npcs(self, command):
# start moving enemies but not player yet
if self.start_flag:
self.start_flag = False
self.start_time = time.time()
go_player = time.time() - self.start_time > self.time_advantage
# check someone is still alive
alive_enemies = [x for x in self.enemies if not x.dead]
if not alive_enemies:
if len(self.player.memory) > 32:
self.player.replay(32)
self.restart_game()
return
for enemy in alive_enemies:
enemy.move()
# check dead
if go_player:
enemy.check_killed_by(self.player)
if go_player:
pass
self.player.move([x.coords if not x.dead else [-1, -1] for x in self.enemies])
if self.render_bowl:
self.repaint()
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawWidget(qp)
qp.end()
def drawWidget(self, qp):
c = self.rect().center()
c_coords = c.x(), c.y()
background_color = self.palette().color(QPalette.Background)
# paint inner trackpad
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_border_size, Qt.SolidLine))
# draw fishbowl
qp.setBrush(QBrush(Qt.gray, Qt.SolidPattern))
qp.drawEllipse(c, *([self.fishbowl_size] * 2))
# draw axis lines
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_thin_border_size, Qt.DashDotDotLine))
for angle in range(0, 420, 45):
line = QLineF(); line.setP1(c); line.setAngle(angle); line.setLength(self.fishbowl_size)
qp.drawLine(line)
# draw wheel separators
line = QLineF(); line.setP1(c + QPoint(self.wheel_size, 0)); line.setAngle(0); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(0, -self.wheel_size)); line.setAngle(90); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(-self.wheel_size, 0)); line.setAngle(180); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(0, self.wheel_size)); line.setAngle(270); line.setLength(self.wheel_width)
qp.drawLine(line)
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_border_size, Qt.SolidLine))
# draw dead enemies
for i, enemy in enumerate([x for x in self.enemies if x.dead]):
qp.setBrush(QBrush(enemy.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(enemy.coords)), *([self.npc_size] * 2))
# draw alive enemies
for i, enemy in enumerate([x for x in self.enemies if not x.dead]):
qp.setBrush(QBrush(enemy.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(enemy.coords)), *([self.npc_size] * 2))
# draw player
qp.setBrush(QBrush(self.player.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(self.player.coords)), *([self.npc_size] * 2))
def animate_balls(self):
self.update_thread = threading.Thread(target=self._animate_balls)
self.update_thread.daemon = True
self.update_thread.start()
def _animate_balls(self):
while True:
time.sleep(0.00001)
self.animation_emitter.emit("animate")
def restart_game(self):
self.n_games += 1
self.n_games_signal.emit(str(self.n_games))
self.start_flag = True
for enemy in self.enemies:
enemy.revive()
self.player.revive()
class gameUI:
def __init__(self, UI_name="fishbowl"):
self.app = QApplication([UI_name])
self.app.setObjectName(UI_name)
self.UI_name = UI_name
# set app style
self.app.setStyle("Fusion")
# create main window
self.window = QMainWindow()
self.window.setWindowTitle(UI_name)
self.window.setObjectName(UI_name)
self.main_group = QGroupBox()
self.window.setCentralWidget(self.main_group)
# set window geometry
ag = QDesktopWidget().availableGeometry()
self.window.move(int(ag.width()*0.15), int(ag.height()*0.05))
self.window.setMinimumWidth(int(ag.width()*0.3))
self.window.setMinimumHeight(int(ag.height()*0.4))
self.layout = QGridLayout()
self.n_games_label = DynamicLabel("Game ")
self.layout.addWidget(self.n_games_label, 0,0,1,10)
self.fishbowl = Fishbowl(self.n_games_label.signal)
self.layout.addWidget(self.fishbowl, 1, 0, 10, 10)
self.main_group.setLayout(self.layout)
# set layout inside window
self.window.setLayout(self.layout)
self.window.show()
def start_ui(self):
"""
starts the ball animation thread and launches the QT app
"""
self.start_animation()
self.app.exec()
def start_animation(self):
"""
waits 1 second so that the QT app is running and then launches the ball animation thread
"""
time.sleep(1)
self.fishbowl.animate_balls()
if __name__ == "__main__":
ui = gameUI()
ui.start_ui()
| r -= self.d/2
dist = np.sqrt(np.sum(np.array(p) ** 2))
if dist > r:
return False
else:
return True | identifier_body |
launch_fishbowl.py | from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QProgressBar, QComboBox, QDesktopWidget, \
QGridLayout, QSlider, QGroupBox, QVBoxLayout, QHBoxLayout, QStyle, QScrollBar, QMainWindow, QAction, QDialog
from PyQt5.QtCore import QDateTime, Qt, QTimer, QPoint, pyqtSignal, QLineF
from PyQt5.QtGui import QFont, QColor, QPainter, QBrush, QPen, QPalette
import time
import numpy as np
import threading
from collections import deque
from keras import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import random
class DynamicLabel(QLabel):
signal = pyqtSignal(object)
def __init__(self, base_text):
super().__init__()
self.font = QFont("Times", 30, QFont.Bold)
self.setFont(self.font)
self.base_text = base_text
self.setText(self.base_text + "0")
self.signal.connect(lambda x: self.setText(self.base_text + x))
class NPC():
allowed_dirs = np.array([[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]])
def __init__(self, diameter, fishbowl_diameter):
self.d = diameter / fishbowl_diameter
self.original_color = Qt.black
self.color = Qt.black
self.coords = np.array([0, 0])
self.v = np.array([0, 0])
self.pvdi = np.array([0, 0])
self.first = True
self.dead = False
def move(self):
if not self.dead:
self.coords = self.coords + self.v
if not self.inside_sphere(self.coords) or self.first:
if not self.first:
forbidden_dirs = [self.pvdi - 1 if self.pvdi != 0 else len(self.allowed_dirs) - 1,
self.pvdi,
self.pvdi + 1 if self.pvdi != len(self.allowed_dirs) - 1 else 0]
available_dirs = np.delete(self.allowed_dirs, forbidden_dirs, axis=0)
chosen_dir = np.random.randint(low=0, high=len(available_dirs))
self.dir = available_dirs[chosen_dir, :]
self.pvdi = np.where(np.all(self.allowed_dirs == self.dir, axis=1))[0][0]
else:
chosen_dir = np.random.randint(low=0, high=len(self.allowed_dirs))
self.dir = self.allowed_dirs[chosen_dir, :]
self.pvdi = chosen_dir
self.first = False
self.v = self.dir * np.random.randint(low=40, high=100, size=2) * 0.00002
self.coords = self.spherical_clip(self.coords)
def check_killed_by(self, player):
p = player.coords
e = self.coords
dist = self.dist(e, p)
if dist < self.d:
self.dead = True
self.color = Qt.gray
@staticmethod
def dist(a, b):
return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def revive(self):
self.color = self.original_color
self.coords = np.array([0, 0])
self.v = np.array([0, 0])
self.first = True
self.dead = False
def spherical_clip(self, p, r=0.499):
r -= self.d/2
p = np.array(p)
dist = np.sqrt(np.sum(p ** 2))
if dist > r:
p = p * (r / dist)
return p
def inside_sphere(self, p, r=0.50):
r -= self.d/2
dist = np.sqrt(np.sum(np.array(p) ** 2))
if dist > r:
return False
else:
return True
class Enemy(NPC):
def __init__(self, diameter, fishbowl_diameter):
super().__init__(diameter, fishbowl_diameter)
self.original_color = Qt.red
self.color = Qt.red
class Player(NPC):
"""
https://keon.io/deep-q-learning/
"""
def __init__(self, diameter, fishbowl_diameter, state_size):
super().__init__(diameter, fishbowl_diameter)
self.original_color = Qt.blue
self.color = Qt.blue
# ----------------------------
self.state_size = state_size
self.action_size = 2 # we move in a 2D world
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.model = self._build_model()
    def move(self, enemies_coords):  # TODO apparently PEP8 does not let you change the args in an inherited method...
# build current state
state = np.concatenate([enemies_coords, np.reshape(self.coords, (1, -1))], axis=0)
state = np.reshape(state, (1, -1))
# choose an action
action = np.squeeze(self.model.predict(state))
# update player coords
self.coords = self.coords + (action - 1) * 0.002
if not self.inside_sphere(self.coords):
self.coords = self.spherical_clip(self.coords)
# compute reward
reward = 1 / np.min([self.dist(self.coords, x) for x in enemies_coords])
# print(reward)
# build next state
next_state = np.reshape(np.concatenate([enemies_coords, np.reshape(self.coords, (1, -1))], axis=0), (1, -1))
# store state to memory
self.remember(state, action, reward, next_state, False)
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='sigmoid'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = reward + self.gamma * \
np.amax(self.model.predict(next_state)[0])
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
class Fishbowl(QWidget):
animation_emitter = pyqtSignal(object)
def __init__(self, n_games_signal):
super().__init__()
self.render_bowl = True
# connect signal from emitter to trigger the animation
self.animation_emitter.connect(lambda x: self._move_npcs(x))
self.fishbowl_color = Qt.black
self.fishbowl_size = 350
self.wheel_size = self.fishbowl_size * 1.15
self.wheel_width = self.fishbowl_size * 0.15
self.npc_size = self.fishbowl_size * 0.3
self.fishbowl_border_size = self.fishbowl_size * 0.004
self.fishbowl_thin_border_size = self.fishbowl_size * 0.002
self.nenemies = 10
self.enemies = [Enemy(self.npc_size, self.fishbowl_size) for _ in range(self.nenemies)]
self.player = Player(self.npc_size, self.fishbowl_size, (self.nenemies + 1) * 2)
self.start_flag = True
self.start_time = None
self.time_advantage = 2
self.n_games = 0
self.n_games_signal = n_games_signal
def scale_point(self, point):
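        # Map normalized bowl coordinates (radius 0.5) to pixel offsets from
        # the widget centre.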
original_max = 0.5
new_max = self.fishbowl_size
return ((p / original_max) * new_max for p in point)
def _move_npcs(self, command):
# start moving enemies but not player yet
if self.start_flag:
self.start_flag = False
self.start_time = time.time()
go_player = time.time() - self.start_time > self.time_advantage
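        # Enemies get a time_advantage-second head start during which the
        # player cannot score kills yet.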
# check someone is still alive
alive_enemies = [x for x in self.enemies if not x.dead]
if not alive_enemies:
if len(self.player.memory) > 32:
self.player.replay(32)
self.restart_game()
return
for enemy in alive_enemies:
enemy.move()
# check dead
if go_player:
enemy.check_killed_by(self.player)
if go_player:
pass
self.player.move([x.coords if not x.dead else [-1, -1] for x in self.enemies])
if self.render_bowl:
self.repaint()
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawWidget(qp)
qp.end()
def drawWidget(self, qp):
c = self.rect().center()
c_coords = c.x(), c.y()
background_color = self.palette().color(QPalette.Background)
# paint inner trackpad
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_border_size, Qt.SolidLine))
# draw fishbowl
qp.setBrush(QBrush(Qt.gray, Qt.SolidPattern))
qp.drawEllipse(c, *([self.fishbowl_size] * 2))
# draw axis lines
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_thin_border_size, Qt.DashDotDotLine))
for angle in range(0, 420, 45):
line = QLineF(); line.setP1(c); line.setAngle(angle); line.setLength(self.fishbowl_size)
qp.drawLine(line)
# draw wheel separators
line = QLineF(); line.setP1(c + QPoint(self.wheel_size, 0)); line.setAngle(0); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(0, -self.wheel_size)); line.setAngle(90); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(-self.wheel_size, 0)); line.setAngle(180); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(0, self.wheel_size)); line.setAngle(270); line.setLength(self.wheel_width)
qp.drawLine(line)
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_border_size, Qt.SolidLine))
# draw dead enemies
for i, enemy in enumerate([x for x in self.enemies if x.dead]):
qp.setBrush(QBrush(enemy.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(enemy.coords)), *([self.npc_size] * 2))
# draw alive enemies
for i, enemy in enumerate([x for x in self.enemies if not x.dead]):
qp.setBrush(QBrush(enemy.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(enemy.coords)), *([self.npc_size] * 2))
# draw player
qp.setBrush(QBrush(self.player.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(self.player.coords)), *([self.npc_size] * 2))
def animate_balls(self):
self.update_thread = threading.Thread(target=self._animate_balls)
self.update_thread.daemon = True
self.update_thread.start()
def _animate_balls(self):
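        # Runs on a daemon thread; emitting the signal marshals each tick
        # back to the Qt main thread, which does the actual moving/painting.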
while True:
time.sleep(0.00001)
self.animation_emitter.emit("animate")
def restart_game(self):
self.n_games += 1
self.n_games_signal.emit(str(self.n_games))
self.start_flag = True
for enemy in self.enemies:
enemy.revive()
self.player.revive()
class gameUI:
def __init__(self, UI_name="fishbowl"):
self.app = QApplication([UI_name])
self.app.setObjectName(UI_name)
self.UI_name = UI_name
# set app style
self.app.setStyle("Fusion")
# create main window
self.window = QMainWindow()
self.window.setWindowTitle(UI_name)
self.window.setObjectName(UI_name)
self.main_group = QGroupBox()
self.window.setCentralWidget(self.main_group)
# set window geometry
ag = QDesktopWidget().availableGeometry()
self.window.move(int(ag.width()*0.15), int(ag.height()*0.05))
self.window.setMinimumWidth(int(ag.width()*0.3))
self.window.setMinimumHeight(int(ag.height()*0.4))
self.layout = QGridLayout()
self.n_games_label = DynamicLabel("Game ")
self.layout.addWidget(self.n_games_label, 0,0,1,10)
self.fishbowl = Fishbowl(self.n_games_label.signal)
self.layout.addWidget(self.fishbowl, 1, 0, 10, 10)
self.main_group.setLayout(self.layout)
# set layout inside window
self.window.setLayout(self.layout)
self.window.show()
def start_ui(self):
"""
starts the ball animation thread and launches the QT app
"""
self.start_animation()
self.app.exec()
def | (self):
"""
waits 1 second so that the QT app is running and then launches the ball animation thread
"""
time.sleep(1)
self.fishbowl.animate_balls()
if __name__ == "__main__":
ui = gameUI()
ui.start_ui()
| start_animation | identifier_name |
launch_fishbowl.py | from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QProgressBar, QComboBox, QDesktopWidget, \
QGridLayout, QSlider, QGroupBox, QVBoxLayout, QHBoxLayout, QStyle, QScrollBar, QMainWindow, QAction, QDialog
from PyQt5.QtCore import QDateTime, Qt, QTimer, QPoint, pyqtSignal, QLineF
from PyQt5.QtGui import QFont, QColor, QPainter, QBrush, QPen, QPalette
import time
import numpy as np
import threading
from collections import deque
from keras import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import random
class DynamicLabel(QLabel):
signal = pyqtSignal(object)
def __init__(self, base_text):
super().__init__()
self.font = QFont("Times", 30, QFont.Bold)
self.setFont(self.font)
self.base_text = base_text
self.setText(self.base_text + "0")
self.signal.connect(lambda x: self.setText(self.base_text + x))
class NPC():
allowed_dirs = np.array([[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]])
def __init__(self, diameter, fishbowl_diameter):
self.d = diameter / fishbowl_diameter
self.original_color = Qt.black
self.color = Qt.black
self.coords = np.array([0, 0])
self.v = np.array([0, 0])
self.pvdi = np.array([0, 0])
self.first = True
self.dead = False
def move(self):
if not self.dead:
self.coords = self.coords + self.v
if not self.inside_sphere(self.coords) or self.first:
if not self.first:
forbidden_dirs = [self.pvdi - 1 if self.pvdi != 0 else len(self.allowed_dirs) - 1,
self.pvdi,
self.pvdi + 1 if self.pvdi != len(self.allowed_dirs) - 1 else 0]
available_dirs = np.delete(self.allowed_dirs, forbidden_dirs, axis=0)
chosen_dir = np.random.randint(low=0, high=len(available_dirs))
self.dir = available_dirs[chosen_dir, :]
self.pvdi = np.where(np.all(self.allowed_dirs == self.dir, axis=1))[0][0]
else:
|
self.first = False
self.v = self.dir * np.random.randint(low=40, high=100, size=2) * 0.00002
self.coords = self.spherical_clip(self.coords)
def check_killed_by(self, player):
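        # Simple circle-overlap test: die when the centre distance drops
        # below one normalized diameter.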
p = player.coords
e = self.coords
dist = self.dist(e, p)
if dist < self.d:
self.dead = True
self.color = Qt.gray
@staticmethod
def dist(a, b):
return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def revive(self):
self.color = self.original_color
self.coords = np.array([0, 0])
self.v = np.array([0, 0])
self.first = True
self.dead = False
def spherical_clip(self, p, r=0.499):
r -= self.d/2
p = np.array(p)
dist = np.sqrt(np.sum(p ** 2))
if dist > r:
p = p * (r / dist)
return p
def inside_sphere(self, p, r=0.50):
r -= self.d/2
dist = np.sqrt(np.sum(np.array(p) ** 2))
if dist > r:
return False
else:
return True
class Enemy(NPC):
def __init__(self, diameter, fishbowl_diameter):
super().__init__(diameter, fishbowl_diameter)
self.original_color = Qt.red
self.color = Qt.red
class Player(NPC):
"""
https://keon.io/deep-q-learning/
"""
def __init__(self, diameter, fishbowl_diameter, state_size):
super().__init__(diameter, fishbowl_diameter)
self.original_color = Qt.blue
self.color = Qt.blue
# ----------------------------
self.state_size = state_size
self.action_size = 2 # we move in a 2D world
self.memory = deque(maxlen=2000)
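        # Replay buffer keeps only the 2000 most recent transitions.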
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.model = self._build_model()
    def move(self, enemies_coords):  # TODO apparently PEP8 does not let you change the args in an inherited method...
# build current state
state = np.concatenate([enemies_coords, np.reshape(self.coords, (1, -1))], axis=0)
state = np.reshape(state, (1, -1))
# choose an action
action = np.squeeze(self.model.predict(state))
# update player coords
self.coords = self.coords + (action - 1) * 0.002
if not self.inside_sphere(self.coords):
self.coords = self.spherical_clip(self.coords)
# compute reward
reward = 1 / np.min([self.dist(self.coords, x) for x in enemies_coords])
# print(reward)
# build next state
next_state = np.reshape(np.concatenate([enemies_coords, np.reshape(self.coords, (1, -1))], axis=0), (1, -1))
# store state to memory
self.remember(state, action, reward, next_state, False)
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='sigmoid'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = reward + self.gamma * \
np.amax(self.model.predict(next_state)[0])
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
class Fishbowl(QWidget):
animation_emitter = pyqtSignal(object)
def __init__(self, n_games_signal):
super().__init__()
self.render_bowl = True
# connect signal from emitter to trigger the animation
self.animation_emitter.connect(lambda x: self._move_npcs(x))
self.fishbowl_color = Qt.black
self.fishbowl_size = 350
self.wheel_size = self.fishbowl_size * 1.15
self.wheel_width = self.fishbowl_size * 0.15
self.npc_size = self.fishbowl_size * 0.3
self.fishbowl_border_size = self.fishbowl_size * 0.004
self.fishbowl_thin_border_size = self.fishbowl_size * 0.002
self.nenemies = 10
self.enemies = [Enemy(self.npc_size, self.fishbowl_size) for _ in range(self.nenemies)]
self.player = Player(self.npc_size, self.fishbowl_size, (self.nenemies + 1) * 2)
self.start_flag = True
self.start_time = None
self.time_advantage = 2
self.n_games = 0
self.n_games_signal = n_games_signal
def scale_point(self, point):
original_max = 0.5
new_max = self.fishbowl_size
return ((p / original_max) * new_max for p in point)
def _move_npcs(self, command):
# start moving enemies but not player yet
if self.start_flag:
self.start_flag = False
self.start_time = time.time()
go_player = time.time() - self.start_time > self.time_advantage
        # check whether any enemy is still alive
alive_enemies = [x for x in self.enemies if not x.dead]
if not alive_enemies:
if len(self.player.memory) > 32:
self.player.replay(32)
self.restart_game()
return
for enemy in alive_enemies:
enemy.move()
# check dead
if go_player:
enemy.check_killed_by(self.player)
if go_player:
pass
self.player.move([x.coords if not x.dead else [-1, -1] for x in self.enemies])
if self.render_bowl:
self.repaint()
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawWidget(qp)
qp.end()
def drawWidget(self, qp):
c = self.rect().center()
c_coords = c.x(), c.y()
background_color = self.palette().color(QPalette.Background)
# paint inner trackpad
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_border_size, Qt.SolidLine))
# draw fishbowl
qp.setBrush(QBrush(Qt.gray, Qt.SolidPattern))
qp.drawEllipse(c, *([self.fishbowl_size] * 2))
# draw axis lines
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_thin_border_size, Qt.DashDotDotLine))
for angle in range(0, 420, 45):
line = QLineF(); line.setP1(c); line.setAngle(angle); line.setLength(self.fishbowl_size)
qp.drawLine(line)
# draw wheel separators
line = QLineF(); line.setP1(c + QPoint(self.wheel_size, 0)); line.setAngle(0); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(0, -self.wheel_size)); line.setAngle(90); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(-self.wheel_size, 0)); line.setAngle(180); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(0, self.wheel_size)); line.setAngle(270); line.setLength(self.wheel_width)
qp.drawLine(line)
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_border_size, Qt.SolidLine))
# draw dead enemies
for i, enemy in enumerate([x for x in self.enemies if x.dead]):
qp.setBrush(QBrush(enemy.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(enemy.coords)), *([self.npc_size] * 2))
# draw alive enemies
for i, enemy in enumerate([x for x in self.enemies if not x.dead]):
qp.setBrush(QBrush(enemy.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(enemy.coords)), *([self.npc_size] * 2))
# draw player
qp.setBrush(QBrush(self.player.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(self.player.coords)), *([self.npc_size] * 2))
def animate_balls(self):
self.update_thread = threading.Thread(target=self._animate_balls)
self.update_thread.daemon = True
self.update_thread.start()
def _animate_balls(self):
while True:
time.sleep(0.00001)
self.animation_emitter.emit("animate")
def restart_game(self):
self.n_games += 1
self.n_games_signal.emit(str(self.n_games))
self.start_flag = True
for enemy in self.enemies:
enemy.revive()
self.player.revive()
class gameUI:
def __init__(self, UI_name="fishbowl"):
self.app = QApplication([UI_name])
self.app.setObjectName(UI_name)
self.UI_name = UI_name
# set app style
self.app.setStyle("Fusion")
# create main window
self.window = QMainWindow()
self.window.setWindowTitle(UI_name)
self.window.setObjectName(UI_name)
self.main_group = QGroupBox()
self.window.setCentralWidget(self.main_group)
# set window geometry
ag = QDesktopWidget().availableGeometry()
self.window.move(int(ag.width()*0.15), int(ag.height()*0.05))
self.window.setMinimumWidth(int(ag.width()*0.3))
self.window.setMinimumHeight(int(ag.height()*0.4))
self.layout = QGridLayout()
self.n_games_label = DynamicLabel("Game ")
self.layout.addWidget(self.n_games_label, 0,0,1,10)
self.fishbowl = Fishbowl(self.n_games_label.signal)
self.layout.addWidget(self.fishbowl, 1, 0, 10, 10)
self.main_group.setLayout(self.layout)
# set layout inside window
self.window.setLayout(self.layout)
self.window.show()
def start_ui(self):
"""
        starts the ball animation thread and launches the Qt app
"""
self.start_animation()
self.app.exec()
def start_animation(self):
"""
        waits 1 second so that the Qt app is running and then launches the ball animation thread
"""
time.sleep(1)
self.fishbowl.animate_balls()
if __name__ == "__main__":
ui = gameUI()
ui.start_ui()
| chosen_dir = np.random.randint(low=0, high=len(self.allowed_dirs))
self.dir = self.allowed_dirs[chosen_dir, :]
self.pvdi = chosen_dir | conditional_block |
launch_fishbowl.py | from PyQt5.QtWidgets import QApplication, QLabel, QWidget, QProgressBar, QComboBox, QDesktopWidget, \
QGridLayout, QSlider, QGroupBox, QVBoxLayout, QHBoxLayout, QStyle, QScrollBar, QMainWindow, QAction, QDialog
from PyQt5.QtCore import QDateTime, Qt, QTimer, QPoint, pyqtSignal, QLineF
from PyQt5.QtGui import QFont, QColor, QPainter, QBrush, QPen, QPalette
import time
import numpy as np
import threading
from collections import deque
from keras import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import random
class DynamicLabel(QLabel):
signal = pyqtSignal(object)
def __init__(self, base_text):
super().__init__()
self.font = QFont("Times", 30, QFont.Bold)
self.setFont(self.font)
self.base_text = base_text
self.setText(self.base_text + "0")
self.signal.connect(lambda x: self.setText(self.base_text + x))
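# Usage note (illustrative): because `signal` is a pyqtSignal, worker threads
# can safely update the label via `label.signal.emit("3")`, which the
# connected slot renders as "Game 3".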
class NPC():
allowed_dirs = np.array([[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]])
def __init__(self, diameter, fishbowl_diameter):
self.d = diameter / fishbowl_diameter
self.original_color = Qt.black
self.color = Qt.black
self.coords = np.array([0, 0])
self.v = np.array([0, 0])
self.pvdi = np.array([0, 0])
self.first = True
self.dead = False
def move(self):
if not self.dead:
self.coords = self.coords + self.v
if not self.inside_sphere(self.coords) or self.first:
if not self.first:
forbidden_dirs = [self.pvdi - 1 if self.pvdi != 0 else len(self.allowed_dirs) - 1,
self.pvdi,
self.pvdi + 1 if self.pvdi != len(self.allowed_dirs) - 1 else 0]
available_dirs = np.delete(self.allowed_dirs, forbidden_dirs, axis=0)
chosen_dir = np.random.randint(low=0, high=len(available_dirs))
self.dir = available_dirs[chosen_dir, :]
self.pvdi = np.where(np.all(self.allowed_dirs == self.dir, axis=1))[0][0]
else:
chosen_dir = np.random.randint(low=0, high=len(self.allowed_dirs))
self.dir = self.allowed_dirs[chosen_dir, :]
self.pvdi = chosen_dir
self.first = False
self.v = self.dir * np.random.randint(low=40, high=100, size=2) * 0.00002
self.coords = self.spherical_clip(self.coords)
def check_killed_by(self, player):
p = player.coords
e = self.coords
dist = self.dist(e, p)
if dist < self.d:
self.dead = True
self.color = Qt.gray
@staticmethod
def dist(a, b):
return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def revive(self):
self.color = self.original_color
self.coords = np.array([0, 0])
self.v = np.array([0, 0])
self.first = True
self.dead = False
def spherical_clip(self, p, r=0.499):
r -= self.d/2
p = np.array(p)
dist = np.sqrt(np.sum(p ** 2))
if dist > r:
p = p * (r / dist)
return p
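    # Worked example (d assumed negligible): p = (0.6, 0.8) has dist 1.0 > r,
    # so it is rescaled by r/dist to roughly (0.299, 0.399) -- i.e. clipped
    # back onto the bowl boundary of radius ~0.499.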
def inside_sphere(self, p, r=0.50):
r -= self.d/2
dist = np.sqrt(np.sum(np.array(p) ** 2))
if dist > r:
return False
else:
return True
class Enemy(NPC):
def __init__(self, diameter, fishbowl_diameter):
super().__init__(diameter, fishbowl_diameter)
self.original_color = Qt.red
self.color = Qt.red
class Player(NPC):
"""
https://keon.io/deep-q-learning/
"""
def __init__(self, diameter, fishbowl_diameter, state_size):
super().__init__(diameter, fishbowl_diameter)
self.original_color = Qt.blue
self.color = Qt.blue
# ----------------------------
self.state_size = state_size
self.action_size = 2 # we move in a 2D world
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.model = self._build_model()
    def move(self, enemies_coords):  # TODO apparently PEP8 does not let you change the args in an inherited method...
# build current state
state = np.concatenate([enemies_coords, np.reshape(self.coords, (1, -1))], axis=0)
state = np.reshape(state, (1, -1))
# choose an action
action = np.squeeze(self.model.predict(state))
# update player coords
self.coords = self.coords + (action - 1) * 0.002
if not self.inside_sphere(self.coords):
self.coords = self.spherical_clip(self.coords)
# compute reward
reward = 1 / np.min([self.dist(self.coords, x) for x in enemies_coords])
# print(reward)
# build next state
next_state = np.reshape(np.concatenate([enemies_coords, np.reshape(self.coords, (1, -1))], axis=0), (1, -1))
# store state to memory
self.remember(state, action, reward, next_state, False)
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='sigmoid'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = reward + self.gamma * \
np.amax(self.model.predict(next_state)[0])
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
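    # Note on the loop above: it fits the network toward the standard
    # Q-learning target, target = reward + gamma * max_a' Q(next_state, a'),
    # one replayed sample at a time, then decays the exploration rate.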
class Fishbowl(QWidget):
animation_emitter = pyqtSignal(object)
def __init__(self, n_games_signal):
super().__init__()
self.render_bowl = True
# connect signal from emitter to trigger the animation
self.animation_emitter.connect(lambda x: self._move_npcs(x))
self.fishbowl_color = Qt.black
self.fishbowl_size = 350
self.wheel_size = self.fishbowl_size * 1.15
self.wheel_width = self.fishbowl_size * 0.15
self.npc_size = self.fishbowl_size * 0.3
self.fishbowl_border_size = self.fishbowl_size * 0.004
self.fishbowl_thin_border_size = self.fishbowl_size * 0.002
self.nenemies = 10
self.enemies = [Enemy(self.npc_size, self.fishbowl_size) for _ in range(self.nenemies)]
self.player = Player(self.npc_size, self.fishbowl_size, (self.nenemies + 1) * 2)
self.start_flag = True
self.start_time = None
self.time_advantage = 2
self.n_games = 0
self.n_games_signal = n_games_signal
def scale_point(self, point):
original_max = 0.5
new_max = self.fishbowl_size
return ((p / original_max) * new_max for p in point)
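    # e.g. with fishbowl_size = 350, a normalized coordinate of 0.25 maps to
    # (0.25 / 0.5) * 350 = 175 pixels from the bowl's center.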
def _move_npcs(self, command):
# start moving enemies but not player yet
if self.start_flag:
self.start_flag = False
self.start_time = time.time()
go_player = time.time() - self.start_time > self.time_advantage
        # check whether any enemy is still alive
alive_enemies = [x for x in self.enemies if not x.dead]
if not alive_enemies:
if len(self.player.memory) > 32:
self.player.replay(32)
self.restart_game()
return
for enemy in alive_enemies:
enemy.move()
# check dead
if go_player:
enemy.check_killed_by(self.player)
if go_player:
pass
self.player.move([x.coords if not x.dead else [-1, -1] for x in self.enemies])
if self.render_bowl:
self.repaint()
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawWidget(qp)
qp.end()
def drawWidget(self, qp):
c = self.rect().center()
c_coords = c.x(), c.y()
background_color = self.palette().color(QPalette.Background)
# paint inner trackpad
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_border_size, Qt.SolidLine))
# draw fishbowl
qp.setBrush(QBrush(Qt.gray, Qt.SolidPattern))
qp.drawEllipse(c, *([self.fishbowl_size] * 2))
# draw axis lines
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_thin_border_size, Qt.DashDotDotLine))
for angle in range(0, 420, 45):
line = QLineF(); line.setP1(c); line.setAngle(angle); line.setLength(self.fishbowl_size)
qp.drawLine(line)
# draw wheel separators
line = QLineF(); line.setP1(c + QPoint(self.wheel_size, 0)); line.setAngle(0); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(0, -self.wheel_size)); line.setAngle(90); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(-self.wheel_size, 0)); line.setAngle(180); line.setLength(self.wheel_width)
qp.drawLine(line)
line = QLineF(); line.setP1(c + QPoint(0, self.wheel_size)); line.setAngle(270); line.setLength(self.wheel_width)
qp.drawLine(line)
qp.setPen(QPen(self.fishbowl_color, self.fishbowl_border_size, Qt.SolidLine)) | # draw alive enemies
for i, enemy in enumerate([x for x in self.enemies if not x.dead]):
qp.setBrush(QBrush(enemy.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(enemy.coords)), *([self.npc_size] * 2))
# draw player
qp.setBrush(QBrush(self.player.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(self.player.coords)), *([self.npc_size] * 2))
def animate_balls(self):
self.update_thread = threading.Thread(target=self._animate_balls)
self.update_thread.daemon = True
self.update_thread.start()
def _animate_balls(self):
while True:
time.sleep(0.00001)
self.animation_emitter.emit("animate")
def restart_game(self):
self.n_games += 1
self.n_games_signal.emit(str(self.n_games))
self.start_flag = True
for enemy in self.enemies:
enemy.revive()
self.player.revive()
class gameUI:
def __init__(self, UI_name="fishbowl"):
self.app = QApplication([UI_name])
self.app.setObjectName(UI_name)
self.UI_name = UI_name
# set app style
self.app.setStyle("Fusion")
# create main window
self.window = QMainWindow()
self.window.setWindowTitle(UI_name)
self.window.setObjectName(UI_name)
self.main_group = QGroupBox()
self.window.setCentralWidget(self.main_group)
# set window geometry
ag = QDesktopWidget().availableGeometry()
self.window.move(int(ag.width()*0.15), int(ag.height()*0.05))
self.window.setMinimumWidth(int(ag.width()*0.3))
self.window.setMinimumHeight(int(ag.height()*0.4))
self.layout = QGridLayout()
self.n_games_label = DynamicLabel("Game ")
self.layout.addWidget(self.n_games_label, 0,0,1,10)
self.fishbowl = Fishbowl(self.n_games_label.signal)
self.layout.addWidget(self.fishbowl, 1, 0, 10, 10)
self.main_group.setLayout(self.layout)
# set layout inside window
self.window.setLayout(self.layout)
self.window.show()
def start_ui(self):
"""
        starts the ball animation thread and launches the Qt app
"""
self.start_animation()
self.app.exec()
def start_animation(self):
"""
        waits 1 second so that the Qt app is running and then launches the ball animation thread
"""
time.sleep(1)
self.fishbowl.animate_balls()
if __name__ == "__main__":
ui = gameUI()
ui.start_ui() |
# draw dead enemies
for i, enemy in enumerate([x for x in self.enemies if x.dead]):
qp.setBrush(QBrush(enemy.color, Qt.SolidPattern))
qp.drawEllipse(c + QPoint(*self.scale_point(enemy.coords)), *([self.npc_size] * 2)) | random_line_split |
frameworks.rs | // stripped Mac Core Foundation + Metal layer, only what's needed
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
pub use {
std::{
ffi::c_void,
os::raw::c_ulong,
ptr::NonNull,
},
crate::{
makepad_platform::{
os::apple::frameworks::*,
makepad_objc_sys::{
runtime::{Class, Object, Protocol, Sel, BOOL, YES, NO},
declare::ClassDecl,
msg_send,
sel,
class,
sel_impl,
Encode,
Encoding
},
},
}
};
// CORE AUDIO
pub const kAudioUnitManufacturer_Apple: u32 = 1634758764;
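// 1634758764 is the FourCC "appl" read as a big-endian u32:
// u32::from_be_bytes(*b"appl") == 1634758764.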
#[repr(C)] pub struct OpaqueAudioComponent([u8; 0]);
pub type CAudioComponent = *mut OpaqueAudioComponent;
#[repr(C)] pub struct ComponentInstanceRecord([u8; 0]);
pub type CAudioComponentInstance = *mut ComponentInstanceRecord;
pub type CAudioUnit = CAudioComponentInstance;
pub type OSStatus = i32;
#[repr(C)]
pub struct CAudioStreamBasicDescription {
pub mSampleRate: f64,
pub mFormatID: AudioFormatId,
pub mFormatFlags: u32,
pub mBytesPerPacket: u32,
pub mFramesPerPacket: u32,
pub mBytesPerFrame: u32,
pub mChannelsPerFrame: u32,
pub mBitsPerChannel: u32,
pub mReserved: u32,
}
#[repr(u32)]
pub enum AudioFormatId {
LinearPCM = 1819304813,
AC3 = 1633889587,
F60958AC3 = 1667326771,
AppleIMA4 = 1768775988,
MPEG4AAC = 1633772320,
MPEG4CELP = 1667591280,
MPEG4HVXC = 1752594531,
MPEG4TwinVQ = 1953986161,
MACE3 = 1296122675,
MACE6 = 1296122678,
ULaw = 1970037111,
ALaw = 1634492791,
QDesign = 1363430723,
QDesign2 = 1363430706,
QUALCOMM = 1365470320,
MPEGLayer1 = 778924081,
MPEGLayer2 = 778924082,
MPEGLayer3 = 778924083,
TimeCode = 1953066341,
MIDIStream = 1835623529,
ParameterValueStream = 1634760307,
AppleLossless = 1634492771,
MPEG4AAC_HE = 1633772392,
MPEG4AAC_LD = 1633772396,
MPEG4AAC_ELD = 1633772389,
MPEG4AAC_ELD_SBR = 1633772390,
MPEG4AAC_ELD_V2 = 1633772391,
MPEG4AAC_HE_V2 = 1633772400,
MPEG4AAC_Spatial = 1633772403,
AMR = 1935764850,
AMR_WB = 1935767394,
Audible = 1096107074,
iLBC = 1768710755,
DVIIntelIMA = 1836253201,
MicrosoftGSM = 1836253233,
AES3 = 1634038579,
}
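// Note: these discriminants are big-endian FourCC codes, e.g.
// u32::from_be_bytes(*b"lpcm") == 1819304813 for LinearPCM.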
/*
struct F60958AC3Flags;
impl F60958AC3Flags {
const IS_FLOAT: u32 = 1;
const IS_BIG_ENDIAN: u32 = 2;
const IS_SIGNED_INTEGER: u32 = 4;
const IS_PACKED: u32 = 8;
const IS_ALIGNED_HIGH: u32 = 16;
const IS_NON_INTERLEAVED: u32 = 32;
const IS_NON_MIXABLE: u32 = 64;
}
*/
/*
pub struct LinearPcmFlags;
impl LinearPcmFlags {
const IS_FLOAT: u32 = 1;
const IS_BIG_ENDIAN: u32 = 2;
const IS_SIGNED_INTEGER: u32 = 4;
const IS_PACKED: u32 = 8;
const IS_ALIGNED_HIGH: u32 = 16;
const IS_NON_INTERLEAVED: u32 = 32;
const IS_NON_MIXABLE: u32 = 64;
const FLAGS_SAMPLE_FRACTION_SHIFT: u32 = 7;
const FLAGS_SAMPLE_FRACTION_MASK: u32 = 8064;
}
pub struct AppleLosslessFlags;
impl AppleLosslessFlags {
const BIT_16_SOURCE_DATA: u32 = 1;
const BIT_20_SOURCE_DATA: u32 = 2;
const BIT_24_SOURCE_DATA: u32 = 3;
const BIT_32_SOURCE_DATA: u32 = 4;
}
*/
#[repr(u32)]
pub enum Mpeg4ObjectId {
AAC_Main = 1,
AAC_LC = 2,
AAC_SSR = 3,
AAC_LTP = 4,
AAC_SBR = 5,
AAC_Scalable = 6,
TwinVQ = 7,
CELP = 8,
HVXC = 9,
}
/*
pub struct AudioTimeStampFlags;
impl AudioTimeStampFlags {
const SAMPLE_TIME_VALID: u32 = 1;
const HOST_TIME_VALID: u32 = 2;
const RATE_SCALAR_VALID: u32 = 4;
const WORLD_CLOCK_TIME_VALID: u32 = 8;
const SMPTE_TIME_VALID: u32 = 16;
}
*/
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(C)]
pub struct CAudioComponentDescription {
pub componentType: CAudioUnitType,
pub componentSubType: CAudioUnitSubType,
pub componentManufacturer: u32,
pub componentFlags: u32,
pub componentFlagsMask: u32,
}
impl CAudioComponentDescription {
pub fn new_apple(ty: CAudioUnitType, sub: CAudioUnitSubType) -> Self {
Self {
componentType: ty,
componentSubType: sub,
componentManufacturer: kAudioUnitManufacturer_Apple,
componentFlags: 0,
componentFlagsMask: 0,
}
}
pub fn new_all_manufacturers(ty: CAudioUnitType, sub: CAudioUnitSubType) -> Self {
Self {
componentType: ty,
componentSubType: sub,
componentManufacturer: 0,
componentFlags: 0,
componentFlagsMask: 0,
}
}
}
#[derive(Debug, Default)]
#[repr(C)]
pub struct SMPTETime {
pub mSubframes: i16,
pub mSubframeDivisor: i16,
pub mCounter: u32,
pub mType: u32,
pub mFlags: u32,
pub mHours: i16,
pub mMinutes: i16,
pub mSeconds: i16,
pub mFrames: i16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct _AudioBuffer {
pub mNumberChannels: u32,
pub mDataByteSize: u32,
pub mData: *mut ::std::os::raw::c_void,
}
pub const MAX_AUDIO_BUFFERS: usize = 8;
#[repr(C)]
pub struct CAudioBufferList {
pub mNumberBuffers: u32,
pub mBuffers: [_AudioBuffer; MAX_AUDIO_BUFFERS],
}
#[derive(Debug)]
#[repr(C)]
pub struct CAudioTimeStamp {
pub mSampleTime: f64,
pub mHostTime: u64,
pub mRateScalar: f64,
pub mWordClockTime: u64,
pub mSMPTETime: SMPTETime,
pub mFlags: u32,
pub mReserved: u32,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum CAudioUnitType {
IO = 1635086197,
MusicDevice = 1635085685,
MusicEffect = 1635085670,
FormatConverter = 1635083875,
Effect = 1635083896,
Mixer = 1635085688,
Panner = 1635086446,
Generator = 1635084142,
OfflineEffect = 1635086188,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum CAudioUnitSubType {
Undefined = 0,
PeakLimiter = 1819112562,
DynamicsProcessor = 1684237680,
LowPassFilter = 1819304307,
HighPassFilter = 1752195443,
BandPassFilter = 1651532147,
HighShelfFilter = 1752393830,
LowShelfFilter = 1819502694,
ParametricEQ = 1886217585,
Distortion = 1684632436,
Delay = 1684368505,
SampleDelay = 1935961209,
GraphicEQ = 1735550321,
MultiBandCompressor = 1835232624,
MatrixReverb = 1836213622,
Pitch = 1953329268,
AUFilter = 1718185076,
NetSend = 1853058660,
RogerBeep = 1919903602,
NBandEQ = 1851942257,
//pub enum FormatConverterType
AUConverter = 1668247158,
NewTimePitch = 1853191280,
//TimePitch = 1953329268,
DeferredRenderer = 1684366962,
Splitter = 1936747636,
Merger = 1835364967,
Varispeed = 1986097769,
AUiPodTimeOther = 1768977519,
//pub enum MixerType
MultiChannelMixer = 1835232632,
StereoMixer = 1936554098,
Mixer3D = 862219640,
MatrixMixer = 1836608888,
//pub enum GeneratorType {
ScheduledSoundPlayer = 1936945260,
AudioFilePlayer = 1634103404,
//pub enum MusicDeviceType {
DLSSynth = 1684828960,
Sampler = 1935764848,
//pub enum IOType {
GenericOutput = 1734700658,
HalOutput = 1634230636,
DefaultOutput = 1684366880,
SystemOutput = 1937339168,
VoiceProcessingIO = 1987078511,
RemoteIO = 1919512419,
}
#[derive(Debug)]
#[repr(i32)]
pub enum OSError {
Unimplemented = -4,
FileNotFound = -43,
FilePermission = -54,
TooManyFilesOpen = -42,
Unspecified = -1500,
SystemSoundClientMessageTimeout = -1501,
BadFilePath = 561017960,
Param = -50,
MemFull = -108,
FormatUnspecified = 2003329396,
UnknownProperty = 2003332927,
BadPropertySize = 561211770,
IllegalOperation = 1852797029,
UnsupportedFormat = 560226676,
State = 561214580,
NotEnoughBufferSpace = 560100710,
UnsupportedDataFormat = 1718449215,
InvalidProperty = -10879,
InvalidParameter = -10878,
InvalidElement = -10877,
NoConnection = -10876,
FailedInitialization = -10875,
TooManyFramesToProcess = -10874,
InvalidFile = -10871,
FormatNotSupported = -10868,
Uninitialized = -10867,
InvalidScope = -10866,
PropertyNotWritable = -10865,
CannotDoInCurrentContext = -10863,
InvalidPropertyValue = -10851,
PropertyNotInUse = -10850,
Initialized = -10849,
InvalidOfflineRender = -10848,
Unauthorized = -10847,
NoMatchingDefaultAudioUnitFound,
Unknown,
}
pub const kAudioComponentInstantiation_LoadInProcess: u32 = 2;
pub const kAudioComponentInstantiation_LoadOutOfProcess: u32 = 1;
impl OSError {
pub fn from(result: i32) -> Result<(), Self> {
Err(match result {
0 => return Ok(()),
x if x == Self::Unimplemented as i32 => Self::Unimplemented,
x if x == Self::FileNotFound as i32 => Self::FileNotFound,
x if x == Self::FilePermission as i32 => Self::FilePermission,
x if x == Self::TooManyFilesOpen as i32 => Self::TooManyFilesOpen,
x if x == Self::Unspecified as i32 => Self::Unspecified,
x if x == Self::SystemSoundClientMessageTimeout as i32 => Self::SystemSoundClientMessageTimeout,
x if x == Self::BadFilePath as i32 => Self::BadFilePath,
x if x == Self::Param as i32 => Self::Param,
x if x == Self::MemFull as i32 => Self::MemFull,
x if x == Self::FormatUnspecified as i32 => Self::FormatUnspecified,
x if x == Self::UnknownProperty as i32 => Self::UnknownProperty,
x if x == Self::BadPropertySize as i32 => Self::BadPropertySize,
x if x == Self::IllegalOperation as i32 => Self::IllegalOperation,
x if x == Self::UnsupportedFormat as i32 => Self::UnsupportedFormat,
x if x == Self::State as i32 => Self::State,
x if x == Self::NotEnoughBufferSpace as i32 => Self::NotEnoughBufferSpace,
x if x == Self::UnsupportedDataFormat as i32 => Self::UnsupportedDataFormat,
x if x == Self::InvalidProperty as i32 => Self::InvalidProperty,
x if x == Self::InvalidParameter as i32 => Self::InvalidParameter,
x if x == Self::InvalidElement as i32 => Self::InvalidElement,
x if x == Self::NoConnection as i32 => Self::NoConnection,
x if x == Self::FailedInitialization as i32 => Self::FailedInitialization,
x if x == Self::TooManyFramesToProcess as i32 => Self::TooManyFramesToProcess,
x if x == Self::InvalidFile as i32 => Self::InvalidFile,
x if x == Self::FormatNotSupported as i32 => Self::FormatNotSupported,
x if x == Self::Uninitialized as i32 => Self::Uninitialized,
x if x == Self::InvalidScope as i32 => Self::InvalidScope,
x if x == Self::PropertyNotWritable as i32 => Self::PropertyNotWritable,
x if x == Self::CannotDoInCurrentContext as i32 => Self::CannotDoInCurrentContext,
x if x == Self::InvalidPropertyValue as i32 => Self::InvalidPropertyValue,
x if x == Self::PropertyNotInUse as i32 => Self::PropertyNotInUse,
x if x == Self::Initialized as i32 => Self::Initialized,
x if x == Self::InvalidOfflineRender as i32 => Self::InvalidOfflineRender,
x if x == Self::Unauthorized as i32 => Self::Unauthorized,
_ => Self::Unknown
})
}
pub fn from_nserror(ns_error: ObjcId) -> Result<(), Self> {
if ns_error != nil {
let code: i32 = unsafe {msg_send![ns_error, code]};
Self::from(code)
}
else {
Ok(())
}
}
}
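// Illustrative helper (not part of the original bindings): call sites can
// wrap a raw OSStatus and propagate failures with `?`.
pub fn check_os_status(status: OSStatus) -> Result<(), OSError> {
    OSError::from(status)
}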
pub type ItemCount = u64;
pub type MIDIObjectRef = u32;
pub type MIDIClientRef = MIDIObjectRef;
pub type MIDIPortRef = MIDIObjectRef;
pub type MIDIEndpointRef = MIDIObjectRef;
pub type MIDIProtocolID = i32;
pub type MIDITimeStamp = u64;
pub const kMIDIProtocol_1_0: i32 = 1;
pub const kMIDIProtocol_2_0: i32 = 2;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct MIDINotification {
pub messageID: i32,
pub messageSize: u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct | {
pub protocol: MIDIProtocolID,
pub numPackets: u32,
pub packet: [MIDIEventPacket; 1usize],
}
#[repr(C, packed(4))]
#[derive(Copy, Clone)]
pub struct MIDIEventPacket {
pub timeStamp: MIDITimeStamp,
pub wordCount: u32,
pub words: [u32; 64usize],
}
#[link(name = "CoreMidi", kind = "framework")]
extern "C" {
pub static kMIDIPropertyManufacturer: CFStringRef;
pub static kMIDIPropertyDisplayName: CFStringRef;
pub static kMIDIPropertyUniqueID: CFStringRef;
pub fn MIDIGetNumberOfSources() -> ItemCount;
pub fn MIDIGetSource(sourceIndex0: ItemCount) -> MIDIEndpointRef;
pub fn MIDIGetNumberOfDestinations() -> ItemCount;
pub fn MIDIGetDestination(sourceIndex0: ItemCount) -> MIDIEndpointRef;
pub fn MIDISendEventList(
port: MIDIPortRef,
dest: MIDIEndpointRef,
evtlist: *const MIDIEventList,
) -> OSStatus;
pub fn MIDIClientCreateWithBlock(
name: CFStringRef,
outClient: *mut MIDIClientRef,
notifyBlock: ObjcId,
) -> OSStatus;
pub fn MIDIInputPortCreateWithProtocol(
client: MIDIClientRef,
portName: CFStringRef,
protocol: MIDIProtocolID,
outPort: *mut MIDIPortRef,
receiveBlock: ObjcId,
) -> OSStatus;
pub fn MIDIOutputPortCreate(
client: MIDIClientRef,
portName: CFStringRef,
outPort: *mut MIDIPortRef,
) -> OSStatus;
pub fn MIDIObjectGetStringProperty(
obj: MIDIObjectRef,
propertyID: CFStringRef,
str_: *mut CFStringRef,
) -> OSStatus;
pub fn MIDIObjectGetIntegerProperty(
obj: MIDIObjectRef,
propertyID: CFStringRef,
outValue: *mut i32,
) -> OSStatus;
pub fn MIDIPortConnectSource(
port: MIDIPortRef,
source: MIDIEndpointRef,
connRefCon: *mut ::std::os::raw::c_void,
) -> OSStatus;
}
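// Sketch only (assumes the extern declarations above): collect every MIDI
// source endpoint currently known to the system.
pub fn midi_sources_sketch() -> Vec<MIDIEndpointRef> {
    unsafe { (0..MIDIGetNumberOfSources()).map(|i| MIDIGetSource(i)).collect() }
}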
| MIDIEventList | identifier_name |
frameworks.rs | // stripped Mac Core Foundation + Metal layer, only what's needed
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
pub use {
std::{
ffi::c_void,
os::raw::c_ulong,
ptr::NonNull,
},
crate::{
makepad_platform::{
os::apple::frameworks::*,
makepad_objc_sys::{
runtime::{Class, Object, Protocol, Sel, BOOL, YES, NO},
declare::ClassDecl,
msg_send,
sel,
class,
sel_impl,
Encode,
Encoding
},
}, |
pub const kAudioUnitManufacturer_Apple: u32 = 1634758764;
#[repr(C)] pub struct OpaqueAudioComponent([u8; 0]);
pub type CAudioComponent = *mut OpaqueAudioComponent;
#[repr(C)] pub struct ComponentInstanceRecord([u8; 0]);
pub type CAudioComponentInstance = *mut ComponentInstanceRecord;
pub type CAudioUnit = CAudioComponentInstance;
pub type OSStatus = i32;
#[repr(C)]
pub struct CAudioStreamBasicDescription {
pub mSampleRate: f64,
pub mFormatID: AudioFormatId,
pub mFormatFlags: u32,
pub mBytesPerPacket: u32,
pub mFramesPerPacket: u32,
pub mBytesPerFrame: u32,
pub mChannelsPerFrame: u32,
pub mBitsPerChannel: u32,
pub mReserved: u32,
}
#[repr(u32)]
pub enum AudioFormatId {
LinearPCM = 1819304813,
AC3 = 1633889587,
F60958AC3 = 1667326771,
AppleIMA4 = 1768775988,
MPEG4AAC = 1633772320,
MPEG4CELP = 1667591280,
MPEG4HVXC = 1752594531,
MPEG4TwinVQ = 1953986161,
MACE3 = 1296122675,
MACE6 = 1296122678,
ULaw = 1970037111,
ALaw = 1634492791,
QDesign = 1363430723,
QDesign2 = 1363430706,
QUALCOMM = 1365470320,
MPEGLayer1 = 778924081,
MPEGLayer2 = 778924082,
MPEGLayer3 = 778924083,
TimeCode = 1953066341,
MIDIStream = 1835623529,
ParameterValueStream = 1634760307,
AppleLossless = 1634492771,
MPEG4AAC_HE = 1633772392,
MPEG4AAC_LD = 1633772396,
MPEG4AAC_ELD = 1633772389,
MPEG4AAC_ELD_SBR = 1633772390,
MPEG4AAC_ELD_V2 = 1633772391,
MPEG4AAC_HE_V2 = 1633772400,
MPEG4AAC_Spatial = 1633772403,
AMR = 1935764850,
AMR_WB = 1935767394,
Audible = 1096107074,
iLBC = 1768710755,
DVIIntelIMA = 1836253201,
MicrosoftGSM = 1836253233,
AES3 = 1634038579,
}
/*
struct F60958AC3Flags;
impl F60958AC3Flags {
const IS_FLOAT: u32 = 1;
const IS_BIG_ENDIAN: u32 = 2;
const IS_SIGNED_INTEGER: u32 = 4;
const IS_PACKED: u32 = 8;
const IS_ALIGNED_HIGH: u32 = 16;
const IS_NON_INTERLEAVED: u32 = 32;
const IS_NON_MIXABLE: u32 = 64;
}
*/
/*
pub struct LinearPcmFlags;
impl LinearPcmFlags {
const IS_FLOAT: u32 = 1;
const IS_BIG_ENDIAN: u32 = 2;
const IS_SIGNED_INTEGER: u32 = 4;
const IS_PACKED: u32 = 8;
const IS_ALIGNED_HIGH: u32 = 16;
const IS_NON_INTERLEAVED: u32 = 32;
const IS_NON_MIXABLE: u32 = 64;
const FLAGS_SAMPLE_FRACTION_SHIFT: u32 = 7;
const FLAGS_SAMPLE_FRACTION_MASK: u32 = 8064;
}
pub struct AppleLosslessFlags;
impl AppleLosslessFlags {
const BIT_16_SOURCE_DATA: u32 = 1;
const BIT_20_SOURCE_DATA: u32 = 2;
const BIT_24_SOURCE_DATA: u32 = 3;
const BIT_32_SOURCE_DATA: u32 = 4;
}
*/
#[repr(u32)]
pub enum Mpeg4ObjectId {
AAC_Main = 1,
AAC_LC = 2,
AAC_SSR = 3,
AAC_LTP = 4,
AAC_SBR = 5,
AAC_Scalable = 6,
TwinVQ = 7,
CELP = 8,
HVXC = 9,
}
/*
pub struct AudioTimeStampFlags;
impl AudioTimeStampFlags {
const SAMPLE_TIME_VALID: u32 = 1;
const HOST_TIME_VALID: u32 = 2;
const RATE_SCALAR_VALID: u32 = 4;
const WORLD_CLOCK_TIME_VALID: u32 = 8;
const SMPTE_TIME_VALID: u32 = 16;
}
*/
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(C)]
pub struct CAudioComponentDescription {
pub componentType: CAudioUnitType,
pub componentSubType: CAudioUnitSubType,
pub componentManufacturer: u32,
pub componentFlags: u32,
pub componentFlagsMask: u32,
}
impl CAudioComponentDescription {
pub fn new_apple(ty: CAudioUnitType, sub: CAudioUnitSubType) -> Self {
Self {
componentType: ty,
componentSubType: sub,
componentManufacturer: kAudioUnitManufacturer_Apple,
componentFlags: 0,
componentFlagsMask: 0,
}
}
pub fn new_all_manufacturers(ty: CAudioUnitType, sub: CAudioUnitSubType) -> Self {
Self {
componentType: ty,
componentSubType: sub,
componentManufacturer: 0,
componentFlags: 0,
componentFlagsMask: 0,
}
}
}
#[derive(Debug, Default)]
#[repr(C)]
pub struct SMPTETime {
pub mSubframes: i16,
pub mSubframeDivisor: i16,
pub mCounter: u32,
pub mType: u32,
pub mFlags: u32,
pub mHours: i16,
pub mMinutes: i16,
pub mSeconds: i16,
pub mFrames: i16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct _AudioBuffer {
pub mNumberChannels: u32,
pub mDataByteSize: u32,
pub mData: *mut ::std::os::raw::c_void,
}
pub const MAX_AUDIO_BUFFERS: usize = 8;
#[repr(C)]
pub struct CAudioBufferList {
pub mNumberBuffers: u32,
pub mBuffers: [_AudioBuffer; MAX_AUDIO_BUFFERS],
}
#[derive(Debug)]
#[repr(C)]
pub struct CAudioTimeStamp {
pub mSampleTime: f64,
pub mHostTime: u64,
pub mRateScalar: f64,
pub mWordClockTime: u64,
pub mSMPTETime: SMPTETime,
pub mFlags: u32,
pub mReserved: u32,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum CAudioUnitType {
IO = 1635086197,
MusicDevice = 1635085685,
MusicEffect = 1635085670,
FormatConverter = 1635083875,
Effect = 1635083896,
Mixer = 1635085688,
Panner = 1635086446,
Generator = 1635084142,
OfflineEffect = 1635086188,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum CAudioUnitSubType {
Undefined = 0,
PeakLimiter = 1819112562,
DynamicsProcessor = 1684237680,
LowPassFilter = 1819304307,
HighPassFilter = 1752195443,
BandPassFilter = 1651532147,
HighShelfFilter = 1752393830,
LowShelfFilter = 1819502694,
ParametricEQ = 1886217585,
Distortion = 1684632436,
Delay = 1684368505,
SampleDelay = 1935961209,
GraphicEQ = 1735550321,
MultiBandCompressor = 1835232624,
MatrixReverb = 1836213622,
Pitch = 1953329268,
AUFilter = 1718185076,
NetSend = 1853058660,
RogerBeep = 1919903602,
NBandEQ = 1851942257,
//pub enum FormatConverterType
AUConverter = 1668247158,
NewTimePitch = 1853191280,
//TimePitch = 1953329268,
DeferredRenderer = 1684366962,
Splitter = 1936747636,
Merger = 1835364967,
Varispeed = 1986097769,
AUiPodTimeOther = 1768977519,
//pub enum MixerType
MultiChannelMixer = 1835232632,
StereoMixer = 1936554098,
Mixer3D = 862219640,
MatrixMixer = 1836608888,
//pub enum GeneratorType {
ScheduledSoundPlayer = 1936945260,
AudioFilePlayer = 1634103404,
//pub enum MusicDeviceType {
DLSSynth = 1684828960,
Sampler = 1935764848,
//pub enum IOType {
GenericOutput = 1734700658,
HalOutput = 1634230636,
DefaultOutput = 1684366880,
SystemOutput = 1937339168,
VoiceProcessingIO = 1987078511,
RemoteIO = 1919512419,
}
#[derive(Debug)]
#[repr(i32)]
pub enum OSError {
Unimplemented = -4,
FileNotFound = -43,
FilePermission = -54,
TooManyFilesOpen = -42,
Unspecified = -1500,
SystemSoundClientMessageTimeout = -1501,
BadFilePath = 561017960,
Param = -50,
MemFull = -108,
FormatUnspecified = 2003329396,
UnknownProperty = 2003332927,
BadPropertySize = 561211770,
IllegalOperation = 1852797029,
UnsupportedFormat = 560226676,
State = 561214580,
NotEnoughBufferSpace = 560100710,
UnsupportedDataFormat = 1718449215,
InvalidProperty = -10879,
InvalidParameter = -10878,
InvalidElement = -10877,
NoConnection = -10876,
FailedInitialization = -10875,
TooManyFramesToProcess = -10874,
InvalidFile = -10871,
FormatNotSupported = -10868,
Uninitialized = -10867,
InvalidScope = -10866,
PropertyNotWritable = -10865,
CannotDoInCurrentContext = -10863,
InvalidPropertyValue = -10851,
PropertyNotInUse = -10850,
Initialized = -10849,
InvalidOfflineRender = -10848,
Unauthorized = -10847,
NoMatchingDefaultAudioUnitFound,
Unknown,
}
pub const kAudioComponentInstantiation_LoadInProcess: u32 = 2;
pub const kAudioComponentInstantiation_LoadOutOfProcess: u32 = 1;
impl OSError {
pub fn from(result: i32) -> Result<(), Self> {
Err(match result {
0 => return Ok(()),
x if x == Self::Unimplemented as i32 => Self::Unimplemented,
x if x == Self::FileNotFound as i32 => Self::FileNotFound,
x if x == Self::FilePermission as i32 => Self::FilePermission,
x if x == Self::TooManyFilesOpen as i32 => Self::TooManyFilesOpen,
x if x == Self::Unspecified as i32 => Self::Unspecified,
x if x == Self::SystemSoundClientMessageTimeout as i32 => Self::SystemSoundClientMessageTimeout,
x if x == Self::BadFilePath as i32 => Self::BadFilePath,
x if x == Self::Param as i32 => Self::Param,
x if x == Self::MemFull as i32 => Self::MemFull,
x if x == Self::FormatUnspecified as i32 => Self::FormatUnspecified,
x if x == Self::UnknownProperty as i32 => Self::UnknownProperty,
x if x == Self::BadPropertySize as i32 => Self::BadPropertySize,
x if x == Self::IllegalOperation as i32 => Self::IllegalOperation,
x if x == Self::UnsupportedFormat as i32 => Self::UnsupportedFormat,
x if x == Self::State as i32 => Self::State,
x if x == Self::NotEnoughBufferSpace as i32 => Self::NotEnoughBufferSpace,
x if x == Self::UnsupportedDataFormat as i32 => Self::UnsupportedDataFormat,
x if x == Self::InvalidProperty as i32 => Self::InvalidProperty,
x if x == Self::InvalidParameter as i32 => Self::InvalidParameter,
x if x == Self::InvalidElement as i32 => Self::InvalidElement,
x if x == Self::NoConnection as i32 => Self::NoConnection,
x if x == Self::FailedInitialization as i32 => Self::FailedInitialization,
x if x == Self::TooManyFramesToProcess as i32 => Self::TooManyFramesToProcess,
x if x == Self::InvalidFile as i32 => Self::InvalidFile,
x if x == Self::FormatNotSupported as i32 => Self::FormatNotSupported,
x if x == Self::Uninitialized as i32 => Self::Uninitialized,
x if x == Self::InvalidScope as i32 => Self::InvalidScope,
x if x == Self::PropertyNotWritable as i32 => Self::PropertyNotWritable,
x if x == Self::CannotDoInCurrentContext as i32 => Self::CannotDoInCurrentContext,
x if x == Self::InvalidPropertyValue as i32 => Self::InvalidPropertyValue,
x if x == Self::PropertyNotInUse as i32 => Self::PropertyNotInUse,
x if x == Self::Initialized as i32 => Self::Initialized,
x if x == Self::InvalidOfflineRender as i32 => Self::InvalidOfflineRender,
x if x == Self::Unauthorized as i32 => Self::Unauthorized,
_ => Self::Unknown
})
}
pub fn from_nserror(ns_error: ObjcId) -> Result<(), Self> {
if ns_error != nil {
let code: i32 = unsafe {msg_send![ns_error, code]};
Self::from(code)
}
else {
Ok(())
}
}
}
pub type ItemCount = u64;
pub type MIDIObjectRef = u32;
pub type MIDIClientRef = MIDIObjectRef;
pub type MIDIPortRef = MIDIObjectRef;
pub type MIDIEndpointRef = MIDIObjectRef;
pub type MIDIProtocolID = i32;
pub type MIDITimeStamp = u64;
pub const kMIDIProtocol_1_0: i32 = 1;
pub const kMIDIProtocol_2_0: i32 = 2;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct MIDINotification {
pub messageID: i32,
pub messageSize: u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct MIDIEventList {
pub protocol: MIDIProtocolID,
pub numPackets: u32,
pub packet: [MIDIEventPacket; 1usize],
}
#[repr(C, packed(4))]
#[derive(Copy, Clone)]
pub struct MIDIEventPacket {
pub timeStamp: MIDITimeStamp,
pub wordCount: u32,
pub words: [u32; 64usize],
}
#[link(name = "CoreMidi", kind = "framework")]
extern "C" {
pub static kMIDIPropertyManufacturer: CFStringRef;
pub static kMIDIPropertyDisplayName: CFStringRef;
pub static kMIDIPropertyUniqueID: CFStringRef;
pub fn MIDIGetNumberOfSources() -> ItemCount;
pub fn MIDIGetSource(sourceIndex0: ItemCount) -> MIDIEndpointRef;
pub fn MIDIGetNumberOfDestinations() -> ItemCount;
pub fn MIDIGetDestination(sourceIndex0: ItemCount) -> MIDIEndpointRef;
pub fn MIDISendEventList(
port: MIDIPortRef,
dest: MIDIEndpointRef,
evtlist: *const MIDIEventList,
) -> OSStatus;
pub fn MIDIClientCreateWithBlock(
name: CFStringRef,
outClient: *mut MIDIClientRef,
notifyBlock: ObjcId,
) -> OSStatus;
pub fn MIDIInputPortCreateWithProtocol(
client: MIDIClientRef,
portName: CFStringRef,
protocol: MIDIProtocolID,
outPort: *mut MIDIPortRef,
receiveBlock: ObjcId,
) -> OSStatus;
pub fn MIDIOutputPortCreate(
client: MIDIClientRef,
portName: CFStringRef,
outPort: *mut MIDIPortRef,
) -> OSStatus;
pub fn MIDIObjectGetStringProperty(
obj: MIDIObjectRef,
propertyID: CFStringRef,
str_: *mut CFStringRef,
) -> OSStatus;
pub fn MIDIObjectGetIntegerProperty(
obj: MIDIObjectRef,
propertyID: CFStringRef,
outValue: *mut i32,
) -> OSStatus;
pub fn MIDIPortConnectSource(
port: MIDIPortRef,
source: MIDIEndpointRef,
connRefCon: *mut ::std::os::raw::c_void,
) -> OSStatus;
} | }
};
// CORE AUDIO | random_line_split |
models.py | # -*- coding: utf-8 -*-
# Copyright 2016, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
import socket
import salt.cloud
import yaml
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.db import models, transaction
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.utils.timezone import now
from django_extensions.db.models import (
TimeStampedModel,
TitleSlugDescriptionModel,
)
from guardian.shortcuts import get_users_with_perms
from model_utils import Choices
from model_utils.models import StatusModel
from stackdio.core.fields import DeletingFileField
from stackdio.core.utils import recursive_update
from stackdio.api.cloud.models import SecurityGroup
PROTOCOL_CHOICES = [
('tcp', 'TCP'),
('udp', 'UDP'),
('icmp', 'ICMP'),
]
logger = logging.getLogger(__name__)
HOST_INDEX_PATTERN = re.compile(r'.*-.*-(\d+)')
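# e.g. HOST_INDEX_PATTERN.match('prod-web-12').group(1) == '12'
# (hostname is illustrative)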
def get_hostnames_from_hostdefs(hostdefs, username='', namespace=''):
hostnames = []
for hostdef in hostdefs:
for i in xrange(hostdef.count):
hostnames.append(
hostdef.hostname_template.format(
namespace=namespace,
username=username,
index=i
)
)
return hostnames
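# Worked example (hypothetical template): with hostname_template set to
# '{namespace}-web-{index}', namespace='prod' and count=2, this returns
# ['prod-web-0', 'prod-web-1'].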
class StackCreationException(Exception):
def __init__(self, errors, *args, **kwargs):
self.errors = errors
super(StackCreationException, self).__init__(*args, **kwargs)
class Level(object):
DEBUG = 'DEBUG'
INFO = 'INFO'
WARN = 'WARNING'
ERROR = 'ERROR'
class StatusDetailModel(StatusModel):
status_detail = models.TextField(blank=True)
class Meta:
abstract = True
default_permissions = ()
def set_status(self, status, detail=''):
self.status = status
self.status_detail = detail
return self.save()
class StackQuerySet(models.QuerySet):
def create(self, **kwargs):
new_properties = kwargs.pop('properties', {})
with transaction.atomic(using=self.db):
stack = super(StackQuerySet, self).create(**kwargs)
# manage the properties
properties = stack.blueprint.properties
recursive_update(properties, new_properties)
stack.properties = properties
# Create the appropriate hosts & security group objects
stack.create_security_groups()
stack.create_hosts()
return stack
_stack_model_permissions = (
'create',
'admin',
)
_stack_object_permissions = (
'launch',
'view',
'update',
'ssh',
'provision',
'orchestrate',
'execute',
'start',
'stop',
'terminate',
'delete',
'admin',
)
stack_storage = FileSystemStorage(location=os.path.join(settings.FILE_STORAGE_DIRECTORY, 'stacks'))
# For map, pillar, and properties. Doesn't need to go in a sub directory
def get_local_file_path(instance, filename):
return '{0}-{1}/{2}'.format(instance.pk, instance.slug, filename)
# Orchestrate files go in formula directory
def get_orchestrate_file_path(instance, filename):
return '{0}-{1}/formulas/__stackdio__/{2}'.format(instance.pk, instance.slug, filename)
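# e.g. for a stack with pk=3 and slug='my-stack':
#   get_local_file_path       -> '3-my-stack/stack.props'
#   get_orchestrate_file_path -> '3-my-stack/formulas/__stackdio__/orchestrate.sls'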
class Stack(TimeStampedModel, TitleSlugDescriptionModel, StatusModel):
# Launch workflow:
PENDING = 'pending'
LAUNCHING = 'launching'
CONFIGURING = 'configuring'
SYNCING = 'syncing'
PROVISIONING = 'provisioning'
ORCHESTRATING = 'orchestrating'
FINALIZING = 'finalizing'
FINISHED = 'finished'
# Delete workflow:
# PENDING
DESTROYING = 'destroying'
# FINISHED
# Other actions
# LAUNCHING
STARTING = 'starting'
STOPPING = 'stopping'
TERMINATING = 'terminating'
EXECUTING_ACTION = 'executing_action'
# Errors
ERROR = 'error'
SAFE_STATES = [FINISHED, ERROR]
# Not sure?
OK = 'ok'
RUNNING = 'running'
REBOOTING = 'rebooting'
STATUS = Choices(PENDING, LAUNCHING, CONFIGURING, SYNCING, PROVISIONING,
ORCHESTRATING, FINALIZING, DESTROYING, FINISHED,
STARTING, STOPPING, TERMINATING, EXECUTING_ACTION, ERROR)
model_permissions = _stack_model_permissions
object_permissions = _stack_object_permissions
searchable_fields = ('title', 'description', 'history__status_detail')
class Meta:
ordering = ('title',)
default_permissions = tuple(set(_stack_model_permissions + _stack_object_permissions))
unique_together = ('title',)
# What blueprint did this stack derive from?
blueprint = models.ForeignKey('blueprints.Blueprint', related_name='stacks')
formula_versions = GenericRelation('formulas.FormulaVersion')
labels = GenericRelation('core.Label')
# An arbitrary namespace for this stack. Mainly useful for Blueprint
# hostname templates
namespace = models.CharField('Namespace', max_length=64)
create_users = models.BooleanField('Create SSH Users')
# Where on disk is the salt-cloud map file stored
map_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom salt top.sls file stored
top_file = DeletingFileField(
max_length=255,
null=True,
blank=True,
default=None,
storage=FileSystemStorage(location=settings.STACKDIO_CONFIG.salt_core_states))
# Where on disk is the custom orchestrate file stored
orchestrate_file = DeletingFileField(
max_length=255,
upload_to=get_orchestrate_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the global orchestrate file stored
global_orchestrate_file = DeletingFileField(
max_length=255,
upload_to=get_orchestrate_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom pillar file for custom configuration for
# all salt states used by the top file
pillar_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom pillar file for custom configuration for
# all salt states used by the top file
global_pillar_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# storage for properties file
props_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Use our custom manager object
objects = StackQuerySet.as_manager()
def __unicode__(self):
|
def set_status(self, event, status, detail, level=Level.INFO):
self.status = status
self.save()
self.history.create(event=event, status=status,
status_detail=detail, level=level)
def get_driver_hosts_map(self, host_ids=None):
"""
Stacks are comprised of multiple hosts. Each host may be
located in different cloud accounts. This method returns
a map of the underlying driver implementation and the hosts
        that are running in the account.
@param host_ids (list); a list of primary keys for the hosts
we're interested in
        @returns (dict); maps each provider driver implementation to a
            QuerySet of the matching host objects
"""
host_queryset = self.get_hosts(host_ids)
# Create an account -> hosts map
accounts = {}
for h in host_queryset:
accounts.setdefault(h.get_account(), []).append(h)
# Convert to a driver -> hosts map
result = {}
for account, hosts in accounts.items():
result[account.get_driver()] = host_queryset.filter(id__in=[h.id for h in hosts])
return result
def get_hosts(self, host_ids=None):
"""
Quick way of getting all hosts or a subset for this stack.
        @param host_ids (list); list of primary keys of hosts in this stack
@returns (QuerySet);
"""
if not host_ids:
return self.hosts.all()
return self.hosts.filter(id__in=host_ids)
def get_formulas(self):
return self.blueprint.get_formulas()
def get_tags(self):
tags = {}
for label in self.labels.all():
tags[label.key] = label.value
tags['stack_id'] = self.id
# No name allowed. salt-cloud uses this and it would break everything.
if 'Name' in tags:
del tags['Name']
return tags
@property
def properties(self):
if not self.props_file:
return {}
with open(self.props_file.path, 'r') as f:
return json.load(f)
@properties.setter
def properties(self, props):
props_json = json.dumps(props, indent=4)
if not self.props_file:
self.props_file.save('stack.props', ContentFile(props_json))
else:
with open(self.props_file.path, 'w') as f:
f.write(props_json)
def create_security_groups(self):
for hostdef in self.blueprint.host_definitions.all():
# create the managed security group for each host definition
# and assign the rules to the group
sg_name = 'stackdio-managed-{0}-stack-{1}'.format(
hostdef.slug,
self.pk
)
sg_description = 'stackd.io managed security group'
# cloud account and driver for the host definition
account = hostdef.cloud_image.account
if not account.create_security_groups:
logger.debug('Skipping creation of {0} because security group creation is turned '
'off for the account'.format(sg_name))
continue
driver = account.get_driver()
try:
sg_id = driver.create_security_group(sg_name,
sg_description,
delete_if_exists=True)
except Exception as e:
err_msg = 'Error creating security group: {0}'.format(str(e))
                self.set_status('create_security_groups', self.ERROR,
                                err_msg, Level.ERROR)
                # without re-raising, sg_id below would be unbound
                raise
logger.debug('Created security group {0}: {1}'.format(
sg_name,
sg_id
))
for access_rule in hostdef.access_rules.all():
driver.authorize_security_group(sg_id, {
'protocol': access_rule.protocol,
'from_port': access_rule.from_port,
'to_port': access_rule.to_port,
'rule': access_rule.rule,
})
# create the security group object that we can use for tracking
self.security_groups.create(
account=account,
blueprint_host_definition=hostdef,
name=sg_name,
description=sg_description,
group_id=sg_id,
is_managed=True
)
def create_hosts(self, host_definition=None, count=None, backfill=False):
"""
Creates host objects on this Stack. If no arguments are given, then
all hosts available based on the Stack's blueprint host definitions
will be created. If args are given, then only the `count` for the
given `host_definition` will be created.
@param host_definition (BlueprintHostDefinition object); the host
definition to use for creating new hosts. If None, all host
definitions for the stack's blueprint will be used.
@param count (int); the number of hosts to create. If None, all
hosts will be created.
@param backfill (bool); If True, then hosts will be created with
hostnames that fill in any gaps if necessary. If False, then
hostnames will start at the end of the host list. This is only
used when `host_definition` and `count` arguments are provided.
"""
created_hosts = []
if host_definition is None:
host_definitions = self.blueprint.host_definitions.all()
else:
host_definitions = [host_definition]
for hostdef in host_definitions:
hosts = self.hosts.all()
if count is None:
start, end = 0, hostdef.count
indexes = range(start, end)
elif not hosts:
start, end = 0, count
indexes = range(start, end)
else:
if backfill:
hosts = hosts.order_by('index')
# The set of existing host indexes
host_indexes = set([h.index for h in hosts])
# The last index available
last_index = sorted(host_indexes)[-1]
# The set of expected indexes based on the last known
# index
expected_indexes = set(range(last_index + 1))
                    # Any gaps in the expected indexes?
gaps = expected_indexes - host_indexes
indexes = []
if gaps:
indexes = list(gaps)
count -= len(indexes)
start = sorted(host_indexes)[-1] + 1
end = start + count
indexes += range(start, end)
else:
start = hosts.order_by('-index')[0].index + 1
end = start + count
indexes = xrange(start, end)
# all components defined in the host definition
components = hostdef.formula_components.all()
# iterate over the host definition count and create individual
# host records on the stack
for i in indexes:
hostname = hostdef.hostname_template.format(
namespace=self.namespace,
index=i
)
kwargs = dict(
index=i,
cloud_image=hostdef.cloud_image,
blueprint_host_definition=hostdef,
instance_size=hostdef.size,
hostname=hostname,
sir_price=hostdef.spot_price,
state=Host.PENDING
)
if hostdef.cloud_image.account.vpc_enabled:
kwargs['subnet_id'] = hostdef.subnet_id
else:
kwargs['availability_zone'] = hostdef.zone
host = self.hosts.create(**kwargs)
account = host.cloud_image.account
# Add in the cloud account default security groups as
# defined by an admin.
account_groups = set(list(
account.security_groups.filter(
is_default=True
)
))
host.security_groups.add(*account_groups)
if account.create_security_groups:
# Add in the security group provided by this host definition,
# but only if this functionality is enabled on the account
security_group = SecurityGroup.objects.get(
stack=self,
blueprint_host_definition=hostdef
)
host.security_groups.add(security_group)
# add formula components
host.formula_components.add(*components)
for volumedef in hostdef.volumes.all():
self.volumes.create(
host=host,
snapshot=volumedef.snapshot,
hostname=hostname,
device=volumedef.device,
mount_point=volumedef.mount_point
)
created_hosts.append(host)
return created_hosts
def generate_cloud_map(self):
# TODO: Figure out a way to make this provider agnostic
        # TODO: Should we store this somewhere instead of assuming the salt master is this host?
master = socket.getfqdn()
images = {}
hosts = self.hosts.all()
cluster_size = len(hosts)
for host in hosts:
# load provider yaml to extract default security groups
cloud_account = host.cloud_image.account
cloud_account_yaml = yaml.safe_load(cloud_account.yaml)[cloud_account.slug]
# pull various stuff we need for a host
roles = [c.sls_path for c in host.formula_components.all()]
instance_size = host.instance_size.title
security_groups = set([
sg.group_id for sg in host.security_groups.all()
])
volumes = host.volumes.all()
domain = cloud_account_yaml['append_domain']
fqdn = '{0}.{1}'.format(host.hostname, domain)
# The volumes will be defined on the map as well as in the grains.
# Those in the map are used by salt-cloud to create and attach
# the volumes (using the snapshot), whereas those on the grains
# are available for states and modules to play with (e.g., to
# mount the devices)
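        # e.g. a single map_volumes entry (values illustrative):
        #   {'device': '/dev/xvdj', 'mount_point': '/data',
        #    'filesystem_type': 'ext4', 'type': 'gp2', 'snapshot': 'snap-123'}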
map_volumes = []
for vol in volumes:
v = {
'device': vol.device,
'mount_point': vol.mount_point,
# filesystem_type doesn't matter, should remove soon
'filesystem_type': vol.snapshot.filesystem_type,
'type': 'gp2',
}
if vol.volume_id:
v['volume_id'] = vol.volume_id
else:
v['snapshot'] = vol.snapshot.snapshot_id
map_volumes.append(v)
host_metadata = {
'name': host.hostname,
# The parameters in the minion dict will be passed on
# to the minion and set in its default configuration
# at /etc/salt/minion. This is where you would override
# any default values set by salt-minion
'minion': {
'master': master,
'log_level': 'debug',
'log_level_logfile': 'debug',
'mine_functions': {
'grains.items': []
},
# Grains are very useful when you need to set some
# static information about a machine (e.g., what stack
# id its registered under or how many total machines
# are in the cluster)
'grains': {
'roles': roles,
'stack_id': int(self.pk),
'fqdn': fqdn,
'domain': domain,
'cluster_size': cluster_size,
'stack_pillar_file': self.pillar_file.path,
'volumes': map_volumes,
'cloud_account': host.cloud_image.account.slug,
'cloud_image': host.cloud_image.slug,
'namespace': self.namespace,
},
},
# The rest of the settings in the map are salt-cloud
# specific and control the VM in various ways
# depending on the cloud account being used.
'size': instance_size,
'securitygroupid': list(security_groups),
'volumes': map_volumes,
'delvol_on_destroy': True,
'del_all_vols_on_destroy': True,
}
if cloud_account.vpc_enabled:
host_metadata['subnetid'] = host.subnet_id
else:
host_metadata['availability_zone'] = host.availability_zone.title
# Add in spot instance config if needed
if host.sir_price:
host_metadata['spot_config'] = {
'spot_price': str(host.sir_price) # convert to string
}
images.setdefault(host.cloud_image.slug, {})[host.hostname] = host_metadata
return images
def generate_map_file(self):
images = self.generate_cloud_map()
map_file_yaml = yaml.safe_dump(images, default_flow_style=False)
if not self.map_file:
self.map_file.save('stack.map', ContentFile(map_file_yaml))
else:
with open(self.map_file.path, 'w') as f:
f.write(map_file_yaml)
def generate_top_file(self):
top_file_data = {
'base': {
'G@stack_id:{0}'.format(self.pk): [
{'match': 'compound'},
'core.*',
]
}
}
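        # For pk=42 the dumped YAML looks roughly like:
        #   base:
        #     'G@stack_id:42':
        #     - match: compound
        #     - core.*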
top_file_yaml = yaml.safe_dump(top_file_data, default_flow_style=False)
if not self.top_file:
self.top_file.save('stack_{0}_top.sls'.format(self.pk), ContentFile(top_file_yaml))
else:
with open(self.top_file.path, 'w') as f:
f.write(top_file_yaml)
def generate_orchestrate_file(self):
hosts = self.hosts.all()
stack_target = 'G@stack_id:{0}'.format(self.pk)
def _matcher(sls_set):
return ' and '.join(
[stack_target] + ['G@roles:{0}'.format(i) for i in sls_set]
)
groups = {}
for host in hosts:
for component in host.formula_components.all():
groups.setdefault(component.order, set()).add(component.sls_path)
orchestrate = {}
for order in sorted(groups.keys()):
for role in groups[order]:
orchestrate[role] = {
'salt.state': [
{'tgt': _matcher([role])},
{'tgt_type': 'compound'},
{'sls': role},
]
}
depend = order - 1
while depend >= 0:
if depend in groups.keys():
orchestrate[role]['salt.state'].append(
{'require': [{'salt': req} for req in groups[depend]]}
)
break
depend -= 1
yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
if not self.orchestrate_file:
self.orchestrate_file.save('orchestrate.sls', ContentFile(yaml_data))
else:
with open(self.orchestrate_file.path, 'w') as f:
f.write(yaml_data)
def generate_global_orchestrate_file(self):
accounts = set([host.cloud_image.account for host in self.hosts.all()])
orchestrate = {}
for account in accounts:
# Target the stack_id and cloud account
target = 'G@stack_id:{0} and G@cloud_account:{1}'.format(
self.id,
account.slug)
groups = {}
for component in account.formula_components.all():
groups.setdefault(component.order, set()).add(component.sls_path)
for order in sorted(groups.keys()):
for role in groups[order]:
state_title = '{0}_{1}'.format(account.slug, role)
orchestrate[state_title] = {
'salt.state': [
{'tgt': target},
{'tgt_type': 'compound'},
{'sls': role},
]
}
depend = order - 1
while depend >= 0:
if depend in groups.keys():
orchestrate[role]['salt.state'].append(
{'require': [{'salt': req} for req in groups[depend]]}
)
break
depend -= 1
yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
if not self.global_orchestrate_file:
self.global_orchestrate_file.save('global_orchestrate.sls', ContentFile(yaml_data))
else:
with open(self.global_orchestrate_file.path, 'w') as f:
f.write(yaml_data)
def generate_pillar_file(self, update_formulas=False):
# Import here to not cause circular imports
from stackdio.api.formulas.models import FormulaVersion
from stackdio.api.formulas.tasks import update_formula
users = []
# pull the create_ssh_users property from the stackd.io config file.
# If it's False, we won't create ssh users on the box.
if self.create_users:
user_permissions_map = get_users_with_perms(
self, attach_perms=True, with_superusers=True, with_group_users=True
)
for user, perms in user_permissions_map.items():
if 'ssh_stack' in perms:
if user.settings.public_key:
logger.debug('Granting {0} ssh permission to stack: {1}'.format(
user.username,
self.title,
))
users.append({
'username': user.username,
'public_key': user.settings.public_key,
'id': user.id,
})
else:
logger.debug(
'User {0} has ssh permission for stack {1}, but has no public key. '
'Skipping.'.format(
user.username,
self.title,
)
)
pillar_props = {
'__stackdio__': {
'users': users
}
}
# If any of the formulas we're using have default pillar
# data defined in its corresponding SPECFILE, we need to pull
# that into our stack pillar file.
# First get the unique set of formulas
formulas = set()
for host in self.hosts.all():
formulas.update([c.formula for c in host.formula_components.all()])
# Update the formulas if requested
if update_formulas:
for formula in formulas:
# Update the formula, and fail silently if there was an error.
if formula.private_git_repo:
logger.debug('Skipping private formula: {0}'.format(formula.uri))
continue
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
update_formula.si(formula.id, None, version, raise_exception=False)()
# for each unique formula, pull the properties from the SPECFILE
for formula in formulas:
recursive_update(pillar_props, formula.properties)
# Add in properties that were supplied via the blueprint and during
# stack creation
recursive_update(pillar_props, self.properties)
pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
if not self.pillar_file:
self.pillar_file.save('stack.pillar', ContentFile(pillar_file_yaml))
else:
with open(self.pillar_file.path, 'w') as f:
f.write(pillar_file_yaml)
def generate_global_pillar_file(self, update_formulas=False):
# Import here to not cause circular imports
from stackdio.api.formulas.models import FormulaVersion
from stackdio.api.formulas.tasks import update_formula
pillar_props = {}
# Find all of the globally used formulas for the stack
accounts = set(
[host.cloud_image.account for
host in self.hosts.all()]
)
global_formulas = []
for account in accounts:
global_formulas.extend(account.get_formulas())
# Update the formulas if requested
if update_formulas:
for formula in global_formulas:
# Update the formula, and fail silently if there was an error.
if formula.private_git_repo:
logger.debug('Skipping private formula: {0}'.format(formula.uri))
continue
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
update_formula.si(formula.id, None, version, raise_exception=False)()
# Add the global formulas into the props
for formula in set(global_formulas):
recursive_update(pillar_props, formula.properties)
# Add in the account properties AFTER the stack properties
for account in accounts:
recursive_update(pillar_props,
account.global_orchestration_properties)
pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
if not self.global_pillar_file:
self.global_pillar_file.save('stack.global_pillar', ContentFile(pillar_file_yaml))
else:
with open(self.global_pillar_file.path, 'w') as f:
f.write(pillar_file_yaml)
def query_hosts(self, force=False):
"""
Uses salt-cloud to query all the hosts for the given stack id.
"""
CACHE_KEY = 'salt-cloud-full-query'
cached_result = cache.get(CACHE_KEY)
if cached_result and not force:
logger.debug('salt-cloud query result cached')
result = cached_result
else:
logger.debug('salt-cloud query result not cached, retrieving')
logger.info('get_hosts_info: {0!r}'.format(self))
salt_cloud = salt.cloud.CloudClient(settings.STACKDIO_CONFIG.salt_cloud_config)
result = salt_cloud.full_query()
# Cache the result for a minute
cache.set(CACHE_KEY, result, 60)
# yaml_result contains all host information in the stack, but
# we have to dig a bit to get individual host metadata out
# of account and provider type dictionaries
host_result = {}
for host in self.hosts.all():
account = host.get_account()
provider = account.provider
# each host is buried in a cloud provider type dict that's
# inside a cloud account name dict
# Grab the list of hosts
host_map = result.get(account.slug, {}).get(provider.name, {})
# Grab the individual host
host_result[host.hostname] = host_map.get(host.hostname, None)
return host_result
def get_root_directory(self):
if self.map_file:
return os.path.dirname(self.map_file.path)
if self.props_file:
return os.path.dirname(self.props_file.path)
return None
def get_log_directory(self):
root_dir = self.get_root_directory()
log_dir = os.path.join(root_dir, 'logs')
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
return log_dir
def get_security_groups(self):
return SecurityGroup.objects.filter(is_managed=True,
hosts__stack=self).distinct()
def get_role_list(self):
roles = set()
for bhd in self.blueprint.host_definitions.all():
for formula_component in bhd.formula_components.all():
roles.add(formula_component.sls_path)
return list(roles)
class StackHistory(TimeStampedModel, StatusDetailModel):
class Meta:
verbose_name_plural = 'stack history'
ordering = ['-created', '-id']
default_permissions = ()
STATUS = Stack.STATUS
stack = models.ForeignKey('Stack', related_name='history')
# What 'event' (method name, task name, etc) that caused
# this status update
event = models.CharField(max_length=128)
# The human-readable description of the event
# status = models.TextField(blank=True)
# Optional: level (DEBUG, INFO, WARNING, ERROR, etc)
level = models.CharField(max_length=16, choices=(
(Level.DEBUG, Level.DEBUG),
(Level.INFO, Level.INFO),
(Level.WARN, Level.WARN),
(Level.ERROR, Level.ERROR),
))
class StackCommand(TimeStampedModel, StatusModel):
WAITING = 'waiting'
RUNNING = 'running'
FINISHED = 'finished'
ERROR = 'error'
STATUS = Choices(WAITING, RUNNING, FINISHED, ERROR)
class Meta:
verbose_name_plural = 'stack actions'
default_permissions = ()
stack = models.ForeignKey('Stack', related_name='commands')
# The started executing
start = models.DateTimeField('Start Time', blank=True, default=now)
# Which hosts we want to target
host_target = models.CharField('Host Target', max_length=255)
# The command to be run (for custom actions)
command = models.TextField('Command')
# The output from the action
std_out_storage = models.TextField()
# The error output from the action
std_err_storage = models.TextField()
@property
def std_out(self):
if self.std_out_storage != "":
return json.loads(self.std_out_storage)
else:
return []
@property
def std_err(self):
return self.std_err_storage
@property
def submit_time(self):
return self.created
@property
def start_time(self):
if self.status in (self.RUNNING, self.FINISHED):
return self.start
else:
return ''
@property
def finish_time(self):
if self.status == self.FINISHED:
return self.modified
else:
return ''
class Host(TimeStampedModel, StatusDetailModel):
PENDING = 'pending'
OK = 'ok'
DELETING = 'deleting'
STATUS = Choices(PENDING, OK, DELETING)
class Meta:
ordering = ['blueprint_host_definition', '-index']
default_permissions = ()
# TODO: We should be using generic foreign keys here to a cloud account
# specific implementation of a Host object. I'm not exactly sure how this
# will work, but I think by using Django's content type system we can make
# it work...just not sure how easy it will be to extend, maintain, etc.
stack = models.ForeignKey('Stack',
related_name='hosts')
cloud_image = models.ForeignKey('cloud.CloudImage',
related_name='hosts')
instance_size = models.ForeignKey('cloud.CloudInstanceSize',
related_name='hosts')
availability_zone = models.ForeignKey('cloud.CloudZone',
null=True,
related_name='hosts')
subnet_id = models.CharField('Subnet ID', max_length=32, blank=True, default='')
blueprint_host_definition = models.ForeignKey(
'blueprints.BlueprintHostDefinition',
related_name='hosts')
hostname = models.CharField('Hostname', max_length=64)
index = models.IntegerField('Index')
security_groups = models.ManyToManyField('cloud.SecurityGroup',
related_name='hosts')
# The machine state as provided by the cloud account
state = models.CharField('State', max_length=32, default='unknown')
state_reason = models.CharField('State Reason', max_length=255, default='', blank=True)
# This must be updated automatically after the host is online.
# After salt-cloud has launched VMs, we will need to look up
# the DNS name set by whatever cloud provider is being used
# and set it here
provider_dns = models.CharField('Provider DNS', max_length=64, blank=True)
provider_private_dns = models.CharField('Provider Private DNS', max_length=64, blank=True)
provider_private_ip = models.CharField('Provider Private IP Address', max_length=64, blank=True)
# The FQDN for the host. This includes the hostname and the
# domain if it was registered with DNS
fqdn = models.CharField('FQDN', max_length=255, blank=True)
# Instance id of the running host. This is provided by the cloud
# provider
instance_id = models.CharField('Instance ID', max_length=32, blank=True)
# Spot instance request ID will be populated when metadata is refreshed
# if the host has been configured to launch spot instances. By default,
# it will be unknown and will be set to NA if spot instances were not
# used.
sir_id = models.CharField('SIR ID',
max_length=32,
default='unknown')
# The spot instance price for this host if using spot instances
sir_price = models.DecimalField('Spot Price',
max_digits=5,
decimal_places=2,
null=True)
def __unicode__(self):
return self.hostname
@property
def provider_metadata(self):
metadata = self.stack.query_hosts()
return metadata[self.hostname]
@property
def formula_components(self):
return self.blueprint_host_definition.formula_components
def get_account(self):
return self.cloud_image.account
def get_provider(self):
return self.get_account().provider
def get_driver(self):
return self.cloud_image.get_driver()
| return u'{0} (id={1})'.format(self.title, self.id) | identifier_body |
# -*- coding: utf-8 -*-
# Copyright 2016, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
import socket
import salt.cloud
import yaml
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.db import models, transaction
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.utils.timezone import now
from django_extensions.db.models import (
TimeStampedModel,
TitleSlugDescriptionModel,
)
from guardian.shortcuts import get_users_with_perms
from model_utils import Choices
from model_utils.models import StatusModel
from stackdio.core.fields import DeletingFileField
from stackdio.core.utils import recursive_update
from stackdio.api.cloud.models import SecurityGroup
PROTOCOL_CHOICES = [
('tcp', 'TCP'),
('udp', 'UDP'),
('icmp', 'ICMP'),
]
logger = logging.getLogger(__name__)
HOST_INDEX_PATTERN = re.compile(r'.*-.*-(\d+)')
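# For illustration: this pattern pulls the numeric index off the end of a
# generated hostname, e.g. HOST_INDEX_PATTERN.match('prod-web-12') captures
# '12' (the hostname itself is hypothetical).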
def get_hostnames_from_hostdefs(hostdefs, username='', namespace=''):
hostnames = []
for hostdef in hostdefs:
for i in xrange(hostdef.count):
hostnames.append(
hostdef.hostname_template.format(
namespace=namespace,
username=username,
index=i
)
)
return hostnames
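# A minimal sketch of the expansion (hypothetical values): a hostdef with
# count=2 and hostname_template '{namespace}-web-{index}' yields
#   get_hostnames_from_hostdefs([hostdef], namespace='prod')
#   -> ['prod-web-0', 'prod-web-1']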
class StackCreationException(Exception):
def __init__(self, errors, *args, **kwargs):
self.errors = errors
super(StackCreationException, self).__init__(*args, **kwargs)
class Level(object):
DEBUG = 'DEBUG'
INFO = 'INFO'
WARN = 'WARNING'
ERROR = 'ERROR'
class StatusDetailModel(StatusModel):
status_detail = models.TextField(blank=True)
class Meta:
abstract = True
default_permissions = ()
def set_status(self, status, detail=''):
self.status = status
self.status_detail = detail
return self.save()
class StackQuerySet(models.QuerySet):
def create(self, **kwargs):
new_properties = kwargs.pop('properties', {})
with transaction.atomic(using=self.db):
stack = super(StackQuerySet, self).create(**kwargs)
# manage the properties
properties = stack.blueprint.properties
recursive_update(properties, new_properties)
stack.properties = properties
# Create the appropriate hosts & security group objects
stack.create_security_groups()
stack.create_hosts()
return stack
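# A typical call through the custom manager (field values hypothetical):
#   stack = Stack.objects.create(blueprint=bp, title='web tier',
#                                namespace='prod', create_users=True,
#                                properties={'java': {'version': '8'}})
# builds the stack, its managed security groups, and its host records in
# one atomic transaction.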
_stack_model_permissions = (
'create',
'admin',
)
_stack_object_permissions = (
'launch',
'view',
'update',
'ssh',
'provision',
'orchestrate',
'execute',
'start',
'stop',
'terminate',
'delete',
'admin',
)
stack_storage = FileSystemStorage(location=os.path.join(settings.FILE_STORAGE_DIRECTORY, 'stacks'))
# For the map, pillar, and properties files. These don't need to go in a subdirectory
def get_local_file_path(instance, filename):
return '{0}-{1}/{2}'.format(instance.pk, instance.slug, filename)
# Orchestrate files go in formula directory
def get_orchestrate_file_path(instance, filename):
return '{0}-{1}/formulas/__stackdio__/{2}'.format(instance.pk, instance.slug, filename)
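# For illustration (hypothetical pk/slug): a stack with pk=3 and
# slug='my-stack' would store files at
#   get_local_file_path(stack, 'stack.map')
#       -> '3-my-stack/stack.map'
#   get_orchestrate_file_path(stack, 'orchestrate.sls')
#       -> '3-my-stack/formulas/__stackdio__/orchestrate.sls'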
class Stack(TimeStampedModel, TitleSlugDescriptionModel, StatusModel):
# Launch workflow:
PENDING = 'pending'
LAUNCHING = 'launching'
CONFIGURING = 'configuring'
SYNCING = 'syncing'
PROVISIONING = 'provisioning'
ORCHESTRATING = 'orchestrating'
FINALIZING = 'finalizing'
FINISHED = 'finished'
# Delete workflow:
# PENDING
DESTROYING = 'destroying'
# FINISHED
# Other actions
# LAUNCHING
STARTING = 'starting'
STOPPING = 'stopping'
TERMINATING = 'terminating'
EXECUTING_ACTION = 'executing_action'
# Errors
ERROR = 'error'
SAFE_STATES = [FINISHED, ERROR]
# Not sure?
OK = 'ok'
RUNNING = 'running'
REBOOTING = 'rebooting'
STATUS = Choices(PENDING, LAUNCHING, CONFIGURING, SYNCING, PROVISIONING,
ORCHESTRATING, FINALIZING, DESTROYING, FINISHED,
STARTING, STOPPING, TERMINATING, EXECUTING_ACTION, ERROR)
model_permissions = _stack_model_permissions
object_permissions = _stack_object_permissions
searchable_fields = ('title', 'description', 'history__status_detail')
class Meta:
ordering = ('title',)
default_permissions = tuple(set(_stack_model_permissions + _stack_object_permissions))
unique_together = ('title',)
# What blueprint did this stack derive from?
blueprint = models.ForeignKey('blueprints.Blueprint', related_name='stacks')
formula_versions = GenericRelation('formulas.FormulaVersion')
labels = GenericRelation('core.Label')
# An arbitrary namespace for this stack. Mainly useful for Blueprint
# hostname templates
namespace = models.CharField('Namespace', max_length=64)
create_users = models.BooleanField('Create SSH Users')
# Where on disk is the salt-cloud map file stored
map_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom salt top.sls file stored
top_file = DeletingFileField(
max_length=255,
null=True,
blank=True,
default=None,
storage=FileSystemStorage(location=settings.STACKDIO_CONFIG.salt_core_states))
# Where on disk is the custom orchestrate file stored
orchestrate_file = DeletingFileField(
max_length=255,
upload_to=get_orchestrate_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the global orchestrate file stored
global_orchestrate_file = DeletingFileField(
max_length=255,
upload_to=get_orchestrate_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom pillar file for custom configuration for
# all salt states used by the top file
pillar_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom pillar file for custom configuration for
# all salt states used by the top file
global_pillar_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# storage for properties file
props_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Use our custom manager object
objects = StackQuerySet.as_manager()
def __unicode__(self):
return u'{0} (id={1})'.format(self.title, self.id)
def set_status(self, event, status, detail, level=Level.INFO):
self.status = status
self.save()
self.history.create(event=event, status=status,
status_detail=detail, level=level)
def get_driver_hosts_map(self, host_ids=None):
"""
        Stacks are comprised of multiple hosts. Each host may be
        located in a different cloud account. This method returns
        a map from the underlying driver implementation to the hosts
        that are running in that account.
@param host_ids (list); a list of primary keys for the hosts
we're interested in
@returns (dict); each key is a provider driver implementation
with QuerySet value for the matching host objects
"""
host_queryset = self.get_hosts(host_ids)
# Create an account -> hosts map
accounts = {}
for h in host_queryset:
accounts.setdefault(h.get_account(), []).append(h)
# Convert to a driver -> hosts map
result = {}
for account, hosts in accounts.items():
result[account.get_driver()] = host_queryset.filter(id__in=[h.id for h in hosts])
return result
def get_hosts(self, host_ids=None):
"""
Quick way of getting all hosts or a subset for this stack.
@host_ids (list); list of primary keys of hosts in this stack
@returns (QuerySet);
"""
if not host_ids:
return self.hosts.all()
return self.hosts.filter(id__in=host_ids)
def get_formulas(self):
return self.blueprint.get_formulas()
def get_tags(self):
tags = {}
for label in self.labels.all():
tags[label.key] = label.value
tags['stack_id'] = self.id
# No name allowed. salt-cloud uses this and it would break everything.
if 'Name' in tags:
del tags['Name']
return tags
@property
def properties(self):
if not self.props_file:
return {}
with open(self.props_file.path, 'r') as f:
return json.load(f)
@properties.setter
def properties(self, props):
props_json = json.dumps(props, indent=4)
if not self.props_file:
self.props_file.save('stack.props', ContentFile(props_json))
else:
with open(self.props_file.path, 'w') as f:
f.write(props_json)
def create_security_groups(self):
for hostdef in self.blueprint.host_definitions.all():
# create the managed security group for each host definition
# and assign the rules to the group
sg_name = 'stackdio-managed-{0}-stack-{1}'.format(
hostdef.slug,
self.pk
)
sg_description = 'stackd.io managed security group'
# cloud account and driver for the host definition
account = hostdef.cloud_image.account
if not account.create_security_groups:
logger.debug('Skipping creation of {0} because security group creation is turned '
'off for the account'.format(sg_name))
continue
driver = account.get_driver()
try:
sg_id = driver.create_security_group(sg_name,
sg_description,
delete_if_exists=True)
except Exception as e:
err_msg = 'Error creating security group: {0}'.format(str(e))
                self.set_status('create_security_groups', self.ERROR,
                                err_msg, Level.ERROR)
                # sg_id is unbound when group creation fails, so re-raise
                # instead of falling through to the logging and rule setup
                raise
logger.debug('Created security group {0}: {1}'.format(
sg_name,
sg_id
))
for access_rule in hostdef.access_rules.all():
driver.authorize_security_group(sg_id, {
'protocol': access_rule.protocol,
'from_port': access_rule.from_port,
'to_port': access_rule.to_port,
'rule': access_rule.rule,
})
# create the security group object that we can use for tracking
self.security_groups.create(
account=account,
blueprint_host_definition=hostdef,
name=sg_name,
description=sg_description,
group_id=sg_id,
is_managed=True
)
def create_hosts(self, host_definition=None, count=None, backfill=False):
"""
Creates host objects on this Stack. If no arguments are given, then
all hosts available based on the Stack's blueprint host definitions
will be created. If args are given, then only the `count` for the
given `host_definition` will be created.
@param host_definition (BlueprintHostDefinition object); the host
definition to use for creating new hosts. If None, all host
definitions for the stack's blueprint will be used.
@param count (int); the number of hosts to create. If None, all
hosts will be created.
@param backfill (bool); If True, then hosts will be created with
hostnames that fill in any gaps if necessary. If False, then
hostnames will start at the end of the host list. This is only
used when `host_definition` and `count` arguments are provided.
"""
created_hosts = []
if host_definition is None:
host_definitions = self.blueprint.host_definitions.all()
else:
host_definitions = [host_definition]
for hostdef in host_definitions:
hosts = self.hosts.all()
if count is None:
start, end = 0, hostdef.count
indexes = range(start, end)
elif not hosts:
start, end = 0, count
indexes = range(start, end)
else:
if backfill:
hosts = hosts.order_by('index')
# The set of existing host indexes
host_indexes = set([h.index for h in hosts])
                    # The highest index currently in use
last_index = sorted(host_indexes)[-1]
# The set of expected indexes based on the last known
# index
expected_indexes = set(range(last_index + 1))
                    # Any gaps in the expected indexes?
gaps = expected_indexes - host_indexes
indexes = []
if gaps:
indexes = list(gaps)
count -= len(indexes)
start = sorted(host_indexes)[-1] + 1
end = start + count
indexes += range(start, end)
else:
start = hosts.order_by('-index')[0].index + 1
end = start + count
indexes = xrange(start, end)
# all components defined in the host definition
components = hostdef.formula_components.all()
# iterate over the host definition count and create individual
# host records on the stack
for i in indexes:
hostname = hostdef.hostname_template.format(
namespace=self.namespace,
index=i
)
kwargs = dict(
index=i,
cloud_image=hostdef.cloud_image,
blueprint_host_definition=hostdef,
instance_size=hostdef.size,
hostname=hostname,
sir_price=hostdef.spot_price,
state=Host.PENDING
)
if hostdef.cloud_image.account.vpc_enabled:
kwargs['subnet_id'] = hostdef.subnet_id
else:
kwargs['availability_zone'] = hostdef.zone
host = self.hosts.create(**kwargs)
account = host.cloud_image.account
# Add in the cloud account default security groups as
# defined by an admin.
account_groups = set(list(
account.security_groups.filter(
is_default=True
)
))
host.security_groups.add(*account_groups)
if account.create_security_groups:
# Add in the security group provided by this host definition,
# but only if this functionality is enabled on the account
security_group = SecurityGroup.objects.get(
stack=self,
blueprint_host_definition=hostdef
)
host.security_groups.add(security_group)
# add formula components
host.formula_components.add(*components)
for volumedef in hostdef.volumes.all():
self.volumes.create(
host=host,
snapshot=volumedef.snapshot,
hostname=hostname,
device=volumedef.device,
mount_point=volumedef.mount_point
)
created_hosts.append(host)
return created_hosts
def generate_cloud_map(self):
# TODO: Figure out a way to make this provider agnostic
        # TODO: Should we store the master hostname somewhere instead of assuming it's this machine?
master = socket.getfqdn()
images = {}
hosts = self.hosts.all()
cluster_size = len(hosts)
for host in hosts:
# load provider yaml to extract default security groups
cloud_account = host.cloud_image.account
cloud_account_yaml = yaml.safe_load(cloud_account.yaml)[cloud_account.slug]
# pull various stuff we need for a host
roles = [c.sls_path for c in host.formula_components.all()]
instance_size = host.instance_size.title
security_groups = set([
sg.group_id for sg in host.security_groups.all()
])
volumes = host.volumes.all()
domain = cloud_account_yaml['append_domain']
fqdn = '{0}.{1}'.format(host.hostname, domain)
# The volumes will be defined on the map as well as in the grains.
# Those in the map are used by salt-cloud to create and attach
# the volumes (using the snapshot), whereas those on the grains
# are available for states and modules to play with (e.g., to
# mount the devices)
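            # Example entry (hypothetical device/snapshot values):
            #   {'device': '/dev/sdb', 'mount_point': '/data',
            #    'filesystem_type': 'ext4', 'type': 'gp2',
            #    'snapshot': 'snap-0123abcd'}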
map_volumes = []
for vol in volumes:
v = {
'device': vol.device,
'mount_point': vol.mount_point,
# filesystem_type doesn't matter, should remove soon
'filesystem_type': vol.snapshot.filesystem_type,
'type': 'gp2',
}
if vol.volume_id:
v['volume_id'] = vol.volume_id
else:
v['snapshot'] = vol.snapshot.snapshot_id
map_volumes.append(v)
host_metadata = {
'name': host.hostname,
# The parameters in the minion dict will be passed on
# to the minion and set in its default configuration
# at /etc/salt/minion. This is where you would override
# any default values set by salt-minion
'minion': {
'master': master,
'log_level': 'debug',
'log_level_logfile': 'debug',
'mine_functions': {
'grains.items': []
},
# Grains are very useful when you need to set some
# static information about a machine (e.g., what stack
                    # id it's registered under or how many total machines
# are in the cluster)
'grains': {
'roles': roles,
'stack_id': int(self.pk),
'fqdn': fqdn,
'domain': domain,
'cluster_size': cluster_size,
'stack_pillar_file': self.pillar_file.path,
'volumes': map_volumes,
'cloud_account': host.cloud_image.account.slug,
'cloud_image': host.cloud_image.slug,
'namespace': self.namespace,
},
},
# The rest of the settings in the map are salt-cloud
# specific and control the VM in various ways
# depending on the cloud account being used.
'size': instance_size,
'securitygroupid': list(security_groups),
'volumes': map_volumes,
'delvol_on_destroy': True,
'del_all_vols_on_destroy': True,
}
if cloud_account.vpc_enabled:
host_metadata['subnetid'] = host.subnet_id
else:
host_metadata['availability_zone'] = host.availability_zone.title
# Add in spot instance config if needed
if host.sir_price:
host_metadata['spot_config'] = {
'spot_price': str(host.sir_price) # convert to string
}
images.setdefault(host.cloud_image.slug, {})[host.hostname] = host_metadata
return images
def generate_map_file(self):
images = self.generate_cloud_map()
map_file_yaml = yaml.safe_dump(images, default_flow_style=False)
if not self.map_file:
self.map_file.save('stack.map', ContentFile(map_file_yaml))
else:
with open(self.map_file.path, 'w') as f:
f.write(map_file_yaml)
def generate_top_file(self):
top_file_data = {
'base': {
'G@stack_id:{0}'.format(self.pk): [
{'match': 'compound'},
'core.*',
]
}
}
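        # Dumped with yaml.safe_dump this renders roughly as follows,
        # assuming a hypothetical stack pk of 42:
        #   base:
        #     G@stack_id:42:
        #     - match: compound
        #     - core.*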
top_file_yaml = yaml.safe_dump(top_file_data, default_flow_style=False)
if not self.top_file:
self.top_file.save('stack_{0}_top.sls'.format(self.pk), ContentFile(top_file_yaml))
else:
with open(self.top_file.path, 'w') as f:
f.write(top_file_yaml)
def generate_orchestrate_file(self):
hosts = self.hosts.all()
stack_target = 'G@stack_id:{0}'.format(self.pk)
def _matcher(sls_set):
return ' and '.join(
[stack_target] + ['G@roles:{0}'.format(i) for i in sls_set]
)
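        # e.g. _matcher(['cdh5.hadoop.namenode']) would return the compound
        # target 'G@stack_id:42 and G@roles:cdh5.hadoop.namenode'
        # (pk and role values are hypothetical)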
groups = {}
for host in hosts:
for component in host.formula_components.all():
groups.setdefault(component.order, set()).add(component.sls_path)
orchestrate = {}
for order in sorted(groups.keys()):
for role in groups[order]:
orchestrate[role] = {
'salt.state': [
{'tgt': _matcher([role])},
{'tgt_type': 'compound'},
{'sls': role},
]
}
depend = order - 1
while depend >= 0:
if depend in groups.keys():
orchestrate[role]['salt.state'].append(
{'require': [{'salt': req} for req in groups[depend]]}
)
break
depend -= 1
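        # Each generated entry is shaped like this sketch (role names
        # hypothetical):
        #   cdh5.hadoop.datanode:
        #     salt.state:
        #     - tgt: G@stack_id:42 and G@roles:cdh5.hadoop.datanode
        #     - tgt_type: compound
        #     - sls: cdh5.hadoop.datanode
        #     - require:
        #       - salt: cdh5.hadoop.namenode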
yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
if not self.orchestrate_file:
self.orchestrate_file.save('orchestrate.sls', ContentFile(yaml_data))
else:
with open(self.orchestrate_file.path, 'w') as f:
f.write(yaml_data)
def generate_global_orchestrate_file(self):
accounts = set([host.cloud_image.account for host in self.hosts.all()])
orchestrate = {}
for account in accounts:
# Target the stack_id and cloud account
target = 'G@stack_id:{0} and G@cloud_account:{1}'.format(
self.id,
account.slug)
groups = {}
for component in account.formula_components.all():
groups.setdefault(component.order, set()).add(component.sls_path)
for order in sorted(groups.keys()):
for role in groups[order]:
state_title = '{0}_{1}'.format(account.slug, role)
orchestrate[state_title] = {
'salt.state': [
{'tgt': target},
{'tgt_type': 'compound'},
{'sls': role},
]
}
depend = order - 1
while depend >= 0:
if depend in groups.keys():
                            orchestrate[state_title]['salt.state'].append(
{'require': [{'salt': req} for req in groups[depend]]}
)
break
depend -= 1
yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
if not self.global_orchestrate_file:
self.global_orchestrate_file.save('global_orchestrate.sls', ContentFile(yaml_data))
else:
with open(self.global_orchestrate_file.path, 'w') as f:
f.write(yaml_data)
def generate_pillar_file(self, update_formulas=False):
# Import here to not cause circular imports
from stackdio.api.formulas.models import FormulaVersion
from stackdio.api.formulas.tasks import update_formula
users = []
        # Check the create_users flag on the stack.
        # If it's False, we won't create ssh users on the box.
if self.create_users:
user_permissions_map = get_users_with_perms(
self, attach_perms=True, with_superusers=True, with_group_users=True
)
for user, perms in user_permissions_map.items():
if 'ssh_stack' in perms:
if user.settings.public_key:
logger.debug('Granting {0} ssh permission to stack: {1}'.format(
user.username,
self.title,
))
users.append({
'username': user.username,
'public_key': user.settings.public_key,
'id': user.id,
})
else:
logger.debug(
'User {0} has ssh permission for stack {1}, but has no public key. '
'Skipping.'.format(
user.username,
self.title,
)
)
pillar_props = {
'__stackdio__': {
'users': users
}
}
# If any of the formulas we're using have default pillar
        # data defined in their corresponding SPECFILEs, we need to pull
# that into our stack pillar file.
# First get the unique set of formulas
formulas = set()
for host in self.hosts.all():
formulas.update([c.formula for c in host.formula_components.all()])
# Update the formulas if requested
if update_formulas:
for formula in formulas:
# Update the formula, and fail silently if there was an error.
if formula.private_git_repo:
logger.debug('Skipping private formula: {0}'.format(formula.uri))
continue
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
update_formula.si(formula.id, None, version, raise_exception=False)()
# for each unique formula, pull the properties from the SPECFILE
for formula in formulas:
recursive_update(pillar_props, formula.properties)
# Add in properties that were supplied via the blueprint and during
# stack creation
recursive_update(pillar_props, self.properties)
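        # Merge-order sketch (hypothetical keys): if a formula SPECFILE
        # sets {'java': {'version': '7'}} and the stack properties set
        # {'java': {'version': '8'}}, the pillar ends up with '8', since
        # later recursive_update calls win.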
pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
if not self.pillar_file:
self.pillar_file.save('stack.pillar', ContentFile(pillar_file_yaml))
else:
with open(self.pillar_file.path, 'w') as f:
f.write(pillar_file_yaml)
def generate_global_pillar_file(self, update_formulas=False):
# Import here to not cause circular imports
from stackdio.api.formulas.models import FormulaVersion
from stackdio.api.formulas.tasks import update_formula
pillar_props = {}
# Find all of the globally used formulas for the stack
accounts = set(
[host.cloud_image.account for
host in self.hosts.all()]
)
global_formulas = []
for account in accounts:
global_formulas.extend(account.get_formulas())
# Update the formulas if requested
if update_formulas:
for formula in global_formulas:
# Update the formula, and fail silently if there was an error.
if formula.private_git_repo:
logger.debug('Skipping private formula: {0}'.format(formula.uri))
continue
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
update_formula.si(formula.id, None, version, raise_exception=False)()
# Add the global formulas into the props
for formula in set(global_formulas):
recursive_update(pillar_props, formula.properties)
        # Add in the account properties AFTER the formula properties
for account in accounts:
recursive_update(pillar_props,
account.global_orchestration_properties)
pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
if not self.global_pillar_file:
self.global_pillar_file.save('stack.global_pillar', ContentFile(pillar_file_yaml))
else:
with open(self.global_pillar_file.path, 'w') as f:
f.write(pillar_file_yaml)
def query_hosts(self, force=False):
"""
Uses salt-cloud to query all the hosts for the given stack id.
"""
CACHE_KEY = 'salt-cloud-full-query'
cached_result = cache.get(CACHE_KEY)
if cached_result and not force:
logger.debug('salt-cloud query result cached')
result = cached_result
else:
logger.debug('salt-cloud query result not cached, retrieving')
logger.info('get_hosts_info: {0!r}'.format(self))
salt_cloud = salt.cloud.CloudClient(settings.STACKDIO_CONFIG.salt_cloud_config)
result = salt_cloud.full_query()
# Cache the result for a minute
cache.set(CACHE_KEY, result, 60)
        # The query result contains all host information in the stack, but
# we have to dig a bit to get individual host metadata out
# of account and provider type dictionaries
host_result = {}
for host in self.hosts.all():
account = host.get_account()
provider = account.provider
# each host is buried in a cloud provider type dict that's
# inside a cloud account name dict
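            # e.g. result['my-account']['ec2']['prod-web-0']
            # (account, provider, and host names are hypothetical)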
# Grab the list of hosts
host_map = result.get(account.slug, {}).get(provider.name, {})
# Grab the individual host
host_result[host.hostname] = host_map.get(host.hostname, None)
return host_result
def get_root_directory(self):
if self.map_file:
return os.path.dirname(self.map_file.path)
if self.props_file:
return os.path.dirname(self.props_file.path)
return None
def get_log_directory(self):
root_dir = self.get_root_directory()
log_dir = os.path.join(root_dir, 'logs')
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
return log_dir
def get_security_groups(self):
return SecurityGroup.objects.filter(is_managed=True,
hosts__stack=self).distinct()
def get_role_list(self):
roles = set()
for bhd in self.blueprint.host_definitions.all():
for formula_component in bhd.formula_components.all():
roles.add(formula_component.sls_path)
return list(roles)
class StackHistory(TimeStampedModel, StatusDetailModel):
class Meta:
verbose_name_plural = 'stack history'
ordering = ['-created', '-id']
default_permissions = ()
STATUS = Stack.STATUS
stack = models.ForeignKey('Stack', related_name='history')
    # The 'event' (method name, task name, etc.) that caused
# this status update
event = models.CharField(max_length=128)
# The human-readable description of the event
# status = models.TextField(blank=True)
# Optional: level (DEBUG, INFO, WARNING, ERROR, etc)
level = models.CharField(max_length=16, choices=(
(Level.DEBUG, Level.DEBUG),
(Level.INFO, Level.INFO),
(Level.WARN, Level.WARN),
(Level.ERROR, Level.ERROR),
))
class StackCommand(TimeStampedModel, StatusModel):
WAITING = 'waiting'
RUNNING = 'running'
FINISHED = 'finished'
ERROR = 'error'
STATUS = Choices(WAITING, RUNNING, FINISHED, ERROR)
class Meta:
verbose_name_plural = 'stack actions'
default_permissions = ()
stack = models.ForeignKey('Stack', related_name='commands')
    # The time the command started executing
start = models.DateTimeField('Start Time', blank=True, default=now)
# Which hosts we want to target
host_target = models.CharField('Host Target', max_length=255)
# The command to be run (for custom actions)
command = models.TextField('Command')
# The output from the action
std_out_storage = models.TextField()
# The error output from the action
std_err_storage = models.TextField()
@property
def std_out(self):
if self.std_out_storage != "":
return json.loads(self.std_out_storage)
else:
return []
@property
def std_err(self):
return self.std_err_storage
@property
def submit_time(self):
return self.created
@property
def start_time(self):
if self.status in (self.RUNNING, self.FINISHED):
return self.start
else:
return ''
@property
def finish_time(self):
if self.status == self.FINISHED:
return self.modified
else:
return ''
class Host(TimeStampedModel, StatusDetailModel):
PENDING = 'pending'
OK = 'ok'
DELETING = 'deleting'
STATUS = Choices(PENDING, OK, DELETING)
class Meta:
ordering = ['blueprint_host_definition', '-index']
default_permissions = ()
# TODO: We should be using generic foreign keys here to a cloud account
# specific implementation of a Host object. I'm not exactly sure how this
# will work, but I think by using Django's content type system we can make
# it work...just not sure how easy it will be to extend, maintain, etc.
stack = models.ForeignKey('Stack',
related_name='hosts')
cloud_image = models.ForeignKey('cloud.CloudImage',
related_name='hosts')
instance_size = models.ForeignKey('cloud.CloudInstanceSize',
related_name='hosts')
availability_zone = models.ForeignKey('cloud.CloudZone',
null=True,
related_name='hosts')
subnet_id = models.CharField('Subnet ID', max_length=32, blank=True, default='')
blueprint_host_definition = models.ForeignKey(
'blueprints.BlueprintHostDefinition',
related_name='hosts')
hostname = models.CharField('Hostname', max_length=64)
index = models.IntegerField('Index')
security_groups = models.ManyToManyField('cloud.SecurityGroup',
related_name='hosts')
# The machine state as provided by the cloud account
state = models.CharField('State', max_length=32, default='unknown')
state_reason = models.CharField('State Reason', max_length=255, default='', blank=True)
# This must be updated automatically after the host is online.
# After salt-cloud has launched VMs, we will need to look up
# the DNS name set by whatever cloud provider is being used
# and set it here
provider_dns = models.CharField('Provider DNS', max_length=64, blank=True)
provider_private_dns = models.CharField('Provider Private DNS', max_length=64, blank=True)
provider_private_ip = models.CharField('Provider Private IP Address', max_length=64, blank=True)
# The FQDN for the host. This includes the hostname and the
# domain if it was registered with DNS
fqdn = models.CharField('FQDN', max_length=255, blank=True)
# Instance id of the running host. This is provided by the cloud
# provider
instance_id = models.CharField('Instance ID', max_length=32, blank=True)
# Spot instance request ID will be populated when metadata is refreshed
# if the host has been configured to launch spot instances. By default,
# it will be unknown and will be set to NA if spot instances were not
# used.
sir_id = models.CharField('SIR ID',
max_length=32,
default='unknown')
# The spot instance price for this host if using spot instances
sir_price = models.DecimalField('Spot Price',
max_digits=5,
decimal_places=2,
null=True)
def __unicode__(self):
return self.hostname
@property
def provider_metadata(self):
metadata = self.stack.query_hosts()
return metadata[self.hostname]
@property
    def formula_components(self):
return self.blueprint_host_definition.formula_components
def get_account(self):
return self.cloud_image.account
def get_provider(self):
return self.get_account().provider
def get_driver(self):
return self.cloud_image.get_driver()
models.py | # -*- coding: utf-8 -*-
# Copyright 2016, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
import socket
import salt.cloud
import yaml
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.db import models, transaction
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.utils.timezone import now
from django_extensions.db.models import (
TimeStampedModel,
TitleSlugDescriptionModel,
)
from guardian.shortcuts import get_users_with_perms
from model_utils import Choices
from model_utils.models import StatusModel
from stackdio.core.fields import DeletingFileField
from stackdio.core.utils import recursive_update
from stackdio.api.cloud.models import SecurityGroup
PROTOCOL_CHOICES = [
('tcp', 'TCP'),
('udp', 'UDP'),
('icmp', 'ICMP'),
]
logger = logging.getLogger(__name__)
HOST_INDEX_PATTERN = re.compile(r'.*-.*-(\d+)')
def get_hostnames_from_hostdefs(hostdefs, username='', namespace=''):
hostnames = []
for hostdef in hostdefs:
for i in xrange(hostdef.count):
hostnames.append(
hostdef.hostname_template.format(
namespace=namespace,
username=username,
index=i
)
)
return hostnames
class StackCreationException(Exception):
def __init__(self, errors, *args, **kwargs):
self.errors = errors
super(StackCreationException, self).__init__(*args, **kwargs)
class Level(object):
DEBUG = 'DEBUG'
INFO = 'INFO'
WARN = 'WARNING'
ERROR = 'ERROR'
class StatusDetailModel(StatusModel):
status_detail = models.TextField(blank=True)
class Meta:
abstract = True
default_permissions = ()
def set_status(self, status, detail=''):
self.status = status
self.status_detail = detail
return self.save()
class StackQuerySet(models.QuerySet):
def create(self, **kwargs):
new_properties = kwargs.pop('properties', {})
with transaction.atomic(using=self.db):
stack = super(StackQuerySet, self).create(**kwargs)
# manage the properties
properties = stack.blueprint.properties
recursive_update(properties, new_properties)
stack.properties = properties
# Create the appropriate hosts & security group objects
stack.create_security_groups()
stack.create_hosts()
return stack
_stack_model_permissions = (
'create',
'admin',
)
_stack_object_permissions = (
'launch',
'view',
'update',
'ssh',
'provision',
'orchestrate',
'execute',
'start',
'stop',
'terminate',
'delete',
'admin',
)
stack_storage = FileSystemStorage(location=os.path.join(settings.FILE_STORAGE_DIRECTORY, 'stacks'))
# For map, pillar, and properties. Doesn't need to go in a sub directory
def get_local_file_path(instance, filename):
return '{0}-{1}/{2}'.format(instance.pk, instance.slug, filename)
# Orchestrate files go in formula directory
def get_orchestrate_file_path(instance, filename):
return '{0}-{1}/formulas/__stackdio__/{2}'.format(instance.pk, instance.slug, filename)
class Stack(TimeStampedModel, TitleSlugDescriptionModel, StatusModel):
# Launch workflow:
PENDING = 'pending'
LAUNCHING = 'launching'
CONFIGURING = 'configuring'
SYNCING = 'syncing'
PROVISIONING = 'provisioning'
ORCHESTRATING = 'orchestrating'
FINALIZING = 'finalizing'
FINISHED = 'finished'
# Delete workflow:
# PENDING
DESTROYING = 'destroying'
# FINISHED
# Other actions
# LAUNCHING
STARTING = 'starting'
STOPPING = 'stopping'
TERMINATING = 'terminating'
EXECUTING_ACTION = 'executing_action'
# Errors
ERROR = 'error'
SAFE_STATES = [FINISHED, ERROR]
# Not sure?
OK = 'ok'
RUNNING = 'running'
REBOOTING = 'rebooting'
STATUS = Choices(PENDING, LAUNCHING, CONFIGURING, SYNCING, PROVISIONING,
ORCHESTRATING, FINALIZING, DESTROYING, FINISHED,
STARTING, STOPPING, TERMINATING, EXECUTING_ACTION, ERROR)
model_permissions = _stack_model_permissions
object_permissions = _stack_object_permissions
searchable_fields = ('title', 'description', 'history__status_detail')
class Meta:
ordering = ('title',)
default_permissions = tuple(set(_stack_model_permissions + _stack_object_permissions))
unique_together = ('title',)
# What blueprint did this stack derive from?
blueprint = models.ForeignKey('blueprints.Blueprint', related_name='stacks')
formula_versions = GenericRelation('formulas.FormulaVersion')
labels = GenericRelation('core.Label')
# An arbitrary namespace for this stack. Mainly useful for Blueprint
# hostname templates
namespace = models.CharField('Namespace', max_length=64)
create_users = models.BooleanField('Create SSH Users')
# Where on disk is the salt-cloud map file stored
map_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom salt top.sls file stored
top_file = DeletingFileField(
max_length=255,
null=True,
blank=True,
default=None,
storage=FileSystemStorage(location=settings.STACKDIO_CONFIG.salt_core_states))
# Where on disk is the custom orchestrate file stored
orchestrate_file = DeletingFileField(
max_length=255,
upload_to=get_orchestrate_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the global orchestrate file stored
global_orchestrate_file = DeletingFileField(
max_length=255,
upload_to=get_orchestrate_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom pillar file for custom configuration for
# all salt states used by the top file
pillar_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom pillar file for custom configuration for
# all salt states used by the top file
global_pillar_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# storage for properties file
props_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Use our custom manager object
objects = StackQuerySet.as_manager()
def __unicode__(self):
return u'{0} (id={1})'.format(self.title, self.id)
def set_status(self, event, status, detail, level=Level.INFO):
self.status = status
self.save()
self.history.create(event=event, status=status,
status_detail=detail, level=level)
def get_driver_hosts_map(self, host_ids=None):
"""
Stacks are comprised of multiple hosts. Each host may be
located in different cloud accounts. This method returns
a map of the underlying driver implementation and the hosts
that running in the account.
@param host_ids (list); a list of primary keys for the hosts
we're interested in
@returns (dict); each key is a provider driver implementation
with QuerySet value for the matching host objects
"""
host_queryset = self.get_hosts(host_ids)
# Create an account -> hosts map
accounts = {}
for h in host_queryset:
accounts.setdefault(h.get_account(), []).append(h)
# Convert to a driver -> hosts map
result = {}
for account, hosts in accounts.items():
result[account.get_driver()] = host_queryset.filter(id__in=[h.id for h in hosts])
return result
def get_hosts(self, host_ids=None):
"""
Quick way of getting all hosts or a subset for this stack.
@host_ids (list); list of primary keys of hosts in this stack
@returns (QuerySet);
"""
if not host_ids:
return self.hosts.all()
return self.hosts.filter(id__in=host_ids)
def get_formulas(self):
return self.blueprint.get_formulas()
def get_tags(self):
tags = {}
for label in self.labels.all():
tags[label.key] = label.value
tags['stack_id'] = self.id
# No name allowed. salt-cloud uses this and it would break everything.
if 'Name' in tags:
del tags['Name']
return tags
@property
def properties(self):
if not self.props_file:
return {}
with open(self.props_file.path, 'r') as f:
return json.load(f)
@properties.setter
def properties(self, props):
props_json = json.dumps(props, indent=4)
if not self.props_file:
self.props_file.save('stack.props', ContentFile(props_json))
else:
with open(self.props_file.path, 'w') as f:
f.write(props_json)
def create_security_groups(self):
for hostdef in self.blueprint.host_definitions.all():
# create the managed security group for each host definition
# and assign the rules to the group
sg_name = 'stackdio-managed-{0}-stack-{1}'.format(
hostdef.slug,
self.pk
)
sg_description = 'stackd.io managed security group'
# cloud account and driver for the host definition
account = hostdef.cloud_image.account
if not account.create_security_groups:
logger.debug('Skipping creation of {0} because security group creation is turned '
'off for the account'.format(sg_name))
continue
driver = account.get_driver()
try:
sg_id = driver.create_security_group(sg_name,
sg_description,
delete_if_exists=True)
except Exception as e:
err_msg = 'Error creating security group: {0}'.format(str(e))
self.set_status('create_security_groups', self.ERROR,
err_msg, Level.ERROR)
logger.debug('Created security group {0}: {1}'.format(
sg_name,
sg_id
))
for access_rule in hostdef.access_rules.all():
driver.authorize_security_group(sg_id, {
'protocol': access_rule.protocol,
'from_port': access_rule.from_port,
'to_port': access_rule.to_port,
'rule': access_rule.rule,
})
# create the security group object that we can use for tracking
self.security_groups.create(
account=account,
blueprint_host_definition=hostdef,
name=sg_name,
description=sg_description,
group_id=sg_id,
is_managed=True
)
def create_hosts(self, host_definition=None, count=None, backfill=False):
"""
Creates host objects on this Stack. If no arguments are given, then
all hosts available based on the Stack's blueprint host definitions
will be created. If args are given, then only the `count` for the
given `host_definition` will be created.
@param host_definition (BlueprintHostDefinition object); the host
definition to use for creating new hosts. If None, all host
definitions for the stack's blueprint will be used.
@param count (int); the number of hosts to create. If None, all
hosts will be created.
@param backfill (bool); If True, then hosts will be created with
hostnames that fill in any gaps if necessary. If False, then
hostnames will start at the end of the host list. This is only
used when `host_definition` and `count` arguments are provided.
"""
created_hosts = []
if host_definition is None:
host_definitions = self.blueprint.host_definitions.all()
else:
host_definitions = [host_definition]
for hostdef in host_definitions:
hosts = self.hosts.all()
if count is None:
start, end = 0, hostdef.count
indexes = range(start, end)
elif not hosts:
start, end = 0, count
indexes = range(start, end)
else:
if backfill:
hosts = hosts.order_by('index')
# The set of existing host indexes
host_indexes = set([h.index for h in hosts])
# The last index available
last_index = sorted(host_indexes)[-1]
# The set of expected indexes based on the last known
# index
expected_indexes = set(range(last_index + 1))
# Any gaps any the expected indexes?
gaps = expected_indexes - host_indexes
indexes = []
if gaps:
indexes = list(gaps)
count -= len(indexes)
start = sorted(host_indexes)[-1] + 1
end = start + count
indexes += range(start, end)
else:
start = hosts.order_by('-index')[0].index + 1
end = start + count
indexes = xrange(start, end)
# all components defined in the host definition
components = hostdef.formula_components.all()
# iterate over the host definition count and create individual
# host records on the stack
for i in indexes:
hostname = hostdef.hostname_template.format(
namespace=self.namespace,
index=i
)
kwargs = dict(
index=i,
cloud_image=hostdef.cloud_image,
blueprint_host_definition=hostdef,
instance_size=hostdef.size,
hostname=hostname,
sir_price=hostdef.spot_price,
state=Host.PENDING
)
if hostdef.cloud_image.account.vpc_enabled:
kwargs['subnet_id'] = hostdef.subnet_id
else:
kwargs['availability_zone'] = hostdef.zone
host = self.hosts.create(**kwargs)
account = host.cloud_image.account
# Add in the cloud account default security groups as
# defined by an admin.
account_groups = set(list(
account.security_groups.filter(
is_default=True
)
))
host.security_groups.add(*account_groups)
if account.create_security_groups:
# Add in the security group provided by this host definition,
# but only if this functionality is enabled on the account
security_group = SecurityGroup.objects.get(
stack=self,
blueprint_host_definition=hostdef
)
host.security_groups.add(security_group)
# add formula components
host.formula_components.add(*components)
for volumedef in hostdef.volumes.all():
self.volumes.create(
host=host,
snapshot=volumedef.snapshot,
hostname=hostname,
device=volumedef.device,
mount_point=volumedef.mount_point
)
created_hosts.append(host)
return created_hosts
def generate_cloud_map(self):
# TODO: Figure out a way to make this provider agnostic
# TODO: Should we store this somewhere instead of assuming
master = socket.getfqdn()
images = {}
hosts = self.hosts.all()
cluster_size = len(hosts)
for host in hosts:
# load provider yaml to extract default security groups
cloud_account = host.cloud_image.account
cloud_account_yaml = yaml.safe_load(cloud_account.yaml)[cloud_account.slug]
# pull various stuff we need for a host
roles = [c.sls_path for c in host.formula_components.all()]
instance_size = host.instance_size.title
security_groups = set([
sg.group_id for sg in host.security_groups.all()
])
volumes = host.volumes.all()
domain = cloud_account_yaml['append_domain']
fqdn = '{0}.{1}'.format(host.hostname, domain)
# The volumes will be defined on the map as well as in the grains.
# Those in the map are used by salt-cloud to create and attach
# the volumes (using the snapshot), whereas those on the grains
# are available for states and modules to play with (e.g., to
# mount the devices)
map_volumes = []
for vol in volumes:
v = {
'device': vol.device,
'mount_point': vol.mount_point,
# filesystem_type doesn't matter, should remove soon
'filesystem_type': vol.snapshot.filesystem_type,
'type': 'gp2',
}
if vol.volume_id:
v['volume_id'] = vol.volume_id
else:
v['snapshot'] = vol.snapshot.snapshot_id
map_volumes.append(v)
host_metadata = {
'name': host.hostname,
# The parameters in the minion dict will be passed on
# to the minion and set in its default configuration
# at /etc/salt/minion. This is where you would override
# any default values set by salt-minion
'minion': {
'master': master,
'log_level': 'debug',
'log_level_logfile': 'debug',
'mine_functions': {
'grains.items': []
},
# Grains are very useful when you need to set some
# static information about a machine (e.g., what stack
# id its registered under or how many total machines
# are in the cluster)
'grains': {
'roles': roles,
'stack_id': int(self.pk),
'fqdn': fqdn,
'domain': domain,
'cluster_size': cluster_size,
'stack_pillar_file': self.pillar_file.path,
'volumes': map_volumes,
'cloud_account': host.cloud_image.account.slug,
'cloud_image': host.cloud_image.slug,
'namespace': self.namespace,
},
},
# The rest of the settings in the map are salt-cloud
# specific and control the VM in various ways
# depending on the cloud account being used.
'size': instance_size,
'securitygroupid': list(security_groups),
'volumes': map_volumes,
'delvol_on_destroy': True,
'del_all_vols_on_destroy': True,
}
if cloud_account.vpc_enabled:
host_metadata['subnetid'] = host.subnet_id
else:
host_metadata['availability_zone'] = host.availability_zone.title
# Add in spot instance config if needed
if host.sir_price:
host_metadata['spot_config'] = {
'spot_price': str(host.sir_price) # convert to string
}
images.setdefault(host.cloud_image.slug, {})[host.hostname] = host_metadata
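        # A sketch of the returned structure (values illustrative, not real):
        #   {'<image-slug>': {
        #       'web-0': {'name': 'web-0',
        #                 'minion': {'master': '<fqdn>', 'grains': {...}},
        #                 'size': 'm3.medium',
        #                 'securitygroupid': ['sg-...'],
        #                 'volumes': [...],
        #                 'delvol_on_destroy': True,
        #                 'del_all_vols_on_destroy': True}}}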
return images
def generate_map_file(self):
images = self.generate_cloud_map()
map_file_yaml = yaml.safe_dump(images, default_flow_style=False)
if not self.map_file:
self.map_file.save('stack.map', ContentFile(map_file_yaml))
else:
with open(self.map_file.path, 'w') as f:
f.write(map_file_yaml)
def generate_top_file(self):
top_file_data = {
'base': {
'G@stack_id:{0}'.format(self.pk): [
{'match': 'compound'},
'core.*',
]
}
}
top_file_yaml = yaml.safe_dump(top_file_data, default_flow_style=False)
if not self.top_file:
self.top_file.save('stack_{0}_top.sls'.format(self.pk), ContentFile(top_file_yaml))
else:
with open(self.top_file.path, 'w') as f:
f.write(top_file_yaml)
def generate_orchestrate_file(self):
hosts = self.hosts.all()
stack_target = 'G@stack_id:{0}'.format(self.pk)
def _matcher(sls_set):
return ' and '.join(
[stack_target] + ['G@roles:{0}'.format(i) for i in sls_set]
)
groups = {}
for host in hosts:
for component in host.formula_components.all():
groups.setdefault(component.order, set()).add(component.sls_path)
orchestrate = {}
for order in sorted(groups.keys()):
for role in groups[order]:
orchestrate[role] = {
'salt.state': [
{'tgt': _matcher([role])},
{'tgt_type': 'compound'},
{'sls': role},
]
}
depend = order - 1
while depend >= 0:
if depend in groups.keys():
orchestrate[role]['salt.state'].append(
{'require': [{'salt': req} for req in groups[depend]]}
)
break
depend -= 1
yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
if not self.orchestrate_file:
self.orchestrate_file.save('orchestrate.sls', ContentFile(yaml_data))
else:
with open(self.orchestrate_file.path, 'w') as f:
f.write(yaml_data)
def generate_global_orchestrate_file(self):
accounts = set([host.cloud_image.account for host in self.hosts.all()])
orchestrate = {}
for account in accounts:
# Target the stack_id and cloud account
target = 'G@stack_id:{0} and G@cloud_account:{1}'.format(
self.id,
account.slug)
groups = {}
for component in account.formula_components.all():
groups.setdefault(component.order, set()).add(component.sls_path)
for order in sorted(groups.keys()):
for role in groups[order]:
state_title = '{0}_{1}'.format(account.slug, role)
orchestrate[state_title] = {
'salt.state': [
{'tgt': target},
{'tgt_type': 'compound'},
{'sls': role},
]
}
depend = order - 1
while depend >= 0:
if depend in groups.keys():
                            orchestrate[state_title]['salt.state'].append(
{'require': [{'salt': req} for req in groups[depend]]}
)
break
depend -= 1
yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
if not self.global_orchestrate_file:
self.global_orchestrate_file.save('global_orchestrate.sls', ContentFile(yaml_data))
else:
with open(self.global_orchestrate_file.path, 'w') as f:
f.write(yaml_data)
def generate_pillar_file(self, update_formulas=False):
# Import here to not cause circular imports
from stackdio.api.formulas.models import FormulaVersion
from stackdio.api.formulas.tasks import update_formula
users = []
# pull the create_ssh_users property from the stackd.io config file.
# If it's False, we won't create ssh users on the box.
if self.create_users:
user_permissions_map = get_users_with_perms(
self, attach_perms=True, with_superusers=True, with_group_users=True
)
for user, perms in user_permissions_map.items():
if 'ssh_stack' in perms:
if user.settings.public_key:
logger.debug('Granting {0} ssh permission to stack: {1}'.format(
user.username,
self.title,
))
users.append({
'username': user.username,
'public_key': user.settings.public_key,
'id': user.id,
})
else:
logger.debug(
'User {0} has ssh permission for stack {1}, but has no public key. '
'Skipping.'.format(
user.username,
self.title,
)
)
pillar_props = {
'__stackdio__': {
'users': users
}
}
# If any of the formulas we're using have default pillar
# data defined in its corresponding SPECFILE, we need to pull
# that into our stack pillar file.
# First get the unique set of formulas
formulas = set()
for host in self.hosts.all():
formulas.update([c.formula for c in host.formula_components.all()])
# Update the formulas if requested
if update_formulas:
for formula in formulas:
# Update the formula, and fail silently if there was an error.
if formula.private_git_repo:
logger.debug('Skipping private formula: {0}'.format(formula.uri))
continue
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
update_formula.si(formula.id, None, version, raise_exception=False)()
# for each unique formula, pull the properties from the SPECFILE
for formula in formulas:
recursive_update(pillar_props, formula.properties)
# Add in properties that were supplied via the blueprint and during
# stack creation
recursive_update(pillar_props, self.properties)
pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
if not self.pillar_file:
self.pillar_file.save('stack.pillar', ContentFile(pillar_file_yaml))
else:
with open(self.pillar_file.path, 'w') as f:
f.write(pillar_file_yaml)
def generate_global_pillar_file(self, update_formulas=False):
# Import here to not cause circular imports
from stackdio.api.formulas.models import FormulaVersion
from stackdio.api.formulas.tasks import update_formula
pillar_props = {}
# Find all of the globally used formulas for the stack
accounts = set(
[host.cloud_image.account for
host in self.hosts.all()]
)
global_formulas = []
for account in accounts:
global_formulas.extend(account.get_formulas())
# Update the formulas if requested
if update_formulas:
for formula in global_formulas:
# Update the formula, and fail silently if there was an error.
if formula.private_git_repo:
logger.debug('Skipping private formula: {0}'.format(formula.uri))
continue
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
update_formula.si(formula.id, None, version, raise_exception=False)()
# Add the global formulas into the props
for formula in set(global_formulas):
recursive_update(pillar_props, formula.properties)
# Add in the account properties AFTER the stack properties
for account in accounts:
recursive_update(pillar_props,
account.global_orchestration_properties)
pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
if not self.global_pillar_file:
self.global_pillar_file.save('stack.global_pillar', ContentFile(pillar_file_yaml))
else:
with open(self.global_pillar_file.path, 'w') as f:
f.write(pillar_file_yaml)
def query_hosts(self, force=False):
"""
Uses salt-cloud to query all the hosts for the given stack id.
"""
CACHE_KEY = 'salt-cloud-full-query'
cached_result = cache.get(CACHE_KEY)
if cached_result and not force:
logger.debug('salt-cloud query result cached')
result = cached_result
else:
logger.debug('salt-cloud query result not cached, retrieving')
logger.info('get_hosts_info: {0!r}'.format(self))
salt_cloud = salt.cloud.CloudClient(settings.STACKDIO_CONFIG.salt_cloud_config)
result = salt_cloud.full_query()
# Cache the result for a minute
cache.set(CACHE_KEY, result, 60)
# yaml_result contains all host information in the stack, but
# we have to dig a bit to get individual host metadata out
# of account and provider type dictionaries
host_result = {}
for host in self.hosts.all():
account = host.get_account()
provider = account.provider
# each host is buried in a cloud provider type dict that's
# inside a cloud account name dict
# Grab the list of hosts
host_map = result.get(account.slug, {}).get(provider.name, {})
# Grab the individual host
host_result[host.hostname] = host_map.get(host.hostname, None)
return host_result
def get_root_directory(self):
if self.map_file:
return os.path.dirname(self.map_file.path)
if self.props_file:
return os.path.dirname(self.props_file.path)
return None
def get_log_directory(self):
root_dir = self.get_root_directory()
log_dir = os.path.join(root_dir, 'logs')
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
return log_dir
def get_security_groups(self):
return SecurityGroup.objects.filter(is_managed=True,
hosts__stack=self).distinct()
def get_role_list(self):
roles = set()
for bhd in self.blueprint.host_definitions.all():
for formula_component in bhd.formula_components.all():
roles.add(formula_component.sls_path)
return list(roles)
class StackHistory(TimeStampedModel, StatusDetailModel):
class Meta:
verbose_name_plural = 'stack history'
ordering = ['-created', '-id']
default_permissions = ()
STATUS = Stack.STATUS
stack = models.ForeignKey('Stack', related_name='history')
    # The 'event' (method name, task name, etc.) that caused
    # this status update
event = models.CharField(max_length=128)
# The human-readable description of the event
# status = models.TextField(blank=True)
# Optional: level (DEBUG, INFO, WARNING, ERROR, etc)
level = models.CharField(max_length=16, choices=(
(Level.DEBUG, Level.DEBUG),
(Level.INFO, Level.INFO),
(Level.WARN, Level.WARN),
(Level.ERROR, Level.ERROR),
))
class StackCommand(TimeStampedModel, StatusModel):
WAITING = 'waiting'
RUNNING = 'running'
FINISHED = 'finished'
ERROR = 'error'
STATUS = Choices(WAITING, RUNNING, FINISHED, ERROR)
class Meta:
verbose_name_plural = 'stack actions'
default_permissions = ()
stack = models.ForeignKey('Stack', related_name='commands')
    # The time the command started executing
start = models.DateTimeField('Start Time', blank=True, default=now)
# Which hosts we want to target
host_target = models.CharField('Host Target', max_length=255)
    # The command to be run (for custom actions)
    command = models.TextField('Command')
    # The output from the action
    std_out_storage = models.TextField()
# The error output from the action
std_err_storage = models.TextField()
@property
def std_out(self):
if self.std_out_storage != "":
return json.loads(self.std_out_storage)
else:
return []
@property
def std_err(self):
return self.std_err_storage
@property
def submit_time(self):
return self.created
@property
def start_time(self):
if self.status in (self.RUNNING, self.FINISHED):
return self.start
else:
return ''
@property
def finish_time(self):
if self.status == self.FINISHED:
return self.modified
else:
return ''
class Host(TimeStampedModel, StatusDetailModel):
PENDING = 'pending'
OK = 'ok'
DELETING = 'deleting'
STATUS = Choices(PENDING, OK, DELETING)
class Meta:
ordering = ['blueprint_host_definition', '-index']
default_permissions = ()
# TODO: We should be using generic foreign keys here to a cloud account
# specific implementation of a Host object. I'm not exactly sure how this
# will work, but I think by using Django's content type system we can make
# it work...just not sure how easy it will be to extend, maintain, etc.
stack = models.ForeignKey('Stack',
related_name='hosts')
cloud_image = models.ForeignKey('cloud.CloudImage',
related_name='hosts')
instance_size = models.ForeignKey('cloud.CloudInstanceSize',
related_name='hosts')
availability_zone = models.ForeignKey('cloud.CloudZone',
null=True,
related_name='hosts')
subnet_id = models.CharField('Subnet ID', max_length=32, blank=True, default='')
blueprint_host_definition = models.ForeignKey(
'blueprints.BlueprintHostDefinition',
related_name='hosts')
hostname = models.CharField('Hostname', max_length=64)
index = models.IntegerField('Index')
security_groups = models.ManyToManyField('cloud.SecurityGroup',
related_name='hosts')
# The machine state as provided by the cloud account
state = models.CharField('State', max_length=32, default='unknown')
state_reason = models.CharField('State Reason', max_length=255, default='', blank=True)
# This must be updated automatically after the host is online.
# After salt-cloud has launched VMs, we will need to look up
# the DNS name set by whatever cloud provider is being used
# and set it here
provider_dns = models.CharField('Provider DNS', max_length=64, blank=True)
provider_private_dns = models.CharField('Provider Private DNS', max_length=64, blank=True)
provider_private_ip = models.CharField('Provider Private IP Address', max_length=64, blank=True)
# The FQDN for the host. This includes the hostname and the
# domain if it was registered with DNS
fqdn = models.CharField('FQDN', max_length=255, blank=True)
# Instance id of the running host. This is provided by the cloud
# provider
instance_id = models.CharField('Instance ID', max_length=32, blank=True)
# Spot instance request ID will be populated when metadata is refreshed
# if the host has been configured to launch spot instances. By default,
# it will be unknown and will be set to NA if spot instances were not
# used.
sir_id = models.CharField('SIR ID',
max_length=32,
default='unknown')
# The spot instance price for this host if using spot instances
sir_price = models.DecimalField('Spot Price',
max_digits=5,
decimal_places=2,
null=True)
def __unicode__(self):
return self.hostname
@property
def provider_metadata(self):
metadata = self.stack.query_hosts()
return metadata[self.hostname]
@property
def formula_components(self):
return self.blueprint_host_definition.formula_components
def get_account(self):
return self.cloud_image.account
def get_provider(self):
return self.get_account().provider
def get_driver(self):
        return self.cloud_image.get_driver()
models.py
# -*- coding: utf-8 -*-
# Copyright 2016, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
import socket
import salt.cloud
import yaml
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.db import models, transaction
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.utils.timezone import now
from django_extensions.db.models import (
TimeStampedModel,
TitleSlugDescriptionModel,
)
from guardian.shortcuts import get_users_with_perms
from model_utils import Choices
from model_utils.models import StatusModel
from stackdio.core.fields import DeletingFileField
from stackdio.core.utils import recursive_update
from stackdio.api.cloud.models import SecurityGroup
PROTOCOL_CHOICES = [
('tcp', 'TCP'),
('udp', 'UDP'),
('icmp', 'ICMP'),
]
logger = logging.getLogger(__name__)
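# Hostnames built from the templates are expected to end in '-<number>'; this
# pattern captures that trailing index (e.g. 'mystack-web-3' -> '3').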
HOST_INDEX_PATTERN = re.compile(r'.*-.*-(\d+)')
def get_hostnames_from_hostdefs(hostdefs, username='', namespace=''):
hostnames = []
for hostdef in hostdefs:
for i in xrange(hostdef.count):
hostnames.append(
hostdef.hostname_template.format(
namespace=namespace,
username=username,
index=i
)
)
return hostnames
class StackCreationException(Exception):
def __init__(self, errors, *args, **kwargs):
self.errors = errors
super(StackCreationException, self).__init__(*args, **kwargs)
class Level(object):
DEBUG = 'DEBUG'
INFO = 'INFO'
WARN = 'WARNING'
ERROR = 'ERROR'
class StatusDetailModel(StatusModel):
status_detail = models.TextField(blank=True)
class Meta:
abstract = True
default_permissions = ()
def set_status(self, status, detail=''):
self.status = status
self.status_detail = detail
return self.save()
class StackQuerySet(models.QuerySet):
def create(self, **kwargs):
new_properties = kwargs.pop('properties', {})
with transaction.atomic(using=self.db):
stack = super(StackQuerySet, self).create(**kwargs)
# manage the properties
properties = stack.blueprint.properties
recursive_update(properties, new_properties)
stack.properties = properties
# Create the appropriate hosts & security group objects
stack.create_security_groups()
stack.create_hosts()
return stack
_stack_model_permissions = (
'create',
'admin',
)
_stack_object_permissions = (
'launch',
'view',
'update',
'ssh',
'provision',
'orchestrate',
'execute',
'start',
'stop',
'terminate',
'delete',
'admin',
)
stack_storage = FileSystemStorage(location=os.path.join(settings.FILE_STORAGE_DIRECTORY, 'stacks'))
# For map, pillar, and properties. Doesn't need to go in a sub directory
def get_local_file_path(instance, filename):
return '{0}-{1}/{2}'.format(instance.pk, instance.slug, filename)
# Orchestrate files go in formula directory
def get_orchestrate_file_path(instance, filename):
return '{0}-{1}/formulas/__stackdio__/{2}'.format(instance.pk, instance.slug, filename)
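# For a stack with pk=42 and slug='my-stack' these resolve to, e.g.:
#   get_local_file_path(stack, 'stack.map')
#       -> '42-my-stack/stack.map'
#   get_orchestrate_file_path(stack, 'orchestrate.sls')
#       -> '42-my-stack/formulas/__stackdio__/orchestrate.sls'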
class Stack(TimeStampedModel, TitleSlugDescriptionModel, StatusModel):
# Launch workflow:
PENDING = 'pending'
LAUNCHING = 'launching'
CONFIGURING = 'configuring'
SYNCING = 'syncing'
PROVISIONING = 'provisioning'
ORCHESTRATING = 'orchestrating'
FINALIZING = 'finalizing'
FINISHED = 'finished'
# Delete workflow:
# PENDING
DESTROYING = 'destroying'
# FINISHED
# Other actions
# LAUNCHING
STARTING = 'starting'
STOPPING = 'stopping'
TERMINATING = 'terminating'
EXECUTING_ACTION = 'executing_action'
# Errors
ERROR = 'error'
SAFE_STATES = [FINISHED, ERROR]
# Not sure?
OK = 'ok'
RUNNING = 'running'
REBOOTING = 'rebooting'
STATUS = Choices(PENDING, LAUNCHING, CONFIGURING, SYNCING, PROVISIONING,
ORCHESTRATING, FINALIZING, DESTROYING, FINISHED,
STARTING, STOPPING, TERMINATING, EXECUTING_ACTION, ERROR)
model_permissions = _stack_model_permissions
object_permissions = _stack_object_permissions
searchable_fields = ('title', 'description', 'history__status_detail')
class Meta:
ordering = ('title',)
default_permissions = tuple(set(_stack_model_permissions + _stack_object_permissions))
unique_together = ('title',)
# What blueprint did this stack derive from?
blueprint = models.ForeignKey('blueprints.Blueprint', related_name='stacks')
formula_versions = GenericRelation('formulas.FormulaVersion')
labels = GenericRelation('core.Label')
# An arbitrary namespace for this stack. Mainly useful for Blueprint
# hostname templates
namespace = models.CharField('Namespace', max_length=64)
create_users = models.BooleanField('Create SSH Users')
# Where on disk is the salt-cloud map file stored
map_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom salt top.sls file stored
top_file = DeletingFileField(
max_length=255,
null=True,
blank=True,
default=None,
storage=FileSystemStorage(location=settings.STACKDIO_CONFIG.salt_core_states))
# Where on disk is the custom orchestrate file stored
orchestrate_file = DeletingFileField(
max_length=255,
upload_to=get_orchestrate_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the global orchestrate file stored
global_orchestrate_file = DeletingFileField(
max_length=255,
upload_to=get_orchestrate_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom pillar file for custom configuration for
# all salt states used by the top file
pillar_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Where on disk is the custom pillar file for custom configuration for
# all salt states used by the top file
global_pillar_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# storage for properties file
props_file = DeletingFileField(
max_length=255,
upload_to=get_local_file_path,
null=True,
blank=True,
default=None,
storage=stack_storage)
# Use our custom manager object
objects = StackQuerySet.as_manager()
def __unicode__(self):
return u'{0} (id={1})'.format(self.title, self.id)
def set_status(self, event, status, detail, level=Level.INFO):
self.status = status
self.save()
self.history.create(event=event, status=status,
status_detail=detail, level=level)
def get_driver_hosts_map(self, host_ids=None):
"""
Stacks are comprised of multiple hosts. Each host may be
located in different cloud accounts. This method returns
a map of the underlying driver implementation and the hosts
        that are running in the account.
@param host_ids (list); a list of primary keys for the hosts
we're interested in
@returns (dict); each key is a provider driver implementation
with QuerySet value for the matching host objects
"""
host_queryset = self.get_hosts(host_ids)
# Create an account -> hosts map
accounts = {}
for h in host_queryset:
accounts.setdefault(h.get_account(), []).append(h)
# Convert to a driver -> hosts map
result = {}
for account, hosts in accounts.items():
            result[account.get_driver()] = host_queryset.filter(id__in=[h.id for h in hosts])
return result
def get_hosts(self, host_ids=None):
"""
Quick way of getting all hosts or a subset for this stack.
@host_ids (list); list of primary keys of hosts in this stack
@returns (QuerySet);
"""
if not host_ids:
return self.hosts.all()
return self.hosts.filter(id__in=host_ids)
def get_formulas(self):
return self.blueprint.get_formulas()
def get_tags(self):
tags = {}
for label in self.labels.all():
tags[label.key] = label.value
tags['stack_id'] = self.id
# No name allowed. salt-cloud uses this and it would break everything.
if 'Name' in tags:
del tags['Name']
return tags
@property
def properties(self):
if not self.props_file:
return {}
with open(self.props_file.path, 'r') as f:
return json.load(f)
@properties.setter
def properties(self, props):
props_json = json.dumps(props, indent=4)
if not self.props_file:
self.props_file.save('stack.props', ContentFile(props_json))
else:
with open(self.props_file.path, 'w') as f:
f.write(props_json)
def create_security_groups(self):
for hostdef in self.blueprint.host_definitions.all():
# create the managed security group for each host definition
# and assign the rules to the group
sg_name = 'stackdio-managed-{0}-stack-{1}'.format(
hostdef.slug,
self.pk
)
sg_description = 'stackd.io managed security group'
# cloud account and driver for the host definition
account = hostdef.cloud_image.account
if not account.create_security_groups:
logger.debug('Skipping creation of {0} because security group creation is turned '
'off for the account'.format(sg_name))
continue
driver = account.get_driver()
try:
sg_id = driver.create_security_group(sg_name,
sg_description,
delete_if_exists=True)
except Exception as e:
err_msg = 'Error creating security group: {0}'.format(str(e))
                self.set_status('create_security_groups', self.ERROR,
                                err_msg, Level.ERROR)
                raise
logger.debug('Created security group {0}: {1}'.format(
sg_name,
sg_id
))
for access_rule in hostdef.access_rules.all():
driver.authorize_security_group(sg_id, {
'protocol': access_rule.protocol,
'from_port': access_rule.from_port,
'to_port': access_rule.to_port,
'rule': access_rule.rule,
})
# create the security group object that we can use for tracking
self.security_groups.create(
account=account,
blueprint_host_definition=hostdef,
name=sg_name,
description=sg_description,
group_id=sg_id,
is_managed=True
)
def create_hosts(self, host_definition=None, count=None, backfill=False):
"""
Creates host objects on this Stack. If no arguments are given, then
all hosts available based on the Stack's blueprint host definitions
will be created. If args are given, then only the `count` for the
given `host_definition` will be created.
@param host_definition (BlueprintHostDefinition object); the host
definition to use for creating new hosts. If None, all host
definitions for the stack's blueprint will be used.
@param count (int); the number of hosts to create. If None, all
hosts will be created.
@param backfill (bool); If True, then hosts will be created with
hostnames that fill in any gaps if necessary. If False, then
hostnames will start at the end of the host list. This is only
used when `host_definition` and `count` arguments are provided.
"""
created_hosts = []
if host_definition is None:
host_definitions = self.blueprint.host_definitions.all()
else:
host_definitions = [host_definition]
for hostdef in host_definitions:
hosts = self.hosts.all()
if count is None:
start, end = 0, hostdef.count
indexes = range(start, end)
elif not hosts:
start, end = 0, count
indexes = range(start, end)
else:
if backfill:
hosts = hosts.order_by('index')
# The set of existing host indexes
host_indexes = set([h.index for h in hosts])
# The last index available
last_index = sorted(host_indexes)[-1]
# The set of expected indexes based on the last known
# index
expected_indexes = set(range(last_index + 1))
                    # Any gaps in the expected indexes?
gaps = expected_indexes - host_indexes
indexes = []
if gaps:
indexes = list(gaps)
count -= len(indexes)
start = sorted(host_indexes)[-1] + 1
end = start + count
indexes += range(start, end)
else:
start = hosts.order_by('-index')[0].index + 1
end = start + count
indexes = xrange(start, end)
# all components defined in the host definition
components = hostdef.formula_components.all()
# iterate over the host definition count and create individual
# host records on the stack
for i in indexes:
hostname = hostdef.hostname_template.format(
namespace=self.namespace,
index=i
)
kwargs = dict(
index=i,
cloud_image=hostdef.cloud_image,
blueprint_host_definition=hostdef,
instance_size=hostdef.size,
hostname=hostname,
sir_price=hostdef.spot_price,
state=Host.PENDING
)
if hostdef.cloud_image.account.vpc_enabled:
kwargs['subnet_id'] = hostdef.subnet_id
else:
kwargs['availability_zone'] = hostdef.zone
host = self.hosts.create(**kwargs)
account = host.cloud_image.account
# Add in the cloud account default security groups as
# defined by an admin.
account_groups = set(list(
account.security_groups.filter(
is_default=True
)
))
host.security_groups.add(*account_groups)
if account.create_security_groups:
# Add in the security group provided by this host definition,
# but only if this functionality is enabled on the account
security_group = SecurityGroup.objects.get(
stack=self,
blueprint_host_definition=hostdef
)
host.security_groups.add(security_group)
# add formula components
host.formula_components.add(*components)
for volumedef in hostdef.volumes.all():
self.volumes.create(
host=host,
snapshot=volumedef.snapshot,
hostname=hostname,
device=volumedef.device,
mount_point=volumedef.mount_point
)
created_hosts.append(host)
return created_hosts
def generate_cloud_map(self):
# TODO: Figure out a way to make this provider agnostic
# TODO: Should we store this somewhere instead of assuming
master = socket.getfqdn()
images = {}
hosts = self.hosts.all()
cluster_size = len(hosts)
for host in hosts:
# load provider yaml to extract default security groups
cloud_account = host.cloud_image.account
cloud_account_yaml = yaml.safe_load(cloud_account.yaml)[cloud_account.slug]
# pull various stuff we need for a host
roles = [c.sls_path for c in host.formula_components.all()]
instance_size = host.instance_size.title
security_groups = set([
sg.group_id for sg in host.security_groups.all()
])
volumes = host.volumes.all()
domain = cloud_account_yaml['append_domain']
fqdn = '{0}.{1}'.format(host.hostname, domain)
# The volumes will be defined on the map as well as in the grains.
# Those in the map are used by salt-cloud to create and attach
# the volumes (using the snapshot), whereas those on the grains
# are available for states and modules to play with (e.g., to
# mount the devices)
map_volumes = []
for vol in volumes:
v = {
'device': vol.device,
'mount_point': vol.mount_point,
# filesystem_type doesn't matter, should remove soon
'filesystem_type': vol.snapshot.filesystem_type,
'type': 'gp2',
}
if vol.volume_id:
v['volume_id'] = vol.volume_id
else:
v['snapshot'] = vol.snapshot.snapshot_id
map_volumes.append(v)
host_metadata = {
'name': host.hostname,
# The parameters in the minion dict will be passed on
# to the minion and set in its default configuration
# at /etc/salt/minion. This is where you would override
# any default values set by salt-minion
'minion': {
'master': master,
'log_level': 'debug',
'log_level_logfile': 'debug',
'mine_functions': {
'grains.items': []
},
# Grains are very useful when you need to set some
# static information about a machine (e.g., what stack
                    # id it's registered under or how many total machines
# are in the cluster)
'grains': {
'roles': roles,
'stack_id': int(self.pk),
'fqdn': fqdn,
'domain': domain,
'cluster_size': cluster_size,
'stack_pillar_file': self.pillar_file.path,
'volumes': map_volumes,
'cloud_account': host.cloud_image.account.slug,
'cloud_image': host.cloud_image.slug,
'namespace': self.namespace,
},
},
# The rest of the settings in the map are salt-cloud
# specific and control the VM in various ways
# depending on the cloud account being used.
'size': instance_size,
'securitygroupid': list(security_groups),
'volumes': map_volumes,
'delvol_on_destroy': True,
'del_all_vols_on_destroy': True,
}
if cloud_account.vpc_enabled:
host_metadata['subnetid'] = host.subnet_id
else:
host_metadata['availability_zone'] = host.availability_zone.title
# Add in spot instance config if needed
if host.sir_price:
host_metadata['spot_config'] = {
'spot_price': str(host.sir_price) # convert to string
}
images.setdefault(host.cloud_image.slug, {})[host.hostname] = host_metadata
return images
def generate_map_file(self):
images = self.generate_cloud_map()
map_file_yaml = yaml.safe_dump(images, default_flow_style=False)
if not self.map_file:
self.map_file.save('stack.map', ContentFile(map_file_yaml))
else:
with open(self.map_file.path, 'w') as f:
f.write(map_file_yaml)
def generate_top_file(self):
top_file_data = {
'base': {
'G@stack_id:{0}'.format(self.pk): [
{'match': 'compound'},
'core.*',
]
}
}
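        # Rendered as YAML this becomes, roughly, for a stack with pk 42:
        #   base:
        #     'G@stack_id:42':
        #     - match: compound
        #     - core.*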
top_file_yaml = yaml.safe_dump(top_file_data, default_flow_style=False)
if not self.top_file:
self.top_file.save('stack_{0}_top.sls'.format(self.pk), ContentFile(top_file_yaml))
else:
with open(self.top_file.path, 'w') as f:
f.write(top_file_yaml)
def generate_orchestrate_file(self):
hosts = self.hosts.all()
stack_target = 'G@stack_id:{0}'.format(self.pk)
def _matcher(sls_set):
return ' and '.join(
[stack_target] + ['G@roles:{0}'.format(i) for i in sls_set]
)
groups = {}
for host in hosts:
for component in host.formula_components.all():
groups.setdefault(component.order, set()).add(component.sls_path)
orchestrate = {}
for order in sorted(groups.keys()):
for role in groups[order]:
orchestrate[role] = {
'salt.state': [
{'tgt': _matcher([role])},
{'tgt_type': 'compound'},
{'sls': role},
]
}
depend = order - 1
while depend >= 0:
if depend in groups.keys():
orchestrate[role]['salt.state'].append(
{'require': [{'salt': req} for req in groups[depend]]}
)
break
depend -= 1
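        # Each entry targets one role on this stack and, when an earlier order
        # exists, requires its states; one entry renders roughly as:
        #   some.role:
        #     salt.state:
        #     - tgt: G@stack_id:42 and G@roles:some.role
        #     - tgt_type: compound
        #     - sls: some.role
        #     - require:
        #       - salt: earlier.role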
yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
if not self.orchestrate_file:
self.orchestrate_file.save('orchestrate.sls', ContentFile(yaml_data))
else:
with open(self.orchestrate_file.path, 'w') as f:
f.write(yaml_data)
def generate_global_orchestrate_file(self):
accounts = set([host.cloud_image.account for host in self.hosts.all()])
orchestrate = {}
for account in accounts:
# Target the stack_id and cloud account
target = 'G@stack_id:{0} and G@cloud_account:{1}'.format(
self.id,
account.slug)
groups = {}
for component in account.formula_components.all():
groups.setdefault(component.order, set()).add(component.sls_path)
for order in sorted(groups.keys()):
for role in groups[order]:
state_title = '{0}_{1}'.format(account.slug, role)
orchestrate[state_title] = {
'salt.state': [
{'tgt': target},
{'tgt_type': 'compound'},
{'sls': role},
]
}
depend = order - 1
while depend >= 0:
if depend in groups.keys():
                            orchestrate[state_title]['salt.state'].append(
{'require': [{'salt': req} for req in groups[depend]]}
)
break
depend -= 1
yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
if not self.global_orchestrate_file:
self.global_orchestrate_file.save('global_orchestrate.sls', ContentFile(yaml_data))
else:
with open(self.global_orchestrate_file.path, 'w') as f:
f.write(yaml_data)
def generate_pillar_file(self, update_formulas=False):
# Import here to not cause circular imports
from stackdio.api.formulas.models import FormulaVersion
from stackdio.api.formulas.tasks import update_formula
users = []
# pull the create_ssh_users property from the stackd.io config file.
# If it's False, we won't create ssh users on the box.
if self.create_users:
user_permissions_map = get_users_with_perms(
self, attach_perms=True, with_superusers=True, with_group_users=True
)
for user, perms in user_permissions_map.items():
if 'ssh_stack' in perms:
if user.settings.public_key:
logger.debug('Granting {0} ssh permission to stack: {1}'.format(
user.username,
self.title,
))
users.append({
'username': user.username,
'public_key': user.settings.public_key,
'id': user.id,
})
else:
logger.debug(
'User {0} has ssh permission for stack {1}, but has no public key. '
'Skipping.'.format(
user.username,
self.title,
)
)
pillar_props = {
'__stackdio__': {
'users': users
}
}
# If any of the formulas we're using have default pillar
# data defined in its corresponding SPECFILE, we need to pull
# that into our stack pillar file.
# First get the unique set of formulas
formulas = set()
for host in self.hosts.all():
formulas.update([c.formula for c in host.formula_components.all()])
# Update the formulas if requested
if update_formulas:
for formula in formulas:
# Update the formula, and fail silently if there was an error.
if formula.private_git_repo:
logger.debug('Skipping private formula: {0}'.format(formula.uri))
continue
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
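                # .si() builds an immutable Celery signature; calling it with
                # () runs the task synchronously in this process.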
update_formula.si(formula.id, None, version, raise_exception=False)()
# for each unique formula, pull the properties from the SPECFILE
for formula in formulas:
recursive_update(pillar_props, formula.properties)
# Add in properties that were supplied via the blueprint and during
# stack creation
recursive_update(pillar_props, self.properties)
pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
if not self.pillar_file:
self.pillar_file.save('stack.pillar', ContentFile(pillar_file_yaml))
else:
with open(self.pillar_file.path, 'w') as f:
f.write(pillar_file_yaml)
def generate_global_pillar_file(self, update_formulas=False):
# Import here to not cause circular imports
from stackdio.api.formulas.models import FormulaVersion
from stackdio.api.formulas.tasks import update_formula
pillar_props = {}
# Find all of the globally used formulas for the stack
accounts = set(
[host.cloud_image.account for
host in self.hosts.all()]
)
global_formulas = []
for account in accounts:
global_formulas.extend(account.get_formulas())
# Update the formulas if requested
if update_formulas:
for formula in global_formulas:
# Update the formula, and fail silently if there was an error.
if formula.private_git_repo:
logger.debug('Skipping private formula: {0}'.format(formula.uri))
continue
try:
version = self.formula_versions.get(formula=formula).version
except FormulaVersion.DoesNotExist:
version = formula.default_version
update_formula.si(formula.id, None, version, raise_exception=False)()
# Add the global formulas into the props
for formula in set(global_formulas):
recursive_update(pillar_props, formula.properties)
# Add in the account properties AFTER the stack properties
for account in accounts:
recursive_update(pillar_props,
account.global_orchestration_properties)
pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
if not self.global_pillar_file:
self.global_pillar_file.save('stack.global_pillar', ContentFile(pillar_file_yaml))
else:
with open(self.global_pillar_file.path, 'w') as f:
f.write(pillar_file_yaml)
def query_hosts(self, force=False):
"""
Uses salt-cloud to query all the hosts for the given stack id.
"""
CACHE_KEY = 'salt-cloud-full-query'
cached_result = cache.get(CACHE_KEY)
if cached_result and not force:
logger.debug('salt-cloud query result cached')
result = cached_result
else:
logger.debug('salt-cloud query result not cached, retrieving')
logger.info('get_hosts_info: {0!r}'.format(self))
salt_cloud = salt.cloud.CloudClient(settings.STACKDIO_CONFIG.salt_cloud_config)
result = salt_cloud.full_query()
# Cache the result for a minute
cache.set(CACHE_KEY, result, 60)
# yaml_result contains all host information in the stack, but
# we have to dig a bit to get individual host metadata out
# of account and provider type dictionaries
host_result = {}
for host in self.hosts.all():
account = host.get_account()
provider = account.provider
# each host is buried in a cloud provider type dict that's
# inside a cloud account name dict
# Grab the list of hosts
host_map = result.get(account.slug, {}).get(provider.name, {})
# Grab the individual host
host_result[host.hostname] = host_map.get(host.hostname, None)
return host_result
def get_root_directory(self):
if self.map_file:
return os.path.dirname(self.map_file.path)
if self.props_file:
return os.path.dirname(self.props_file.path)
return None
def get_log_directory(self):
root_dir = self.get_root_directory()
log_dir = os.path.join(root_dir, 'logs')
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
return log_dir
def get_security_groups(self):
return SecurityGroup.objects.filter(is_managed=True,
hosts__stack=self).distinct()
def get_role_list(self):
roles = set()
for bhd in self.blueprint.host_definitions.all():
for formula_component in bhd.formula_components.all():
roles.add(formula_component.sls_path)
return list(roles)
class StackHistory(TimeStampedModel, StatusDetailModel):
class Meta:
verbose_name_plural = 'stack history'
ordering = ['-created', '-id']
default_permissions = ()
STATUS = Stack.STATUS
stack = models.ForeignKey('Stack', related_name='history')
    # The 'event' (method name, task name, etc.) that caused
    # this status update
event = models.CharField(max_length=128)
# The human-readable description of the event
# status = models.TextField(blank=True)
# Optional: level (DEBUG, INFO, WARNING, ERROR, etc)
level = models.CharField(max_length=16, choices=(
(Level.DEBUG, Level.DEBUG),
(Level.INFO, Level.INFO),
(Level.WARN, Level.WARN),
(Level.ERROR, Level.ERROR),
))
class StackCommand(TimeStampedModel, StatusModel):
WAITING = 'waiting'
RUNNING = 'running'
FINISHED = 'finished'
ERROR = 'error'
STATUS = Choices(WAITING, RUNNING, FINISHED, ERROR)
class Meta:
verbose_name_plural = 'stack actions'
default_permissions = ()
stack = models.ForeignKey('Stack', related_name='commands')
    # The time the command started executing
start = models.DateTimeField('Start Time', blank=True, default=now)
# Which hosts we want to target
host_target = models.CharField('Host Target', max_length=255)
# The command to be run (for custom actions)
command = models.TextField('Command')
# The output from the action
std_out_storage = models.TextField()
# The error output from the action
std_err_storage = models.TextField()
@property
def std_out(self):
if self.std_out_storage != "":
return json.loads(self.std_out_storage)
else:
return []
@property
def std_err(self):
return self.std_err_storage
@property
def submit_time(self):
return self.created
@property
def start_time(self):
if self.status in (self.RUNNING, self.FINISHED):
return self.start
else:
return ''
@property
def finish_time(self):
if self.status == self.FINISHED:
return self.modified
else:
return ''
class Host(TimeStampedModel, StatusDetailModel):
PENDING = 'pending'
OK = 'ok'
DELETING = 'deleting'
STATUS = Choices(PENDING, OK, DELETING)
class Meta:
ordering = ['blueprint_host_definition', '-index']
default_permissions = ()
# TODO: We should be using generic foreign keys here to a cloud account
# specific implementation of a Host object. I'm not exactly sure how this
# will work, but I think by using Django's content type system we can make
# it work...just not sure how easy it will be to extend, maintain, etc.
stack = models.ForeignKey('Stack',
related_name='hosts')
cloud_image = models.ForeignKey('cloud.CloudImage',
related_name='hosts')
instance_size = models.ForeignKey('cloud.CloudInstanceSize',
related_name='hosts')
availability_zone = models.ForeignKey('cloud.CloudZone',
null=True,
related_name='hosts')
subnet_id = models.CharField('Subnet ID', max_length=32, blank=True, default='')
blueprint_host_definition = models.ForeignKey(
'blueprints.BlueprintHostDefinition',
related_name='hosts')
hostname = models.CharField('Hostname', max_length=64)
index = models.IntegerField('Index')
security_groups = models.ManyToManyField('cloud.SecurityGroup',
related_name='hosts')
# The machine state as provided by the cloud account
state = models.CharField('State', max_length=32, default='unknown')
state_reason = models.CharField('State Reason', max_length=255, default='', blank=True)
# This must be updated automatically after the host is online.
# After salt-cloud has launched VMs, we will need to look up
# the DNS name set by whatever cloud provider is being used
# and set it here
provider_dns = models.CharField('Provider DNS', max_length=64, blank=True)
provider_private_dns = models.CharField('Provider Private DNS', max_length=64, blank=True)
provider_private_ip = models.CharField('Provider Private IP Address', max_length=64, blank=True)
# The FQDN for the host. This includes the hostname and the
# domain if it was registered with DNS
fqdn = models.CharField('FQDN', max_length=255, blank=True)
# Instance id of the running host. This is provided by the cloud
# provider
instance_id = models.CharField('Instance ID', max_length=32, blank=True)
# Spot instance request ID will be populated when metadata is refreshed
# if the host has been configured to launch spot instances. By default,
# it will be unknown and will be set to NA if spot instances were not
# used.
sir_id = models.CharField('SIR ID',
max_length=32,
default='unknown')
# The spot instance price for this host if using spot instances
sir_price = models.DecimalField('Spot Price',
max_digits=5,
decimal_places=2,
null=True)
def __unicode__(self):
return self.hostname
@property
def provider_metadata(self):
metadata = self.stack.query_hosts()
return metadata[self.hostname]
@property
def formula_components(self):
return self.blueprint_host_definition.formula_components
def get_account(self):
return self.cloud_image.account
def get_provider(self):
return self.get_account().provider
def get_driver(self):
return self.cloud_image.get_driver()
download_data.py
import requests
from datetime import datetime, timedelta
import pandas as pd
import json
import os
from difflib import SequenceMatcher
import time
DATA_FOLDER = "./data"
base_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/"
date_format = '%m-%d-%Y'  # e.g. 01-22-2020.csv
df_date_format = '%Y-%m-%d'
communites_list = ['Castilla-Leon',
'Cataluña',
'Ceuta',
'Murcia',
'La Rioja',
'Baleares',
'Canarias',
'Cantabria',
'Andalucia',
'Asturias',
'Valencia',
'Melilla',
'Navarra',
'Galicia',
'Aragon',
'Madrid',
'Extremadura',
'Castilla-La Mancha',
'Pais Vasco']
#communites_geojson = read_communites_geojson()
correspondence_dict = {'Andalusia': 'Andalucia',
'Aragon': 'Aragon',
'Asturias': 'Asturias',
'Baleares': 'Baleares',
'C. Valenciana': 'Valencia',
'Canarias': 'Canarias',
'Cantabria': 'Cantabria',
'Castilla - La Mancha': 'Castilla-La Mancha',
'Castilla y Leon': 'Castilla-Leon',
'Catalonia': 'Cataluña',
'Ceuta': 'Ceuta',
'Extremadura': 'Extremadura',
'Galicia': 'Galicia',
'La Rioja': 'La Rioja',
'Madrid': 'Madrid',
'Melilla': 'Melilla',
'Murcia': 'Murcia',
'Navarra': 'Navarra',
'Pais Vasco': 'Pais Vasco'}
#communities_geojson = read_communites_geojson("spain-communites-v2")
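# Fuzzy-match a name against a reference list via difflib's SequenceMatcher,
# returning the closest entry; used to reconcile community names that differ
# between the COVID reports, the geojson and the population data.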
def correspondence_string(string, list_to_match):
current_ratio = 0
return_string = None
for string_from_list in list_to_match:
ratio = SequenceMatcher(None, string, string_from_list).ratio()
if ratio > current_ratio:
current_ratio = ratio
return_string = string_from_list
return return_string
def get_correspondence_dict(covid_communities_name, string_list = communites_list):
dic_correspondence = {}
for original_string in covid_communities_name:
dic_correspondence[original_string] = correspondence_string(original_string, string_list)
return dic_correspondence
#get_correspondence_dict(dfs[0]["Province_State"])
def format_day(day_str):
day = datetime.strptime(day_str, "%m-%d-%Y")
return datetime.strftime(day, "%Y-%m-%d")
def read_communites_geojson(name = "spain-communities" ):
with open(f"./data/geojson/{name}.geojson") as f:
geojson = json.load(f)
communites = []
for region in geojson['features']:
nameunit = region["properties"]["nameunit"]
if "/" in nameunit:
region["properties"]["nameunit"] = nameunit.split("/")[0]
if 'Ciudad Autónoma de Ceuta' in nameunit:
region["properties"]["nameunit"] = "Ceuta"
elif 'Ciudad Autónoma de Melilla' in nameunit:
region["properties"]["nameunit"] = "Melilla"
elif 'Comunidad Foral de Navarra' in nameunit:
region["properties"]["nameunit"] = "Navarra"
communites.append(region["properties"]["nameunit"])
return geojson, communites
def get_communites(geojson):
regions = []
for region in geojson['features']:
if region["properties"]["name"] == "Valencia":
region["properties"]["name"] = "C. Valenciana"
regions.append(region["properties"]["name"])
return regions
def generate_days(start_date):
end_date = datetime.now()
step = timedelta(days=1)
result = []
while start_date < end_date:
result.append(start_date.strftime(date_format))
start_date += step
return result
#download_all_datasets()
def add_to_list(l, lat_sum, lon_sum):
    # Move the Canary Islands closer to mainland Spain on the map.
    # l is an arbitrarily nested list of coordinate pairs.
if isinstance(l, list) and isinstance(l[0], float) and isinstance(l[1], float):
return l[0] + lat_sum, l[1] + lon_sum
return [add_to_list(sub, lat_sum, lon_sum) for sub in l]
def reduce_precission(l, ndigits):
if not isinstance(l, list):
return round(l, ndigits)
return [reduce_precission(sub, ndigits) for sub in l]
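# Shift the Canary Islands polygon so it renders next to mainland Spain on
# choropleth maps; the offsets below are hand-tuned, not geographic data.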
def read_original_to_displaced_canaries():
lat_sum = 6.65456
lon_sum = 5.65412
geojson,b = read_communites_geojson_v1('spain-communities')
for region in geojson['features']:
name = region["properties"]["name"]
if name == "Canarias":
region["geometry"]["coordinates"] = add_to_list(region["geometry"]["coordinates"],
lat_sum,
lon_sum)
with open(f"./data/geojson/spain-communites-displaced-canary.geojson", "w") as f:
json.dump(geojson, f)
def read_communites_geojson_v1(name = "spain-communities" ):
with open(f"./data/geojson/{name}.geojson") as f:
geojson = json.load(f)
communites = []
for region in geojson['features']:
name = region["properties"]["name"]
communites.append(name)
return geojson, communites
#communities_geojson,b = read_communites_geojson_v1()
def read_population_dataset(name = 'spain-communities-2020.csv'):
def clean_name_pop(name):
if " " in name:
name = " ".join(name.split(" ")[1:])
if "," in name:
split = name.split(",")
name = " ".join(split[1:] + [split[0]])
return name
def clean_pop_pop(pop):
if ". | population = pd.read_csv(f"./data/population/{name}", sep=";")
#population = population[population["Periodo"] == 2019]
#population = population[population["Sexo"] == "Total"]
population.drop(columns=['Periodo', 'Sexo'], inplace=True)
population['Comunidades y Ciudades Autónomas'] = [clean_name_pop(name) for name in population['Comunidades y Ciudades Autónomas'] ]
population["Total"] = [clean_pop_pop(pop) for pop in population["Total"]]
population.loc[population['Comunidades y Ciudades Autónomas'] == "Comunitat Valenciana", 'Comunidades y Ciudades Autónomas'] = 'Valencia'
population.loc[population['Comunidades y Ciudades Autónomas'] == "Comunidad Foral de Navarra", 'Comunidades y Ciudades Autónomas'] = 'Navarra'
correspondence_dict_population = get_correspondence_dict(df_diario_acumulado["Comunidad Autónoma"].unique(),population['Comunidades y Ciudades Autónomas'])
population.rename(columns = {'Comunidades y Ciudades Autónomas': 'Comunidad'}, inplace = True)
return population, correspondence_dict_population
def get_pop(com):
com = correspondence_dict_population[com]
return int(pop_df.loc[pop_df["Comunidad"]==com, 'Total'])
def tasa_mortalidad_y_letalidad(df):
df["% de letalidad"] = df['Muertes'] * 100 / df['Confirmados']
df["% Población contagiada total"] = df['Confirmados'] * 100 / df["Población"]
df["% Población fallecida total"] = df['Muertes'] * 100 / df["Población"]
def obtener_df_semanal(df):
df["Datetime"] = pd.to_datetime(df['Día'], format=df_date_format)
df["dia_sem"] = [day.weekday() for day in df['Datetime']]
df_semanal = df[df["dia_sem"] == 6].copy()
df.drop(columns= ["dia_sem", 'Datetime'], inplace = True)
df_semanal.drop(columns = ["dia_sem", 'Datetime'], inplace = True)
return df_semanal
def save_df(name, df):
df.to_csv(os.path.join(DATA_FOLDER, "final_data", name),
encoding='UTF-8',
sep=";", index= False)
def obtener_df_semanal_desacumulado(df):
dfs_desacumulados = []
for com in df["Comunidad Autónoma"].unique():
df_com = df[df['Comunidad Autónoma']==com].copy()
for column in ["Confirmados", "Muertes"]:
df_com.sort_values(by="Día", inplace = True)
df_com[column] = df_com[column].diff()
df_com.dropna(inplace = True)
dfs_desacumulados.append(df_com)
dfs_desacumulado = pd.concat(dfs_desacumulados)
dfs_desacumulado.drop(['Población',r'% de letalidad',
r'% Población contagiada total',
r'% Población fallecida total'],
inplace = True,
axis = 1)
dfs_desacumulado.sort_values(by = "Día", inplace = True)
return dfs_desacumulado
def correct_names():
def read_population_dataset_custom(name = 'spain-communities-2020.csv'):
def clean_name_pop(name):
if " " in name:
name = " ".join(name.split(" ")[1:])
if "," in name:
split = name.split(",")
name = " ".join(split[1:] + [split[0]])
return name
def clean_pop_pop(pop):
if "." in pop:
return int(pop.replace(".",""))
population = pd.read_csv(f"./data/population/{name}", sep=";")
population.drop(columns=['Periodo', 'Sexo'], inplace=True)
population['Comunidades y Ciudades Autónomas'] = [clean_name_pop(name) for name in population['Comunidades y Ciudades Autónomas'] ]
population["Total"] = [clean_pop_pop(pop) for pop in population["Total"]]
return population
corres_dict = get_correspondence_dict(dfs[0]['Province_State'], read_population_dataset_custom()['Comunidades y Ciudades Autónomas'])
corres_dict['Valencia'] = 'Comunitat Valenciana'
corres_dict['Navarra'] = 'Comunidad Foral de Navarra'
corres_dict['Total'] = 'Total'
for key,value in corres_dict.items():
if " " == value[0]:
corres_dict[key]=value[1:]
return corres_dict
def download_all_datasets():
start_date = datetime(month = 5, day = 14, year=2020)
days = generate_days(start_date)
descargados = os.listdir(os.path.join(DATA_FOLDER,
"covid_data"))
for i, day in enumerate(days):
filename = f"{day}.csv"
if filename not in descargados:
try:
df = pd.read_csv(base_url + filename)
#df = df.loc[df['Country_Region'] == "Spain"]
df.to_csv(f"{DATA_FOLDER}/covid_data/{filename}", index = False)
except:
print(f"No se ha encontrado el día {day}")
download_all_datasets()
dfs = []
for file in sorted(os.listdir(os.path.join(DATA_FOLDER, "covid_data"))):
if ".csv" in file:
day = file[:-4]
df = pd.read_csv(f"{DATA_FOLDER}/covid_data/{file}")
df = df.loc[(df['Country_Region'] == "Spain") & (df['Province_State'] != "Unknown")]
df["Province_State"] = [correspondence_dict[province] for province in df["Province_State"]]
df = df[['Province_State','Country_Region',
'Last_Update','Confirmed',
'Deaths', 'Recovered',
'Active']]
df["Day"] = [format_day(day) for i in range(len(df))]
df.drop(columns = ["Active", 'Recovered', 'Last_Update'], inplace = True)
# df.to_csv("data.csv", index = False)
dfs.append(df)
df_diario_acumulado = pd.concat(dfs)
df_diario_acumulado.drop(columns=["Country_Region"], inplace = True)
df_diario_acumulado.rename(columns={"Province_State": "Comunidad Autónoma",
"Confirmed": "Confirmados",
"Deaths": "Muertes",
"Day":"Día"}, inplace = True)
pop_df, correspondence_dict_population = read_population_dataset()
df_diario_acumulado['Población'] = df_diario_acumulado["Comunidad Autónoma"].apply(lambda x: get_pop(x))
del pop_df, correspondence_dict_population
correct_communites = correct_names()
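# Append a synthetic 'Total' row per day aggregating confirmed cases, deaths
# and population across all communities.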
def get_total_rows(df_diario_acumulado):
dfs=[]
for day in df_diario_acumulado["Día"].unique():
df = df_diario_acumulado.loc[df_diario_acumulado['Día'] == day].copy()
total_row = {'Comunidad Autónoma':'Total', 'Confirmados':sum(df['Confirmados']),
'Muertes': sum(df['Muertes']), 'Día':day, 'Población':sum(df['Población'])}
df = df.append(total_row, ignore_index = True)
dfs.append(df)
df = pd.concat(dfs)
return df.sort_values(by='Día')
df_diario_acumulado = get_total_rows(df_diario_acumulado)
tasa_mortalidad_y_letalidad(df_diario_acumulado)
df_diario_acumulado['Comunidad/Ciudad Autónoma'] = [correct_communites[com] for com in df_diario_acumulado['Comunidad Autónoma']]
df_semanal_acumulado = obtener_df_semanal(df_diario_acumulado)
df_semanal_desacumulado = obtener_df_semanal_desacumulado(df_semanal_acumulado)
save_df('diario_acumulado.csv', df_diario_acumulado)
save_df('semanal_acumulado.csv', df_semanal_acumulado)
save_df('semanal_desacumulado.csv', df_semanal_desacumulado)
download_data.py
import requests
from datetime import datetime, timedelta
import pandas as pd
import json
import os
from difflib import SequenceMatcher
import time
DATA_FOLDER = "./data"
base_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/"
date_format = '%m-%d-%Y'  # e.g. 01-22-2020.csv
df_date_format = '%Y-%m-%d'
communites_list = ['Castilla-Leon',
'Cataluña',
'Ceuta',
'Murcia',
'La Rioja',
'Baleares',
'Canarias',
'Cantabria',
'Andalucia',
'Asturias',
'Valencia',
'Melilla',
'Navarra',
'Galicia',
'Aragon',
'Madrid',
'Extremadura',
'Castilla-La Mancha',
'Pais Vasco']
#communites_geojson = read_communites_geojson()
correspondence_dict = {'Andalusia': 'Andalucia',
'Aragon': 'Aragon',
'Asturias': 'Asturias',
'Baleares': 'Baleares',
'C. Valenciana': 'Valencia',
'Canarias': 'Canarias',
'Cantabria': 'Cantabria',
'Castilla - La Mancha': 'Castilla-La Mancha',
'Castilla y Leon': 'Castilla-Leon',
'Catalonia': 'Cataluña',
'Ceuta': 'Ceuta',
'Extremadura': 'Extremadura',
'Galicia': 'Galicia',
'La Rioja': 'La Rioja',
'Madrid': 'Madrid',
'Melilla': 'Melilla',
'Murcia': 'Murcia',
'Navarra': 'Navarra',
'Pais Vasco': 'Pais Vasco'}
#communities_geojson = read_communites_geojson("spain-communites-v2")
def correspondence_string(string, list_to_match):
current_ratio = 0
return_string = None
for string_from_list in list_to_match:
ratio = SequenceMatcher(None, string, string_from_list).ratio()
if ratio > current_ratio:
current_ratio = ratio
return_string = string_from_list
return return_string
def get_correspondence_dict(covid_communities_name, string_list = communites_list):
dic_correspondence = {}
for original_string in covid_communities_name:
dic_correspondence[original_string] = correspondence_string(original_string, string_list)
return dic_correspondence
#get_correspondence_dict(dfs[0]["Province_State"])
def format_day(day_str):
day = datetime.strptime(day_str, "%m-%d-%Y")
return datetime.strftime(day, "%Y-%m-%d")
def read_communites_geojson(name = "spain-communities" ):
with open(f"./data/geojson/{name}.geojson") as f:
geojson = json.load(f)
communites = []
for region in geojson['features']:
nameunit = region["properties"]["nameunit"]
if "/" in nameunit:
region["properties"]["nameunit"] = nameunit.split("/")[0]
if 'Ciudad Autónoma de Ceuta' in nameunit:
region["properties"]["nameunit"] = "Ceuta"
elif 'Ciudad Autónoma de Melilla' in nameunit:
region["properties"]["nameunit"] = "Melilla"
elif 'Comunidad Foral de Navarra' in nameunit:
region["properties"]["nameunit"] = "Navarra"
communites.append(region["properties"]["nameunit"])
return geojson, communites
def get_communites(geojson):
regions = []
for region in geojson['features']:
if region["properties"]["name"] == "Valencia":
region["properties"]["name"] = "C. Valenciana"
regions.append(region["properties"]["name"])
return regions
def generate_days(start_date):
    # Build the list of report dates from `start_date` up to today.
    end_date = datetime.now()
    step = timedelta(days=1)
    result = []
    while start_date < end_date:
        result.append(start_date.strftime(date_format))
        start_date += step
    return result
#download_all_datasets()
def add_to_list(l, lat_sum, lon_sum):
    # For moving the Canary Islands near mainland Spain on the map.
    # `l` is an arbitrarily nested list of [lon, lat] coordinate pairs.
    if isinstance(l, list) and isinstance(l[0], float) and isinstance(l[1], float):
        return l[0] + lat_sum, l[1] + lon_sum
    return [add_to_list(sub, lat_sum, lon_sum) for sub in l]
def reduce_precission(l, ndigits):
    # Recursively round every coordinate to `ndigits` decimals (the misspelled
    # name is kept as in the original source).
    if not isinstance(l, list):
        return round(l, ndigits)
    return [reduce_precission(sub, ndigits) for sub in l]
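# Editorial sketch (not in the original script) of the recursive helpers; note
# that add_to_list returns tuples for the innermost pairs:
#   add_to_list([[0.0, 0.0], [1.0, 1.0]], 6.65456, 5.65412)
#       -> [(6.65456, 5.65412), (7.65456, 6.65412)]
#   reduce_precission([[0.123456, 9.876543]], 3)
#       -> [[0.123, 9.877]]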
def read_original_to_displaced_canaries():
    # Shift the Canary Islands' geometry so they render next to the mainland.
    lat_sum = 6.65456
    lon_sum = 5.65412
    geojson, b = read_communites_geojson_v1('spain-communities')
    for region in geojson['features']:
        name = region["properties"]["name"]
        if name == "Canarias":
            region["geometry"]["coordinates"] = add_to_list(region["geometry"]["coordinates"],
                                                            lat_sum,
                                                            lon_sum)
    with open("./data/geojson/spain-communites-displaced-canary.geojson", "w") as f:
        json.dump(geojson, f)
def read_communites_geojson_v1(name="spain-communities"):
    # Older loader that reads the "name" property without normalisation.
    with open(f"./data/geojson/{name}.geojson") as f:
        geojson = json.load(f)
    communites = []
    for region in geojson['features']:
        name = region["properties"]["name"]
        communites.append(name)
    return geojson, communites
#communities_geojson,b = read_communites_geojson_v1()
def read_population_dataset(name='spain-communities-2020.csv'):
    # Load the INE population table; uses the global df_diario_acumulado built below.
    def clean_name_pop(name):
        # Strip the leading region code and undo the "Name, Article" ordering.
        if " " in name:
            name = " ".join(name.split(" ")[1:])
        if "," in name:
            split = name.split(",")
            name = " ".join(split[1:] + [split[0]])
        return name

    def clean_pop_pop(pop):
        # Population figures use "." as thousands separator, e.g. "8.464.411".
        # (The original returned None when no "." was present; fixed here.)
        if "." in pop:
            return int(pop.replace(".", ""))
        return int(pop)

    population = pd.read_csv(f"./data/population/{name}", sep=";")
    #population = population[population["Periodo"] == 2019]
    #population = population[population["Sexo"] == "Total"]
    population.drop(columns=['Periodo', 'Sexo'], inplace=True)
    population['Comunidades y Ciudades Autónomas'] = [clean_name_pop(name) for name in population['Comunidades y Ciudades Autónomas']]
    population["Total"] = [clean_pop_pop(pop) for pop in population["Total"]]
    population.loc[population['Comunidades y Ciudades Autónomas'] == "Comunitat Valenciana", 'Comunidades y Ciudades Autónomas'] = 'Valencia'
    population.loc[population['Comunidades y Ciudades Autónomas'] == "Comunidad Foral de Navarra", 'Comunidades y Ciudades Autónomas'] = 'Navarra'
    correspondence_dict_population = get_correspondence_dict(df_diario_acumulado["Comunidad Autónoma"].unique(), population['Comunidades y Ciudades Autónomas'])
    population.rename(columns={'Comunidades y Ciudades Autónomas': 'Comunidad'}, inplace=True)
    return population, correspondence_dict_population
def get_pop(com):
    # Look up a community's population, translating its name first; relies on
    # the globals pop_df and correspondence_dict_population set up below.
    com = correspondence_dict_population[com]
    return int(pop_df.loc[pop_df["Comunidad"] == com, 'Total'].iloc[0])
def tasa_mortalidad_y_letalidad(df):
    # Case-fatality rate plus population-level infection and death percentages.
    df["% de letalidad"] = df['Muertes'] * 100 / df['Confirmados']
    df["% Población contagiada total"] = df['Confirmados'] * 100 / df["Población"]
    df["% Población fallecida total"] = df['Muertes'] * 100 / df["Población"]
def obtener_df_semanal(df):
    # Keep only Sunday rows (weekday() == 6) to build the weekly series.
    df["Datetime"] = pd.to_datetime(df['Día'], format=df_date_format)
    df["dia_sem"] = [day.weekday() for day in df['Datetime']]
    df_semanal = df[df["dia_sem"] == 6].copy()
    df.drop(columns=["dia_sem", 'Datetime'], inplace=True)
    df_semanal.drop(columns=["dia_sem", 'Datetime'], inplace=True)
    return df_semanal
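# Editorial note: weekday() == 6 selects Sundays, e.g.
#   pd.Timestamp('2020-05-17').weekday()  # -> 6 (a Sunday)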
def save_df(name, df):
    # Write a final dataset to ./data/final_data as UTF-8, semicolon-separated.
    df.to_csv(os.path.join(DATA_FOLDER, "final_data", name),
              encoding='UTF-8',
              sep=";", index=False)
def obtener_df_semanal_desacumulado(df):
    # Turn cumulative weekly totals into per-week increments via diff().
    dfs_desacumulados = []
    for com in df["Comunidad Autónoma"].unique():
        df_com = df[df['Comunidad Autónoma'] == com].copy()
        for column in ["Confirmados", "Muertes"]:
            df_com.sort_values(by="Día", inplace=True)
            df_com[column] = df_com[column].diff()
        df_com.dropna(inplace=True)
        dfs_desacumulados.append(df_com)
    dfs_desacumulado = pd.concat(dfs_desacumulados)
    dfs_desacumulado.drop(['Población', r'% de letalidad',
                           r'% Población contagiada total',
                           r'% Población fallecida total'],
                          inplace=True,
                          axis=1)
    dfs_desacumulado.sort_values(by="Día", inplace=True)
    return dfs_desacumulado
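# Editorial sketch of the de-accumulation step on a toy cumulative series:
#   pd.Series([10, 25, 40]).diff()  # -> [NaN, 15.0, 15.0]
# After dropna(), each remaining row holds the cases/deaths added that week.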
def correct_names():
    # Build the display-name mapping against the raw (uncleaned) INE names.
    def read_population_dataset_custom(name='spain-communities-2020.csv'):
        def clean_name_pop(name):
            if " " in name:
                name = " ".join(name.split(" ")[1:])
            if "," in name:
                split = name.split(",")
                name = " ".join(split[1:] + [split[0]])
            return name

        def clean_pop_pop(pop):
            # Same fix as in read_population_dataset: handle values without ".".
            if "." in pop:
                return int(pop.replace(".", ""))
            return int(pop)

        population = pd.read_csv(f"./data/population/{name}", sep=";")
        population.drop(columns=['Periodo', 'Sexo'], inplace=True)
        population['Comunidades y Ciudades Autónomas'] = [clean_name_pop(name) for name in population['Comunidades y Ciudades Autónomas']]
        population["Total"] = [clean_pop_pop(pop) for pop in population["Total"]]
        return population

    corres_dict = get_correspondence_dict(dfs[0]['Province_State'], read_population_dataset_custom()['Comunidades y Ciudades Autónomas'])
    corres_dict['Valencia'] = 'Comunitat Valenciana'
    corres_dict['Navarra'] = 'Comunidad Foral de Navarra'
    corres_dict['Total'] = 'Total'
    for key, value in corres_dict.items():
        # Strip a leading space left over by clean_name_pop.
        if " " == value[0]:
            corres_dict[key] = value[1:]
    return corres_dict
def download_all_datasets():
    # Fetch any daily CSSE reports not already cached in ./data/covid_data.
    start_date = datetime(month=5, day=14, year=2020)
    days = generate_days(start_date)
    descargados = os.listdir(os.path.join(DATA_FOLDER,
                                          "covid_data"))
    for i, day in enumerate(days):
        filename = f"{day}.csv"
        if filename not in descargados:
            try:
                df = pd.read_csv(base_url + filename)
                #df = df.loc[df['Country_Region'] == "Spain"]
                df.to_csv(f"{DATA_FOLDER}/covid_data/{filename}", index=False)
            except Exception:
                # Typically today's report, which has not been published yet.
                print(f"No se ha encontrado el día {day}")

download_all_datasets()
dfs = []
for file in sorted(os.listdir(os.path.join(DATA_FOLDER, "covid_data"))):
    if ".csv" in file:
        day = file[:-4]
        df = pd.read_csv(f"{DATA_FOLDER}/covid_data/{file}")
        df = df.loc[(df['Country_Region'] == "Spain") & (df['Province_State'] != "Unknown")]
        df["Province_State"] = [correspondence_dict[province] for province in df["Province_State"]]
        df = df[['Province_State', 'Country_Region',
                 'Last_Update', 'Confirmed',
                 'Deaths', 'Recovered',
                 'Active']]
        df["Day"] = format_day(day)  # same ISO date broadcast to every row of this file
        df.drop(columns=["Active", 'Recovered', 'Last_Update'], inplace=True)
        # df.to_csv("data.csv", index=False)
        dfs.append(df)
df_diario_acumulado = pd.concat(dfs)
df_diario_acumulado.drop(columns=["Country_Region"], inplace=True)
df_diario_acumulado.rename(columns={"Province_State": "Comunidad Autónoma",
                                    "Confirmed": "Confirmados",
                                    "Deaths": "Muertes",
                                    "Day": "Día"}, inplace=True)
pop_df, correspondence_dict_population = read_population_dataset()
df_diario_acumulado['Población'] = df_diario_acumulado["Comunidad Autónoma"].apply(lambda x: get_pop(x))
del pop_df, correspondence_dict_population
correct_communites = correct_names()
def get_total_rows(df_diario_acumulado):
    # Append a nationwide "Total" row for every reporting day.
    dfs = []
    for day in df_diario_acumulado["Día"].unique():
        df = df_diario_acumulado.loc[df_diario_acumulado['Día'] == day].copy()
        total_row = {'Comunidad Autónoma': 'Total', 'Confirmados': sum(df['Confirmados']),
                     'Muertes': sum(df['Muertes']), 'Día': day, 'Población': sum(df['Población'])}
        # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent.
        df = pd.concat([df, pd.DataFrame([total_row])], ignore_index=True)
        dfs.append(df)
    df = pd.concat(dfs)
    return df.sort_values(by='Día')
df_diario_acumulado = get_total_rows(df_diario_acumulado)
tasa_mortalidad_y_letalidad(df_diario_acumulado)
df_diario_acumulado['Comunidad/Ciudad Autónoma'] = [correct_communites[com] for com in df_diario_acumulado['Comunidad Autónoma']]
df_semanal_acumulado = obtener_df_semanal(df_diario_acumulado)
df_semanal_desacumulado = obtener_df_semanal_desacumulado(df_semanal_acumulado)
save_df('diario_acumulado.csv', df_diario_acumulado)
save_df('semanal_acumulado.csv', df_semanal_acumulado)
save_df('semanal_desacumulado.csv', df_semanal_desacumulado)

// lib.rs
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Shareable Substrate types.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
/// Initialize a key-value collection from array.
///
/// Creates a vector of given pairs and calls `collect` on the iterator from it.
/// Can be used to create a `HashMap`.
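///
/// # Example
///
/// ```
/// // Editorial sketch, not from the original docs: collect into a `HashMap`.
/// let m: std::collections::HashMap<&str, u32> = sp_core::map!["a" => 1, "b" => 2];
/// assert_eq!(m["b"], 2);
/// ```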
#[macro_export]
macro_rules! map {
($( $name:expr => $value:expr ),* $(,)? ) => (
vec![ $( ( $name, $value ) ),* ].into_iter().collect()
);
}
#[doc(hidden)]
pub use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
pub use serde;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use sp_runtime_interface::pass_by::{PassByEnum, PassByInner};
use sp_std::{ops::Deref, prelude::*};
pub use sp_debug_derive::RuntimeDebug;
#[cfg(feature = "serde")]
pub use impl_serde::serialize as bytes;
#[cfg(feature = "full_crypto")]
pub mod hashing;
#[cfg(feature = "full_crypto")]
pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64};
pub mod crypto;
pub mod hexdisplay;
pub use paste;
#[cfg(feature = "bandersnatch-experimental")]
pub mod bandersnatch;
#[cfg(feature = "bls-experimental")]
pub mod bls;
pub mod defer;
pub mod ecdsa;
pub mod ed25519;
pub mod hash;
#[cfg(feature = "std")]
mod hasher;
pub mod offchain;
pub mod sr25519;
pub mod testing;
#[cfg(feature = "std")]
pub mod traits;
pub mod uint;
#[cfg(feature = "bls-experimental")]
pub use bls::{bls377, bls381};
pub use self::{
hash::{convert_hash, H160, H256, H512},
uint::{U256, U512},
};
#[cfg(feature = "full_crypto")]
pub use crypto::{ByteArray, DeriveJunction, Pair, Public};
#[cfg(feature = "std")]
pub use self::hasher::blake2::Blake2Hasher;
#[cfg(feature = "std")]
pub use self::hasher::keccak::KeccakHasher;
pub use hash_db::Hasher;
pub use bounded_collections as bounded;
#[cfg(feature = "std")]
pub use bounded_collections::{bounded_btree_map, bounded_vec};
pub use bounded_collections::{
parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128,
ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet,
};
pub use sp_storage as storage;
#[doc(hidden)]
pub use sp_std;
/// Hex-serialized shim for `Vec<u8>`.
#[derive(PartialEq, Eq, Clone, RuntimeDebug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))]
pub struct Bytes(#[cfg_attr(feature = "serde", serde(with = "bytes"))] pub Vec<u8>);
impl From<Vec<u8>> for Bytes {
fn from(s: Vec<u8>) -> Self {
Bytes(s)
}
}
impl From<OpaqueMetadata> for Bytes {
fn from(s: OpaqueMetadata) -> Self {
Bytes(s.0)
}
}
impl Deref for Bytes {
type Target = [u8];
fn deref(&self) -> &[u8] {
&self.0[..]
}
}
impl codec::WrapperTypeEncode for Bytes {}
impl codec::WrapperTypeDecode for Bytes {
type Wrapped = Vec<u8>;
}
#[cfg(feature = "std")]
impl sp_std::str::FromStr for Bytes {
type Err = bytes::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
bytes::from_hex(s).map(Bytes)
}
}
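// Editorial sketch, not in the original source: a small test of `Bytes` hex
// parsing. It assumes `impl_serde`'s `from_hex` accepts an optional `0x`
// prefix, as current releases do.
#[cfg(all(test, feature = "std"))]
#[test]
fn bytes_from_str_sketch() {
    let b: Bytes = "0x0102".parse().expect("valid hex string");
    assert_eq!(&*b, &[0x01, 0x02][..]);
}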
/// Stores the encoded `RuntimeMetadata` for the native side as opaque type.
#[derive(Encode, Decode, PartialEq, TypeInfo)]
pub struct OpaqueMetadata(Vec<u8>);
impl OpaqueMetadata {
/// Creates a new instance with the given metadata blob.
pub fn new(metadata: Vec<u8>) -> Self {
OpaqueMetadata(metadata)
}
}
impl sp_std::ops::Deref for OpaqueMetadata {
type Target = Vec<u8>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Simple blob to hold a `PeerId` without committing to its format.
#[derive(
Default,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
PassByInner,
TypeInfo,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct OpaquePeerId(pub Vec<u8>);
impl OpaquePeerId {
/// Create new `OpaquePeerId`
pub fn new(vec: Vec<u8>) -> Self {
OpaquePeerId(vec)
}
}
/// Provide a simple 4 byte identifier for a type.
pub trait TypeId {
/// Simple 4 byte identifier.
const TYPE_ID: [u8; 4];
}
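// Editorial sketch, not in the original source: how a downstream type would
// tag itself with a 4-byte identifier by implementing `TypeId`.
#[cfg(test)]
mod type_id_sketch {
    /// Hypothetical example type.
    struct Example;
    impl super::TypeId for Example {
        const TYPE_ID: [u8; 4] = *b"exmp";
    }
    #[test]
    fn type_id_is_four_bytes() {
        assert_eq!(<Example as super::TypeId>::TYPE_ID, *b"exmp");
    }
}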
/// A log level matching the one from `log` crate.
///
/// Used internally by `sp_io::logging::log` method.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevel {
/// `Error` log level.
Error = 1_isize,
/// `Warn` log level.
Warn = 2_isize,
/// `Info` log level.
Info = 3_isize,
/// `Debug` log level.
Debug = 4_isize,
/// `Trace` log level.
Trace = 5_isize,
}
impl From<u32> for LogLevel {
fn from(val: u32) -> Self {
match val {
x if x == LogLevel::Warn as u32 => LogLevel::Warn,
x if x == LogLevel::Info as u32 => LogLevel::Info,
x if x == LogLevel::Debug as u32 => LogLevel::Debug,
x if x == LogLevel::Trace as u32 => LogLevel::Trace,
_ => LogLevel::Error,
}
}
}
impl From<log::Level> for LogLevel {
fn from(l: log::Level) -> Self {
use log::Level::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<LogLevel> for log::Level {
fn from(l: LogLevel) -> Self {
use self::LogLevel::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Log level filter that expresses which log levels should be filtered.
///
/// This enum matches the [`log::LevelFilter`] enum.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevelFilter {
/// `Off` log level filter.
Off = 0_isize,
/// `Error` log level filter.
Error = 1_isize,
/// `Warn` log level filter.
Warn = 2_isize,
/// `Info` log level filter.
Info = 3_isize,
/// `Debug` log level filter.
Debug = 4_isize,
/// `Trace` log level filter.
Trace = 5_isize,
}
impl From<LogLevelFilter> for log::LevelFilter {
fn from(l: LogLevelFilter) -> Self {
use self::LogLevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<log::LevelFilter> for LogLevelFilter {
fn from(l: log::LevelFilter) -> Self {
use log::LevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`.
///
/// When Substrate calls into Wasm it expects a fixed signature for functions exported
/// from the Wasm blob. The return value of this signature is always a `u64`.
/// This `u64` stores the pointer to the encoded return value and the length of this encoded value.
/// The low 32 bits are reserved for the pointer, followed by 32 bits for the length.
#[cfg(not(feature = "std"))]
pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 {
let encoded = value.encode();
let ptr = encoded.as_ptr() as u64;
let length = encoded.len() as u64;
let res = ptr | (length << 32);
// Leak the output vector to avoid it being freed.
// This is fine in a WASM context since the heap
// will be discarded after the call.
sp_std::mem::forget(encoded);
res
}
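// Editorial sketch, not in the original source: a test showing how the packed
// `u64` produced above splits back into pointer and length on the host side.
#[cfg(test)]
#[test]
fn packed_wasm_return_value_layout_sketch() {
    let length: u64 = 16;
    let ptr: u64 = 0x2000;
    let packed = ptr | (length << 32);
    assert_eq!(packed & 0xFFFF_FFFF, 0x2000); // low half: pointer
    assert_eq!(packed >> 32, 16); // high half: length
}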
/// The void type - it cannot exist.
// Oh rust, you crack me up...
#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
pub enum Void {}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when `feature = std` and doesn't require
/// the bound on `no_std`. This is useful for situations where you require that a type implements
/// a certain trait with `feature = std`, but not on `no_std`.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker! {
/// /// A marker for a type that implements `Debug` when `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(feature = "std")]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(feature = "std")]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(feature = "std"))]
pub trait $trait_name {}
#[cfg(not(feature = "std"))]
impl<T> $trait_name for T {}
)+
}
}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when either `feature = std` or `feature =
/// serde` is activated.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker_std_or_serde! {
/// /// A marker for a type that implements `Debug` when `feature = serde` or `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = serde` or `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker_std_or_serde {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(any(feature = "serde", feature = "std"))]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(any(feature = "serde", feature = "std"))]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(any(feature = "serde", feature = "std")))]
pub trait $trait_name {}
#[cfg(not(any(feature = "serde", feature = "std")))]
impl<T> $trait_name for T {}
)+
}
}
/// The maximum number of bytes that can be allocated at one time.
// The maximum possible allocation size was chosen rather arbitrarily; 32 MiB should be enough for
// everybody.
pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB
/// Generates a macro for checking if a certain feature is enabled.
///
/// These feature checking macros can be used to conditionally enable/disable code in a dependent
/// crate based on a feature in the crate where the macro is called.
///
/// # Example
///```
/// sp_core::generate_feature_enabled_macro!(check_std_is_enabled, feature = "std", $);
/// sp_core::generate_feature_enabled_macro!(check_std_or_serde_is_enabled, any(feature = "std", feature = "serde"), $);
///
/// // All the code passed to the macro will then conditionally compiled based on the features
/// // activated for the crate where the macro was generated.
/// check_std_is_enabled! {
/// struct StdEnabled;
/// }
///```
#[macro_export]
// We need to skip formatting this macro because of this bug:
// https://github.com/rust-lang/rustfmt/issues/5283
#[rustfmt::skip]
macro_rules! generate_feature_enabled_macro {
( $macro_name:ident, $feature_name:meta, $d:tt ) => {
$crate::paste::paste!{
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg($feature_name)]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {
$d ( $d input )*
}
}
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg(not($feature_name))]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {};
}
// Work around for: <https://github.com/rust-lang/rust/pull/52234>
#[doc(hidden)]
pub use [<_ $macro_name>] as $macro_name;
}
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn generate_feature_enabled_macro_panics() {
generate_feature_enabled_macro!(if_test, test, $);
if_test!(panic!("This should panic"));
}
#[test]
fn generate_feature_enabled_macro_works() {
generate_feature_enabled_macro!(if_not_test, not(test), $);
if_not_test!(panic!("This should not panic"));
}
}
lib.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Shareable Substrate types.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
/// Initialize a key-value collection from array.
///
/// Creates a vector of given pairs and calls `collect` on the iterator from it.
/// Can be used to create a `HashMap`.
#[macro_export]
macro_rules! map {
($( $name:expr => $value:expr ),* $(,)? ) => (
vec![ $( ( $name, $value ) ),* ].into_iter().collect()
);
}
#[doc(hidden)]
pub use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo; | #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use sp_runtime_interface::pass_by::{PassByEnum, PassByInner};
use sp_std::{ops::Deref, prelude::*};
pub use sp_debug_derive::RuntimeDebug;
#[cfg(feature = "serde")]
pub use impl_serde::serialize as bytes;
#[cfg(feature = "full_crypto")]
pub mod hashing;
#[cfg(feature = "full_crypto")]
pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64};
pub mod crypto;
pub mod hexdisplay;
pub use paste;
#[cfg(feature = "bandersnatch-experimental")]
pub mod bandersnatch;
#[cfg(feature = "bls-experimental")]
pub mod bls;
pub mod defer;
pub mod ecdsa;
pub mod ed25519;
pub mod hash;
#[cfg(feature = "std")]
mod hasher;
pub mod offchain;
pub mod sr25519;
pub mod testing;
#[cfg(feature = "std")]
pub mod traits;
pub mod uint;
#[cfg(feature = "bls-experimental")]
pub use bls::{bls377, bls381};
pub use self::{
hash::{convert_hash, H160, H256, H512},
uint::{U256, U512},
};
#[cfg(feature = "full_crypto")]
pub use crypto::{ByteArray, DeriveJunction, Pair, Public};
#[cfg(feature = "std")]
pub use self::hasher::blake2::Blake2Hasher;
#[cfg(feature = "std")]
pub use self::hasher::keccak::KeccakHasher;
pub use hash_db::Hasher;
pub use bounded_collections as bounded;
#[cfg(feature = "std")]
pub use bounded_collections::{bounded_btree_map, bounded_vec};
pub use bounded_collections::{
parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128,
ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet,
};
pub use sp_storage as storage;
#[doc(hidden)]
pub use sp_std;
/// Hex-serialized shim for `Vec<u8>`.
#[derive(PartialEq, Eq, Clone, RuntimeDebug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))]
pub struct Bytes(#[cfg_attr(feature = "serde", serde(with = "bytes"))] pub Vec<u8>);
impl From<Vec<u8>> for Bytes {
fn from(s: Vec<u8>) -> Self {
Bytes(s)
}
}
impl From<OpaqueMetadata> for Bytes {
fn from(s: OpaqueMetadata) -> Self {
Bytes(s.0)
}
}
impl Deref for Bytes {
type Target = [u8];
fn deref(&self) -> &[u8] {
&self.0[..]
}
}
impl codec::WrapperTypeEncode for Bytes {}
impl codec::WrapperTypeDecode for Bytes {
type Wrapped = Vec<u8>;
}
#[cfg(feature = "std")]
impl sp_std::str::FromStr for Bytes {
type Err = bytes::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
bytes::from_hex(s).map(Bytes)
}
}
/// Stores the encoded `RuntimeMetadata` for the native side as opaque type.
#[derive(Encode, Decode, PartialEq, TypeInfo)]
pub struct OpaqueMetadata(Vec<u8>);
impl OpaqueMetadata {
/// Creates a new instance with the given metadata blob.
pub fn new(metadata: Vec<u8>) -> Self {
OpaqueMetadata(metadata)
}
}
impl sp_std::ops::Deref for OpaqueMetadata {
type Target = Vec<u8>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Simple blob to hold a `PeerId` without committing to its format.
#[derive(
Default,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
PassByInner,
TypeInfo,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct OpaquePeerId(pub Vec<u8>);
impl OpaquePeerId {
/// Create new `OpaquePeerId`
pub fn new(vec: Vec<u8>) -> Self {
OpaquePeerId(vec)
}
}
/// Provide a simple 4 byte identifier for a type.
pub trait TypeId {
/// Simple 4 byte identifier.
const TYPE_ID: [u8; 4];
}
/// A log level matching the one from `log` crate.
///
/// Used internally by `sp_io::logging::log` method.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevel {
/// `Error` log level.
Error = 1_isize,
/// `Warn` log level.
Warn = 2_isize,
/// `Info` log level.
Info = 3_isize,
/// `Debug` log level.
Debug = 4_isize,
/// `Trace` log level.
Trace = 5_isize,
}
impl From<u32> for LogLevel {
fn from(val: u32) -> Self {
match val {
x if x == LogLevel::Warn as u32 => LogLevel::Warn,
x if x == LogLevel::Info as u32 => LogLevel::Info,
x if x == LogLevel::Debug as u32 => LogLevel::Debug,
x if x == LogLevel::Trace as u32 => LogLevel::Trace,
_ => LogLevel::Error,
}
}
}
impl From<log::Level> for LogLevel {
fn from(l: log::Level) -> Self {
use log::Level::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<LogLevel> for log::Level {
fn from(l: LogLevel) -> Self {
use self::LogLevel::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Log level filter that expresses which log levels should be filtered.
///
/// This enum matches the [`log::LevelFilter`] enum.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevelFilter {
/// `Off` log level filter.
Off = 0_isize,
/// `Error` log level filter.
Error = 1_isize,
/// `Warn` log level filter.
Warn = 2_isize,
/// `Info` log level filter.
Info = 3_isize,
/// `Debug` log level filter.
Debug = 4_isize,
/// `Trace` log level filter.
Trace = 5_isize,
}
impl From<LogLevelFilter> for log::LevelFilter {
fn from(l: LogLevelFilter) -> Self {
use self::LogLevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<log::LevelFilter> for LogLevelFilter {
fn from(l: log::LevelFilter) -> Self {
use log::LevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`.
///
/// When Substrate calls into Wasm it expects a fixed signature for functions exported
/// from the Wasm blob. The return value of this signature is always a `u64`.
/// This `u64` stores the pointer to the encoded return value and the length of this encoded value.
/// The low `32bits` are reserved for the pointer, followed by `32bit` for the length.
#[cfg(not(feature = "std"))]
pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 {
let encoded = value.encode();
let ptr = encoded.as_ptr() as u64;
let length = encoded.len() as u64;
let res = ptr | (length << 32);
// Leak the output vector to avoid it being freed.
// This is fine in a WASM context since the heap
// will be discarded after the call.
sp_std::mem::forget(encoded);
res
}
/// The void type - it cannot exist.
// Oh rust, you crack me up...
#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
pub enum Void {}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when `feature = std` and doesn't require
/// the bound on `no_std`. This is useful for situations where you require that a type implements
/// a certain trait with `feature = std`, but not on `no_std`.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker! {
/// /// A marker for a type that implements `Debug` when `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(feature = "std")]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(feature = "std")]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(feature = "std"))]
pub trait $trait_name {}
#[cfg(not(feature = "std"))]
impl<T> $trait_name for T {}
)+
}
}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when either `feature = std` or `feature =
/// serde` is activated.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker_std_or_serde! {
/// /// A marker for a type that implements `Debug` when `feature = serde` or `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = serde` or `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker_std_or_serde {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(any(feature = "serde", feature = "std"))]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(any(feature = "serde", feature = "std"))]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(any(feature = "serde", feature = "std")))]
pub trait $trait_name {}
#[cfg(not(any(feature = "serde", feature = "std")))]
impl<T> $trait_name for T {}
)+
}
}
/// The maximum number of bytes that can be allocated at one time.
// The maximum possible allocation size was chosen rather arbitrary, 32 MiB should be enough for
// everybody.
pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB
/// Generates a macro for checking if a certain feature is enabled.
///
/// These feature checking macros can be used to conditionally enable/disable code in a dependent
/// crate based on a feature in the crate where the macro is called.
///
/// # Example
///```
/// sp_core::generate_feature_enabled_macro!(check_std_is_enabled, feature = "std", $);
/// sp_core::generate_feature_enabled_macro!(check_std_or_serde_is_enabled, any(feature = "std", feature = "serde"), $);
///
/// // All the code passed to the macro will then conditionally compiled based on the features
/// // activated for the crate where the macro was generated.
/// check_std_is_enabled! {
/// struct StdEnabled;
/// }
///```
#[macro_export]
// We need to skip formatting this macro because of this bug:
// https://github.com/rust-lang/rustfmt/issues/5283
#[rustfmt::skip]
macro_rules! generate_feature_enabled_macro {
( $macro_name:ident, $feature_name:meta, $d:tt ) => {
$crate::paste::paste!{
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg($feature_name)]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {
$d ( $d input )*
}
}
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg(not($feature_name))]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {};
}
// Work around for: <https://github.com/rust-lang/rust/pull/52234>
#[doc(hidden)]
pub use [<_ $macro_name>] as $macro_name;
}
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn generate_feature_enabled_macro_panics() {
generate_feature_enabled_macro!(if_test, test, $);
if_test!(panic!("This should panic"));
}
#[test]
fn generate_feature_enabled_macro_works() {
generate_feature_enabled_macro!(if_not_test, not(test), $);
if_not_test!(panic!("This should not panic"));
}
} | #[cfg(feature = "serde")]
pub use serde; | random_line_split |
lib.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Shareable Substrate types.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
/// Initialize a key-value collection from array.
///
/// Creates a vector of given pairs and calls `collect` on the iterator from it.
/// Can be used to create a `HashMap`.
#[macro_export]
macro_rules! map {
($( $name:expr => $value:expr ),* $(,)? ) => (
vec![ $( ( $name, $value ) ),* ].into_iter().collect()
);
}
#[doc(hidden)]
pub use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
pub use serde;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use sp_runtime_interface::pass_by::{PassByEnum, PassByInner};
use sp_std::{ops::Deref, prelude::*};
pub use sp_debug_derive::RuntimeDebug;
#[cfg(feature = "serde")]
pub use impl_serde::serialize as bytes;
#[cfg(feature = "full_crypto")]
pub mod hashing;
#[cfg(feature = "full_crypto")]
pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64};
pub mod crypto;
pub mod hexdisplay;
pub use paste;
#[cfg(feature = "bandersnatch-experimental")]
pub mod bandersnatch;
#[cfg(feature = "bls-experimental")]
pub mod bls;
pub mod defer;
pub mod ecdsa;
pub mod ed25519;
pub mod hash;
#[cfg(feature = "std")]
mod hasher;
pub mod offchain;
pub mod sr25519;
pub mod testing;
#[cfg(feature = "std")]
pub mod traits;
pub mod uint;
#[cfg(feature = "bls-experimental")]
pub use bls::{bls377, bls381};
pub use self::{
hash::{convert_hash, H160, H256, H512},
uint::{U256, U512},
};
#[cfg(feature = "full_crypto")]
pub use crypto::{ByteArray, DeriveJunction, Pair, Public};
#[cfg(feature = "std")]
pub use self::hasher::blake2::Blake2Hasher;
#[cfg(feature = "std")]
pub use self::hasher::keccak::KeccakHasher;
pub use hash_db::Hasher;
pub use bounded_collections as bounded;
#[cfg(feature = "std")]
pub use bounded_collections::{bounded_btree_map, bounded_vec};
pub use bounded_collections::{
parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128,
ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet,
};
pub use sp_storage as storage;
#[doc(hidden)]
pub use sp_std;
/// Hex-serialized shim for `Vec<u8>`.
#[derive(PartialEq, Eq, Clone, RuntimeDebug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))]
pub struct Bytes(#[cfg_attr(feature = "serde", serde(with = "bytes"))] pub Vec<u8>);
impl From<Vec<u8>> for Bytes {
fn from(s: Vec<u8>) -> Self {
Bytes(s)
}
}
impl From<OpaqueMetadata> for Bytes {
fn from(s: OpaqueMetadata) -> Self {
Bytes(s.0)
}
}
impl Deref for Bytes {
type Target = [u8];
fn deref(&self) -> &[u8] |
}
impl codec::WrapperTypeEncode for Bytes {}
impl codec::WrapperTypeDecode for Bytes {
type Wrapped = Vec<u8>;
}
#[cfg(feature = "std")]
impl sp_std::str::FromStr for Bytes {
type Err = bytes::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
bytes::from_hex(s).map(Bytes)
}
}
/// Stores the encoded `RuntimeMetadata` for the native side as opaque type.
#[derive(Encode, Decode, PartialEq, TypeInfo)]
pub struct OpaqueMetadata(Vec<u8>);
impl OpaqueMetadata {
/// Creates a new instance with the given metadata blob.
pub fn new(metadata: Vec<u8>) -> Self {
OpaqueMetadata(metadata)
}
}
impl sp_std::ops::Deref for OpaqueMetadata {
type Target = Vec<u8>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Simple blob to hold a `PeerId` without committing to its format.
#[derive(
Default,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
PassByInner,
TypeInfo,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct OpaquePeerId(pub Vec<u8>);
impl OpaquePeerId {
/// Create new `OpaquePeerId`
pub fn new(vec: Vec<u8>) -> Self {
OpaquePeerId(vec)
}
}
/// Provide a simple 4 byte identifier for a type.
pub trait TypeId {
/// Simple 4 byte identifier.
const TYPE_ID: [u8; 4];
}
/// A log level matching the one from `log` crate.
///
/// Used internally by the `sp_io::logging::log` method.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevel {
/// `Error` log level.
Error = 1_isize,
/// `Warn` log level.
Warn = 2_isize,
/// `Info` log level.
Info = 3_isize,
/// `Debug` log level.
Debug = 4_isize,
/// `Trace` log level.
Trace = 5_isize,
}
impl From<u32> for LogLevel {
fn from(val: u32) -> Self {
match val {
x if x == LogLevel::Warn as u32 => LogLevel::Warn,
x if x == LogLevel::Info as u32 => LogLevel::Info,
x if x == LogLevel::Debug as u32 => LogLevel::Debug,
x if x == LogLevel::Trace as u32 => LogLevel::Trace,
_ => LogLevel::Error,
}
}
}
impl From<log::Level> for LogLevel {
fn from(l: log::Level) -> Self {
use log::Level::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<LogLevel> for log::Level {
fn from(l: LogLevel) -> Self {
use self::LogLevel::*;
match l {
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Log level filter that expresses which log levels should be filtered.
///
/// This enum matches the [`log::LevelFilter`] enum.
#[derive(Encode, Decode, PassByEnum, Copy, Clone)]
pub enum LogLevelFilter {
/// `Off` log level filter.
Off = 0_isize,
/// `Error` log level filter.
Error = 1_isize,
/// `Warn` log level filter.
Warn = 2_isize,
/// `Info` log level filter.
Info = 3_isize,
/// `Debug` log level filter.
Debug = 4_isize,
/// `Trace` log level filter.
Trace = 5_isize,
}
impl From<LogLevelFilter> for log::LevelFilter {
fn from(l: LogLevelFilter) -> Self {
use self::LogLevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
impl From<log::LevelFilter> for LogLevelFilter {
fn from(l: log::LevelFilter) -> Self {
use log::LevelFilter::*;
match l {
Off => Self::Off,
Error => Self::Error,
Warn => Self::Warn,
Info => Self::Info,
Debug => Self::Debug,
Trace => Self::Trace,
}
}
}
/// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`.
///
/// When Substrate calls into Wasm it expects a fixed signature for functions exported
/// from the Wasm blob. The return value of this signature is always a `u64`.
/// This `u64` stores the pointer to the encoded return value and the length of this encoded value.
/// The low 32 bits hold the pointer and the high 32 bits hold the length.
#[cfg(not(feature = "std"))]
pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 {
let encoded = value.encode();
let ptr = encoded.as_ptr() as u64;
let length = encoded.len() as u64;
let res = ptr | (length << 32);
// Leak the output vector to avoid it being freed.
// This is fine in a WASM context since the heap
// will be discarded after the call.
sp_std::mem::forget(encoded);
res
}
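// Illustrative sketch (ours, not part of the upstream source): unpacking the
// `u64` produced above on the host side; the helper and its name are ours.
#[cfg(test)]
mod wasm_return_value_example {
fn unpack(ret: u64) -> (u32, u32) {
((ret & 0xffff_ffff) as u32, (ret >> 32) as u32)
}
#[test]
fn pointer_and_length_round_trip() {
let packed = 16u64 | (4u64 << 32); // ptr = 16, len = 4, per the scheme above
assert_eq!(unpack(packed), (16, 4));
}
}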
/// The void type - it cannot exist.
// Oh rust, you crack me up...
#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
pub enum Void {}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when `feature = std` and doesn't require
/// the bound on `no_std`. This is useful for situations where you require that a type implements
/// a certain trait with `feature = std`, but not on `no_std`.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker! {
/// /// A marker for a type that implements `Debug` when `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(feature = "std")]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(feature = "std")]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(feature = "std"))]
pub trait $trait_name {}
#[cfg(not(feature = "std"))]
impl<T> $trait_name for T {}
)+
}
}
/// Macro for creating `Maybe*` marker traits.
///
/// Such a maybe-marker trait requires the given bound when either `feature = std` or `feature =
/// serde` is activated.
///
/// # Example
///
/// ```
/// sp_core::impl_maybe_marker_std_or_serde! {
/// /// A marker for a type that implements `Debug` when `feature = serde` or `feature = std`.
/// trait MaybeDebug: std::fmt::Debug;
/// /// A marker for a type that implements `Debug + Display` when `feature = serde` or `feature = std`.
/// trait MaybeDebugDisplay: std::fmt::Debug, std::fmt::Display;
/// }
/// ```
#[macro_export]
macro_rules! impl_maybe_marker_std_or_serde {
(
$(
$(#[$doc:meta] )+
trait $trait_name:ident: $( $trait_bound:path ),+;
)+
) => {
$(
$(#[$doc])+
#[cfg(any(feature = "serde", feature = "std"))]
pub trait $trait_name: $( $trait_bound + )+ {}
#[cfg(any(feature = "serde", feature = "std"))]
impl<T: $( $trait_bound + )+> $trait_name for T {}
$(#[$doc])+
#[cfg(not(any(feature = "serde", feature = "std")))]
pub trait $trait_name {}
#[cfg(not(any(feature = "serde", feature = "std")))]
impl<T> $trait_name for T {}
)+
}
}
/// The maximum number of bytes that can be allocated at one time.
// The maximum possible allocation size was chosen rather arbitrarily; 32 MiB should be enough for
// everybody.
pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB
/// Generates a macro for checking if a certain feature is enabled.
///
/// These feature checking macros can be used to conditionally enable/disable code in a dependent
/// crate based on a feature in the crate where the macro is called.
///
/// # Example
///```
/// sp_core::generate_feature_enabled_macro!(check_std_is_enabled, feature = "std", $);
/// sp_core::generate_feature_enabled_macro!(check_std_or_serde_is_enabled, any(feature = "std", feature = "serde"), $);
///
/// // All the code passed to the macro will then conditionally compiled based on the features
/// // activated for the crate where the macro was generated.
/// check_std_is_enabled! {
/// struct StdEnabled;
/// }
///```
#[macro_export]
// We need to skip formatting this macro because of this bug:
// https://github.com/rust-lang/rustfmt/issues/5283
#[rustfmt::skip]
macro_rules! generate_feature_enabled_macro {
( $macro_name:ident, $feature_name:meta, $d:tt ) => {
$crate::paste::paste!{
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg($feature_name)]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {
$d ( $d input )*
}
}
/// Enable/disable the given code depending on
#[doc = concat!("`", stringify!($feature_name), "`")]
/// being enabled for the crate or not.
///
/// # Example
///
/// ```nocompile
/// // Will add the code depending on the feature being enabled or not.
#[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")]
/// ```
#[cfg(not($feature_name))]
#[macro_export]
macro_rules! [<_ $macro_name>] {
( $d ( $d input:tt )* ) => {};
}
// Workaround for: <https://github.com/rust-lang/rust/pull/52234>
#[doc(hidden)]
pub use [<_ $macro_name>] as $macro_name;
}
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn generate_feature_enabled_macro_panics() {
generate_feature_enabled_macro!(if_test, test, $);
if_test!(panic!("This should panic"));
}
#[test]
fn generate_feature_enabled_macro_works() {
generate_feature_enabled_macro!(if_not_test, not(test), $);
if_not_test!(panic!("This should not panic"));
}
}
| {
&self.0[..]
} | identifier_body |
api.rs | //! This API contains all you will need to interface your
//! your bot algorithm with the GTPv2 protocol.
//! Your main task will be to implement the GoBot trait.
use std::str::FromStr;
use std::vec::Vec;
/// Contains all the possible errors your bot
/// may return to the library.
/// Be careful: any callback returning an error it is not
/// supposed to return will cause the lib to `panic!()`.
pub enum GTPError {
NotImplemented,
InvalidBoardSize,
InvalidMove,
BadVertexList,
BoardNotEmpty,
CannotUndo,
CannotScore,
}
/// Represents a player, Black or White.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Colour {
Black,
White
}
/// Represents a vertex of the board.
/// Note that board size is at most 25x25.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct Vertex {
x: u8, // letter
y: u8 // number
}
/// Represents a move, either placing a stone, passing or resigning.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Move {
Stone(Vertex),
Pass,
Resign
}
/// Represents a move associated with a player.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct ColouredMove {
pub player: Colour,
pub mov: Move
}
/// The status of a stone: alive, dead or seki.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum StoneStatus {
Alive,
Seki,
Dead
}
/// This is the trait used by the library to call back your bot.
/// You must implement some functions; the provided default methods
/// correspond to the optional commands of the protocol. If you want to
/// support them, simply override them. If you do not, the library
/// will not report them as available.
pub trait Gtp {
/// The name of your bot (ex : "My super Bot")
fn name(&self) -> String;
/// The version of your bot (ex : "v2.3-r5")
fn version(&self) -> String;
// Any function returning a GTPError that it is not supposed
// to return will be fatal to the framework.
// Basic functions, must be implemented
| fn komi(&mut self, komi: f32) -> ();
/// Sets the board size.
/// Returns `Err(InvalidBoardSize)` if the size is not supported.
/// The protocol cannot handle board sizes > 25x25.
fn boardsize(&mut self, size: usize) -> Result<(), GTPError>;
/// Plays the provided move on the board.
/// Returns `Err(InvalidMove)` if the move is invalid.
/// The protocol does not forbid the same player playing twice in a row.
fn play(&mut self, mov: ColouredMove) -> Result<(), GTPError>;
/// Asks the bot for a move for the chosen player.
/// Cannot fail, the bot must provide a move even if the last
/// played move is of the same colour.
/// Plays the move in the internal representation of the game of the bot.
fn genmove(&mut self, player: Colour) -> Move;
// Optional functions; if not implemented, the corresponding
// commands will not be activated
// All these functions will be called once by the framework
// at startup, then clear_board will be called
/// Asks the bot for a move for the chosen player.
/// Must be deterministic, and must not actually play the move.
/// Should always return `Ok(Move)`, never raise any error.
#[allow(unused_variables)]
fn reg_genmove(&self, player: Colour) -> Result<Move, GTPError> {
Err(GTPError::NotImplemented)
}
/// Undo last move if possible.
/// If not, return `Err(CannotUndo)`.
/// If undo is never possible, should not be implemented.
fn undo(&mut self) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places handicap stones for black
/// according to pre-defined patterns; see the GTPv2 specification.
/// Returns a vector of the chosen stones.
/// Can fail with `Err(BoardNotEmpty)`.
/// The library guarantees `number` will always be between 2 and 9 inclusive.
#[allow(unused_variables)]
fn fixed_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places its handicap stones
/// and returns a vector of Vertexes.
/// It can place fewer stones if the requested number is too high.
/// Can fail with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
#[allow(unused_variables)]
fn place_free_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Uses the provided list as handicap stones for black.
/// Fails with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
/// Fails with `Err(BadVertexList)` if the vertex list is unusable
/// (two stones at the same place, or stones outside the board).
#[allow(unused_variables)]
fn set_free_handicap(&mut self, stones: &[Vertex]) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Sets the time settings for the game.
/// It is only informative: the bot should count its own time,
/// but the controller is supposed to enforce it.
/// Times are given in minutes; this should never fail.
#[allow(unused_variables)]
fn time_settings(&mut self, main_time: usize, byoyomi_time: usize, byoyomi_stones: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Returns a vector of stones of both colours with the given status,
/// in the opinion of the bot.
/// Should never fail.
#[allow(unused_variables)]
fn final_status_list(&self, status: StoneStatus) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Computes the bot's calculation of the final score.
/// If it is a draw, the float value must be 0 and the colour is not important.
/// Can fail with `Err(CannotScore)`.
fn final_score(&self) -> Result<(f32, Colour), GTPError> {
Err(GTPError::NotImplemented)
}
/// Returns a description of the board as seen by the bot:
/// (boardsize, black_stones, white_stones, black_captured_count, white_captured_count).
/// Should never fail.
fn showboard(&self) -> Result<(usize, Vec<Vertex>, Vec<Vertex>, usize, usize), GTPError> {
Err(GTPError::NotImplemented)
}
/// Allows you to handle custom commands. Returns (success, output).
#[allow(unused_variables)]
fn custom_command(&mut self, command: &str, args: &str) -> (bool, String) {
(false, "invalid command".to_string())
}
/// Returns true if the given custom command is known.
#[allow(unused_variables)]
fn known_custom_command(&self, command: &str) -> bool {
false
}
/// Returns the list of your custom commands.
fn list_custom_commands(&self) -> Vec<String> {
Vec::new()
}
/// Loads the given SGF file and replays it up to move `n`
/// (our reading of the previously unnamed `&str` parameter as a file name).
#[allow(unused_variables)]
fn loadsgf(&mut self, filename: &str, n: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
}
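// Illustrative sketch (ours, not part of the original crate): the smallest
// possible `Gtp` implementation, wiring only the required callbacks. The
// name, version and fixed "always pass" policy are placeholders.
#[cfg(test)]
mod pass_bot_example {
use super::*;
struct PassBot;
impl Gtp for PassBot {
fn name(&self) -> String { "PassBot".to_string() }
fn version(&self) -> String { "0.1".to_string() }
fn clear_board(&mut self) {}
fn komi(&mut self, _komi: f32) {}
fn boardsize(&mut self, size: usize) -> Result<(), GTPError> {
if size > 0 && size <= 25 { Ok(()) } else { Err(GTPError::InvalidBoardSize) }
}
fn play(&mut self, _mov: ColouredMove) -> Result<(), GTPError> { Ok(()) }
fn genmove(&mut self, _player: Colour) -> Move { Move::Pass }
}
#[test]
fn pass_bot_always_passes() {
let mut bot = PassBot;
assert_eq!(bot.genmove(Colour::Black), Move::Pass);
}
}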
// Vertex implementation for messing with strings
impl Vertex {
/// Creates a vertex from 2 numerical coords.
/// Both must be between 1 and 25.
pub fn from_coords(x: u8, y:u8) -> Option<Vertex> {
if x == 0 || x > 25 || y == 0 || y > 25 {
None
} else {
Some(Vertex{x: x, y: y})
}
}
/// Creates a vertex from board coordinates (from A1 to Z25).
/// Remember that letter I is banned.
pub fn from_str(text: &str) -> Option<Vertex> {
if text.len() < 2 || text.len() > 3 {
return None;
}
let mut x: u8 = text.as_bytes()[0];
if x < ('A' as u8) || x > ('Z' as u8) || (x as char) == 'I' {
return None;
}
x -= ('A' as u8) - 1;
if x > 9 {
x -= 1;
} // eliminate 'I'
let y: u8 = u8::from_str(&text[1..]).unwrap_or(0);
if y == 0 || y > 25 {
return None;
}
Some(Vertex{x: x, y: y})
}
/// Returns a tuple of coordinates.
pub fn to_coords(&self) -> (u8, u8) {
(self.x, self.y)
}
/// Returns the string representation of this vertex (ex: G12).
pub fn to_string(&self) -> String {
let mut letter: u8 = 'A' as u8;
if self.x >= 9 {
// eliminate 'I'
letter += self.x;
} else {
letter += self.x-1;
}
format!("{}{}", letter as char, self.y)
}
}
impl Move {
/// Returns a string representation of the move compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Move::Stone(vrtx) => vrtx.to_string(),
Move::Pass => "pass".to_string(),
Move::Resign => "resign".to_string(),
}
}
}
impl Colour {
/// Returns a string representation of the color compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Colour::White => "white".to_string(),
Colour::Black => "black".to_string(),
}
}
}
impl ColouredMove {
/// Returns a string representation of the colored move compatible
/// with GTPv2.
pub fn to_string(&self) -> String {
self.player.to_string() + " " + &self.mov.to_string() // GTP separates colour and vertex with a space
}
}
#[cfg(test)]
mod tests {
#[test]
fn vertex_to_string() {
let vrtx1 = super::Vertex::from_coords(8u8, 7u8).unwrap();
assert_eq!(&vrtx1.to_string(), "H7");
let vrtx2 = super::Vertex::from_coords(9u8, 13u8).unwrap();
assert_eq!(&vrtx2.to_string(), "J13");
let vrtx3 = super::Vertex::from_coords(19u8, 1u8).unwrap();
assert_eq!(&vrtx3.to_string(), "T1");
}
#[test]
fn string_to_vertex() {
let vrtx1 = super::Vertex::from_str("C7").unwrap();
assert_eq!(vrtx1.to_coords(), (3u8, 7u8));
let vrtx2 = super::Vertex::from_str("J11").unwrap();
assert_eq!(vrtx2.to_coords(), (9u8, 11u8));
let vrtx3 = super::Vertex::from_str("Z25").unwrap();
assert_eq!(vrtx3.to_coords(), (25u8, 25u8));
}
#[test]
#[should_panic]
fn too_big_coordinates() {
let vrtx = super::Vertex::from_coords(26u8, 13u8).unwrap();
assert_eq!(vrtx.to_coords(), (26u8, 13u8));
}
#[test]
#[should_panic]
fn invalid_string() {
let vrtx = super::Vertex::from_str("I13").unwrap();
assert_eq!(vrtx.to_coords(), (9u8, 13u8));
}
} | /// Clears the board, can never fail.
fn clear_board(&mut self) -> ();
/// Sets the komi, can never fail, must accept absurd values. | random_line_split |
api.rs | //! This API contains all you will need to interface your
//! your bot algorithm with the GTPv2 protocol.
//! Your main task will be to implement the GoBot trait.
use std::str::FromStr;
use std::vec::Vec;
/// Contains all the possible errors your bot
/// may return to the library.
/// Be careful: any callback returning an error it is not
/// supposed to return will cause the lib to `panic!()`.
pub enum GTPError {
NotImplemented,
InvalidBoardSize,
InvalidMove,
BadVertexList,
BoardNotEmpty,
CannotUndo,
CannotScore,
}
/// Represents a player, Black or White.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Colour {
Black,
White
}
/// Represents a vertex of the board.
/// Note that board size is at most 25x25.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct Vertex {
x: u8, // letter
y: u8 // number
}
/// Represents a move, either placing a stone, passing or resigning.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Move {
Stone(Vertex),
Pass,
Resign
}
/// Represents a move associated with a player.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct ColouredMove {
pub player: Colour,
pub mov: Move
}
/// The status of a stone: alive, dead or seki.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum StoneStatus {
Alive,
Seki,
Dead
}
/// This is the trait used by the library to call back your bot.
/// You must implement some functions; the provided default methods
/// correspond to the optional commands of the protocol. If you want to
/// support them, simply override them. If you do not, the library
/// will not report them as available.
pub trait Gtp {
/// The name of your bot (ex : "My super Bot")
fn name(&self) -> String;
/// The version of your bot (ex : "v2.3-r5")
fn version(&self) -> String;
// Any function returning a GTPError that it is not supposed
// to return will be fatal to the framework.
// Basic functions, must be implemented
/// Clears the board, can never fail.
fn clear_board(&mut self) -> ();
/// Sets the komi, can never fail, must accept absurd values.
fn komi(&mut self, komi: f32) -> ();
/// Sets the board size.
/// Returns `Err(InvalidBoardSize)` if the size is not supported.
/// The protocol cannot handle board sizes > 25x25.
fn boardsize(&mut self, size: usize) -> Result<(), GTPError>;
/// Plays the provided move on the board.
/// Returns `Err(InvalidMove)` if the move is invalid.
/// The protocol does not forbid the same player playing twice in a row.
fn play(&mut self, mov: ColouredMove) -> Result<(), GTPError>;
/// Asks the bot for a move for the chosen player.
/// Cannot fail, the bot must provide a move even if the last
/// played move is of the same colour.
/// Plays the move in the internal representation of the game of the bot.
fn genmove(&mut self, player: Colour) -> Move;
// Optional functions; if not implemented, the corresponding
// commands will not be activated
// All these functions will be called once by the framework
// at startup, then clear_board will be called
/// Asks the bot for a move for the chosen player.
/// Must be deterministic, and must not actually play the move.
/// Should always return `Ok(Move)`, never raise any error.
#[allow(unused_variables)]
fn reg_genmove(&self, player: Colour) -> Result<Move, GTPError> {
Err(GTPError::NotImplemented)
}
/// Undo last move if possible.
/// If not, return `Err(CannotUndo)`.
/// If undo is never possible, should not be implemented.
fn undo(&mut self) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places handicap stones for black
/// according to pre-defined patterns; see the GTPv2 specification.
/// Returns a vector of the chosen stones.
/// Can fail with `Err(BoardNotEmpty)`.
/// The library guarantees `number` will always be between 2 and 9 inclusive.
#[allow(unused_variables)]
fn fixed_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places its handicap stones
/// and returns a vector of Vertexes.
/// It can place fewer stones if the requested number is too high.
/// Can fail with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
#[allow(unused_variables)]
fn place_free_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Uses the provided list as handicap stones for black.
/// Fails with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
/// Fails with `Err(BadVertexList)` if the vertex list is unusable
/// (two stones at the same place, or stones outside the board).
#[allow(unused_variables)]
fn set_free_handicap(&mut self, stones: &[Vertex]) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Sets the time settings for the game.
/// It is only informative: the bot should count its own time,
/// but the controller is supposed to enforce it.
/// Times are given in minutes; this should never fail.
#[allow(unused_variables)]
fn time_settings(&mut self, main_time: usize, byoyomi_time: usize, byoyomi_stones: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Returns a vector of stones of both colours with the given status,
/// in the opinion of the bot.
/// Should never fail.
#[allow(unused_variables)]
fn final_status_list(&self, status: StoneStatus) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Computes the bot's calculation of the final score.
/// If it is a draw, the float value must be 0 and the colour is not important.
/// Can fail with `Err(CannotScore)`.
fn final_score(&self) -> Result<(f32, Colour), GTPError> {
Err(GTPError::NotImplemented)
}
/// Returns a description of the board as seen by the bot:
/// (boardsize, black_stones, white_stones, black_captured_count, white_captured_count).
/// Should never fail.
fn showboard(&self) -> Result<(usize, Vec<Vertex>, Vec<Vertex>, usize, usize), GTPError> {
Err(GTPError::NotImplemented)
}
/// Allows you to handle custom commands. Returns (success, output).
#[allow(unused_variables)]
fn custom_command(&mut self, command: &str, args: &str) -> (bool, String) {
(false, "invalid command".to_string())
}
/// Returns true if the given custom command is known.
#[allow(unused_variables)]
fn known_custom_command(&self, command: &str) -> bool {
false
}
/// Returns the list of your custom commands.
fn list_custom_commands(&self) -> Vec<String> {
Vec::new()
}
/// Loads the given SGF file and replays it up to move `n`
/// (our reading of the previously unnamed `&str` parameter as a file name).
#[allow(unused_variables)]
fn loadsgf(&mut self, filename: &str, n: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
}
// Vertex implementation for messing with strings
impl Vertex {
/// Creates a vertex from 2 numerical coords.
/// Both must be between 1 and 25.
pub fn from_coords(x: u8, y:u8) -> Option<Vertex> {
if x == 0 || x > 25 || y == 0 || y > 25 {
None
} else {
Some(Vertex{x: x, y: y})
}
}
/// Creates a vertex from board coordinates (from A1 to Z25).
/// Remember that letter I is banned.
pub fn from_str(text: &str) -> Option<Vertex> {
if text.len() < 2 || text.len() > 3 {
return None;
}
let mut x: u8 = text.as_bytes()[0];
if x < ('A' as u8) || x > ('Z' as u8) || (x as char) == 'I' {
return None;
}
x -= ('A' as u8) - 1;
if x > 9 { | // eliminate 'I'
let y: u8 = u8::from_str(&text[1..]).unwrap_or(0);
if y == 0 || y > 25 {
return None;
}
Some(Vertex{x: x, y: y})
}
/// Returns a tuple of coordinates.
pub fn to_coords(&self) -> (u8, u8) {
(self.x, self.y)
}
/// Returns the string representation of this vertex (ex: G12).
pub fn to_string(&self) -> String {
let mut letter: u8 = 'A' as u8;
if self.x >= 9 {
// eliminate 'I'
letter += self.x;
} else {
letter += self.x-1;
}
format!("{}{}", letter as char, self.y)
}
}
impl Move {
/// Returns a string representation of the move compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Move::Stone(vrtx) => vrtx.to_string(),
Move::Pass => "pass".to_string(),
Move::Resign => "resign".to_string(),
}
}
}
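// Quick sketch (ours, not part of the original crate): how `Move::to_string`
// composes with `Vertex`; module and test names are ours.
#[cfg(test)]
mod move_rendering_example {
#[test]
fn stone_pass_and_resign_render() {
let v = super::Vertex::from_coords(3u8, 4u8).unwrap();
assert_eq!(&super::Move::Stone(v).to_string(), "C4");
assert_eq!(&super::Move::Pass.to_string(), "pass");
assert_eq!(&super::Move::Resign.to_string(), "resign");
}
}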
impl Colour {
/// Returns a string representation of the color compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Colour::White => "white".to_string(),
Colour::Black => "black".to_string(),
}
}
}
impl ColouredMove {
/// Returns a string representation of the colored move compatible
/// with GTPv2.
pub fn to_string(&self) -> String {
self.player.to_string() + " " + &self.mov.to_string() // GTP separates colour and vertex with a space
}
}
#[cfg(test)]
mod tests {
#[test]
fn vertex_to_string() {
let vrtx1 = super::Vertex::from_coords(8u8, 7u8).unwrap();
assert_eq!(&vrtx1.to_string(), "H7");
let vrtx2 = super::Vertex::from_coords(9u8, 13u8).unwrap();
assert_eq!(&vrtx2.to_string(), "J13");
let vrtx3 = super::Vertex::from_coords(19u8, 1u8).unwrap();
assert_eq!(&vrtx3.to_string(), "T1");
}
#[test]
fn string_to_vertex() {
let vrtx1 = super::Vertex::from_str("C7").unwrap();
assert_eq!(vrtx1.to_coords(), (3u8, 7u8));
let vrtx2 = super::Vertex::from_str("J11").unwrap();
assert_eq!(vrtx2.to_coords(), (9u8, 11u8));
let vrtx3 = super::Vertex::from_str("Z25").unwrap();
assert_eq!(vrtx3.to_coords(), (25u8, 25u8));
}
#[test]
#[should_panic]
fn too_big_coordinates() {
let vrtx = super::Vertex::from_coords(26u8, 13u8).unwrap();
assert_eq!(vrtx.to_coords(), (26u8, 13u8));
}
#[test]
#[should_panic]
fn invalid_string() {
let vrtx = super::Vertex::from_str("I13").unwrap();
assert_eq!(vrtx.to_coords(), (9u8, 13u8));
}
}
|
x -= 1;
} | conditional_block |
api.rs | //! This API contains all you will need to interface your
//! your bot algorithm with the GTPv2 protocol.
//! Your main task will be to implement the GoBot trait.
use std::str::FromStr;
use std::vec::Vec;
/// Contains all the possible errors your bot
/// may return to the library.
/// Be careful: any callback returning an error it is not
/// supposed to return will cause the lib to `panic!()`.
pub enum GTPError {
NotImplemented,
InvalidBoardSize,
InvalidMove,
BadVertexList,
BoardNotEmpty,
CannotUndo,
CannotScore,
}
/// Represents a player, Black or White.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Colour {
Black,
White
}
/// Represents a vertex of the board.
/// Note that board size is at most 25x25.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct Vertex {
x: u8, // letter
y: u8 // number
}
/// Represents a move, either placing a stone, passing or resigning.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Move {
Stone(Vertex),
Pass,
Resign
}
/// Represents a move associated with a player.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct ColouredMove {
pub player: Colour,
pub mov: Move
}
/// The status of a stone: alive, dead or seki.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum StoneStatus {
Alive,
Seki,
Dead
}
/// This is the trait used by the library to call back your bot.
/// You must implement some functions; the provided default methods
/// correspond to the optional commands of the protocol. If you want to
/// support them, simply override them. If you do not, the library
/// will not report them as available.
pub trait Gtp {
/// The name of your bot (ex : "My super Bot")
fn name(&self) -> String;
/// The version of your bot (ex : "v2.3-r5")
fn version(&self) -> String;
// Any function returning a GTPError that it is not supposed
// to return will be fatal to the framework.
// Basic functions, must be implemented
/// Clears the board, can never fail.
fn clear_board(&mut self) -> ();
/// Sets the komi, can never fail, must accept absurd values.
fn komi(&mut self, komi: f32) -> ();
/// Sets the board size.
/// Returns `Err(InvalidBoardSize)` if the size is not supported.
/// The protocol cannot handle board sizes > 25x25.
fn boardsize(&mut self, size: usize) -> Result<(), GTPError>;
/// Plays the provided move on the board.
/// Returns `Err(InvalidMove)` if the move is invalid.
/// The protocol does not forbid the same player playing twice in a row.
fn play(&mut self, mov: ColouredMove) -> Result<(), GTPError>;
/// Asks the bot for a move for the chosen player.
/// Cannot fail, the bot must provide a move even if the last
/// played move is of the same colour.
/// Plays the move in the internal representation of the game of the bot.
fn genmove(&mut self, player: Colour) -> Move;
// Optional functions; if not implemented, the corresponding
// commands will not be activated
// All these functions will be called once by the framework
// at startup, then clear_board will be called
/// Asks the bot for a move for the chosen player.
/// Must be deterministic, and must not actually play the move.
/// Should always return `Ok(Move)`, never raise any error.
#[allow(unused_variables)]
fn reg_genmove(&self, player: Colour) -> Result<Move, GTPError> {
Err(GTPError::NotImplemented)
}
/// Undo last move if possible.
/// If not, return `Err(CannotUndo)`.
/// If undo is never possible, should not be implemented.
fn undo(&mut self) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places handicap stones for black
/// according to pre-defined patterns; see the GTPv2 specification.
/// Returns a vector of the chosen stones.
/// Can fail with `Err(BoardNotEmpty)`.
/// The library guarantees `number` will always be between 2 and 9 inclusive.
#[allow(unused_variables)]
fn fixed_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// The bot places its handicap stones
/// and returns a vector of Vertexes.
/// It can place fewer stones if the requested number is too high.
/// Can fail with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
#[allow(unused_variables)]
fn place_free_handicap(&mut self, number: usize) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Uses the provided list as handicap stones for black.
/// Fails with `Err(GTPError::BoardNotEmpty)` if the board isn't empty.
/// Fails with `Err(BadVertexList)` if the vertex list is unusable
/// (two stones at the same place, or stones outside the board).
#[allow(unused_variables)]
fn set_free_handicap(&mut self, stones: &[Vertex]) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Sets the time settings for the game.
/// It is only informative: the bot should count its own time,
/// but the controller is supposed to enforce it.
/// Times are given in minutes; this should never fail.
#[allow(unused_variables)]
fn time_settings(&mut self, main_time: usize, byoyomi_time: usize, byoyomi_stones: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
/// Returns a vector of stones of both colours with the given status,
/// in the opinion of the bot.
/// Should never fail.
#[allow(unused_variables)]
fn | (&self, status: StoneStatus) -> Result<Vec<Vertex>, GTPError> {
Err(GTPError::NotImplemented)
}
/// Computes the bot's calculation of the final score.
/// If it is a draw, the float value must be 0 and the colour is not important.
/// Can fail with `Err(CannotScore)`.
fn final_score(&self) -> Result<(f32, Colour), GTPError> {
Err(GTPError::NotImplemented)
}
/// Returns a description of the board as seen by the bot:
/// (boardsize, black_stones, white_stones, black_captured_count, white_captured_count).
/// Should never fail.
fn showboard(&self) -> Result<(usize, Vec<Vertex>, Vec<Vertex>, usize, usize), GTPError> {
Err(GTPError::NotImplemented)
}
/// Allows you to handle custom commands. Returns (success, output).
#[allow(unused_variables)]
fn custom_command(&mut self, command: &str, args: &str) -> (bool, String) {
(false, "invalid command".to_string())
}
/// Returns true if the given custom command is known.
#[allow(unused_variables)]
fn known_custom_command(&self, command: &str) -> bool {
false
}
/// Returns the list of your custom commands.
fn list_custom_commands(&self) -> Vec<String> {
Vec::new()
}
/// Loads the given SGF file and replays it up to move `n`
/// (our reading of the previously unnamed `&str` parameter as a file name).
#[allow(unused_variables)]
fn loadsgf(&mut self, filename: &str, n: usize) -> Result<(), GTPError> {
Err(GTPError::NotImplemented)
}
}
// Vertex implementation for messing with strings
impl Vertex {
/// Creates a vertex from 2 numerical coords.
/// Both must be between 1 and 25.
pub fn from_coords(x: u8, y:u8) -> Option<Vertex> {
if x == 0 || x > 25 || y == 0 || y > 25 {
None
} else {
Some(Vertex{x: x, y: y})
}
}
/// Creates a vertex from board coordinates (from A1 to Z25).
/// Remember that letter I is banned.
pub fn from_str(text: &str) -> Option<Vertex> {
if text.len() < 2 || text.len() > 3 {
return None;
}
let mut x: u8 = text.as_bytes()[0];
if x < ('A' as u8) || x > ('Z' as u8) || (x as char) == 'I' {
return None;
}
x -= ('A' as u8) - 1;
if x > 9 {
x -= 1;
} // eliminate 'I'
let y: u8 = u8::from_str(&text[1..]).unwrap_or(0);
if y == 0 || y > 25 {
return None;
}
Some(Vertex{x: x, y: y})
}
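// Worked example (ours, not in the original): "J11" parses as
// x = 'J' - 'A' + 1 = 10, and the skip-'I' correction lowers it to x = 9;
// `to_string` below maps x = 9 back to 'A' + 9 = 'J', so the mapping
// round-trips.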
/// Returns a tuple of coordinates.
pub fn to_coords(&self) -> (u8, u8) {
(self.x, self.y)
}
/// Returns the string representation of this vertex (ex: G12).
pub fn to_string(&self) -> String {
let mut letter: u8 = 'A' as u8;
if self.x >= 9 {
// eliminate 'I'
letter += self.x;
} else {
letter += self.x-1;
}
format!("{}{}", letter as char, self.y)
}
}
impl Move {
/// Returns a string representation of the move compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Move::Stone(vrtx) => vrtx.to_string(),
Move::Pass => "pass".to_string(),
Move::Resign => "resign".to_string(),
}
}
}
impl Colour {
/// Returns a string representation of the color compatible with
/// GTPv2.
pub fn to_string(&self) -> String {
match *self {
Colour::White => "white".to_string(),
Colour::Black => "black".to_string(),
}
}
}
impl ColouredMove {
/// Returns a string representation of the colored move compatible
/// with GTPv2.
pub fn to_string(&self) -> String {
self.player.to_string() + " " + &self.mov.to_string() // GTP separates colour and vertex with a space
}
}
#[cfg(test)]
mod tests {
#[test]
fn vertex_to_string() {
let vrtx1 = super::Vertex::from_coords(8u8, 7u8).unwrap();
assert_eq!(&vrtx1.to_string(), "H7");
let vrtx2 = super::Vertex::from_coords(9u8, 13u8).unwrap();
assert_eq!(&vrtx2.to_string(), "J13");
let vrtx3 = super::Vertex::from_coords(19u8, 1u8).unwrap();
assert_eq!(&vrtx3.to_string(), "T1");
}
#[test]
fn string_to_vertex() {
let vrtx1 = super::Vertex::from_str("C7").unwrap();
assert_eq!(vrtx1.to_coords(), (3u8, 7u8));
let vrtx2 = super::Vertex::from_str("J11").unwrap();
assert_eq!(vrtx2.to_coords(), (9u8, 11u8));
let vrtx3 = super::Vertex::from_str("Z25").unwrap();
assert_eq!(vrtx3.to_coords(), (25u8, 25u8));
}
#[test]
#[should_panic]
fn too_big_coordinates() {
let vrtx = super::Vertex::from_coords(26u8, 13u8).unwrap();
assert_eq!(vrtx.to_coords(), (26u8, 13u8));
}
#[test]
#[should_panic]
fn invalid_string() {
let vrtx = super::Vertex::from_str("I13").unwrap();
assert_eq!(vrtx.to_coords(), (9u8, 13u8));
}
}
| final_status_list | identifier_name |
my.rs | use std::io;
use std::collections::{HashSet, HashMap};
use std::iter::FromIterator;
use paths_builder::Path;
macro_rules! parse_input {
($x:expr, $t:ident) => ($x.trim().parse::<$t>().unwrap())
}
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct Pt {
x: i32,
y: i32,
}
mod paths_builder {
pub use crate::paths_builder_impl::build_paths;
use crate::*;
#[derive(Debug, Clone)]
pub struct | {
pub moves: Vec<(i8, i8)>, // (dx, dy)
pub nowater: Vec<Pt>, // (x, y) sorted
pub noobstacles: Vec<Pt>, // (x, y) sorted
pub xmin: i32,
pub xmax: i32,
pub ymin: i32,
pub ymax: i32,
}
#[derive(Debug)]
pub struct Paths {
pub paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>>,
}
}
struct BallPaths {
count: usize,
ball: usize,
paths: Vec<(paths_builder::Path,usize)>
}
struct Main {
width : usize,
height: usize,
field : Vec<u8>,
holes : Vec<Pt>,
balls : Vec<(Pt, u8)>,
water : HashSet<Pt>,
obstacles : HashSet<Pt>,
ball_paths : Vec<BallPaths>,
}
impl Main {
fn new(width: i32, height: i32) -> Self {
eprintln!("field size: {} x {}", width, height);
Self {
width: width as usize,
height: height as usize,
field: vec![0u8; (width * height) as usize],
balls: Vec::new(),
holes: Vec::new(),
water: HashSet::new(),
obstacles: HashSet::new(),
ball_paths: Vec::new(),
}
}
fn set_row(&mut self, row : usize, data : &[u8]) {
assert!(row < self.height);
assert!(data.len() == self.width);
let base = self.width * row;
for (col, &c) in data.iter().enumerate() {
self.field[base + col] = match c {
b'.' => 1,
b'X' => {//water
let coords = Pt{ x: col as i32, y: row as i32};
self.water.insert(coords);
0
},
b'H' => {//hole
let coords = Pt{ x: col as i32, y: row as i32};
self.holes.push(coords);
self.obstacles.insert(coords);
0
}
b'0'..=b'9' => {//ball
let coords = Pt{ x: col as i32, y: row as i32};
self.balls.push((coords, c - b'0'));
0
}
_=>panic!()
}
}
}
fn build_paths_for_brute_force(&mut self) -> u32 {
let max_ball = *self.balls.iter().map(|(_,sc)|sc).max().unwrap();
let paths = paths_builder::build_paths(max_ball as usize);
let mut ball_paths : Vec<BallPaths> = Vec::new();
for (ball_idx, ball) in self.balls.iter().enumerate() {
let mut v = BallPaths{ count: 0, ball: ball_idx, paths: Vec::new() };
let paths_by_shot_count = &paths.paths[&(ball.1 as usize)];
for (hole_id, hole) in self.holes.iter().enumerate() {
let dx = hole.x - ball.0.x;
let dy = hole.y - ball.0.y;
if let Some(paths_to_hole) = paths_by_shot_count.get(&(dx, dy)) {
for path in paths_to_hole.iter() {
if let Some(p) = path.from_point(ball.0.x, ball.0.y, self.width, self.height) {
if p.noobstacles.iter().any(|pt|self.obstacles.contains(&pt)) {
continue;
}
if p.nowater.iter().any(|pt|self.water.contains(&pt)) {
continue;
}
v.paths.push((p, hole_id));
v.count += 1;
}
}
}
}
ball_paths.push(v);
}
ball_paths.sort_unstable_by_key(|bp|bp.count);
let bc = ball_paths.iter().fold(1u32, |acc, bp|acc * (bp.count as u32));
self.ball_paths = ball_paths;
bc
}
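// Added commentary (ours, not in the original): depth-first backtracking over
// balls. `pos` indexes `ball_paths`, which is pre-sorted so balls with the
// fewest candidate paths are tried first; `used_points` holds cells already
// crossed by committed paths and `used_holes` the holes already taken. The
// first complete conflict-free assignment is returned; `None` signals a dead
// end and triggers backtracking one level up.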
fn r (&self, used_points : &HashSet<Pt>, used_holes: &HashSet<usize>, pos : usize) -> Option<Vec<(usize, Path)>> {
let is_leaf = pos + 1 == self.balls.len();
let paths = &self.ball_paths[pos];
'outer: for (path, hole) in paths.paths.iter() {
if used_holes.contains(&hole) {
continue;
}
for pt in path.noobstacles.iter().chain(path.nowater.iter()) {
if used_points.contains(pt) {
continue 'outer;
}
}
if is_leaf {
let mut s : Vec<(usize, Path)> = Vec::new();
s.push((paths.ball, path.clone()));
return Some(s);
}
let mut uh = used_holes.clone();
uh.insert(*hole);
let mut up = used_points.clone();
for &pt in path.noobstacles.iter().chain(path.nowater.iter()) {
up.insert(pt);
}
if let Some(mut s) = self.r(&up, &uh, pos + 1) {
s.push((paths.ball, path.clone()));
return Some(s);
}
}
None
}
fn solve(&self, brute_force_k: u32) -> Vec<(usize, Path)> {
if brute_force_k == 1
{
self.ball_paths.iter().map(|bp| (bp.ball, bp.paths[0].0.clone())).collect()
} else {
self.r(&HashSet::new(), &HashSet::new(), 0).unwrap()
}
}
fn run(&mut self) {
let bc = self.build_paths_for_brute_force();
eprintln!("balls: {}, combinations to brute force: {}", self.balls.len(), bc);
let solution = self.solve(bc);
// finally render paths:
let mut field = (0..self.height).map(|_|vec!['.'; self.width]).collect::<Vec<_>>();
for (ball, path) in solution.into_iter() {
let (mut p, mut d) = self.balls[ball];
for (mdx, mdy) in path.moves {
let c = if mdx < 0 {'<'} else if mdx > 0 {'>'} else if mdy < 0 {'^'} else {'v'};
for _ in 0..d {
field[p.y as usize][p.x as usize] = c;
p.x += mdx as i32;
p.y += mdy as i32;
}
d -= 1;
}
}
for row in field {
println!("{}", row.into_iter().collect::<String>());
}
}
}
fn main() {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let inputs = input_line.split(" ").collect::<Vec<_>>();
let width = parse_input!(inputs[0], i32);
let height = parse_input!(inputs[1], i32);
let mut main = Main::new(width, height);
for i in 0..height as usize {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let row = input_line.trim();//.to_string();
main.set_row(i, row.as_bytes());
}
//eprintln!("field size {} x {}, {} balls, {} holes, {} shots max", main.width, main.height, main.balls.len(), main.holes.len(),
// main.balls.iter().map(|(_,_,n)| *n as u32).sum::<u32>());
main.run();
}
mod paths_builder_impl {
use crate::paths_builder::*;
use crate::*;
impl paths_builder::Path {
pub fn from_point(
&self,
x: i32,
y: i32,
field_width: usize,
field_height: usize,
) -> Option<Path> {
if x + self.xmin < 0
|| x + self.xmax >= (field_width as i32)
|| y + self.ymin < 0
|| y + self.ymax >= (field_height as i32)
{
return None;
}
Some(Path {
moves: self.moves.clone(),
nowater: self
.nowater
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
noobstacles: self
.noobstacles
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
xmin: self.xmin + x,
xmax: self.xmax + x,
ymin: self.ymin + y,
ymax: self.ymax + y,
})
}
}
pub fn build_paths(max_dist: usize) -> Paths {
let mut state = State {
paths: HashMap::new(),
};
for dist in 1..=max_dist {
ff(&mut state, &Context::new(), dist);
}
let mut paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>> = HashMap::new();
for (coords, contexts) in state.paths.into_iter() {
for ctx in contexts.into_iter() {
let k0 = ctx.moves[0].1;
paths
.entry(k0)
.or_insert(HashMap::new())
.entry(coords)
.or_insert(Vec::new())
.push(context_to_path(ctx));
}
}
Paths { paths }
}
fn context_to_path(ctx: Context) -> Path {
let moves = ctx
.moves
.into_iter()
.map(|(dir, _)| (DIRS[dir].0 as i8, DIRS[dir].1 as i8))
.collect();
let cx = ctx.x;
let cy = ctx.y;
let mut nowater: Vec<Pt> = ctx
.nowater
.into_iter()
.filter_map(|pt| {
if pt.x == cx && pt.y == cy {
None
} else {
Some(pt)
}
})
.collect();
nowater.sort_unstable();
let mut noobstacles: Vec<Pt> = ctx
.used
.difference(&HashSet::from_iter(nowater.iter().cloned()))
.map(|&x| x)
.filter(|&pt| (pt.x != 0 || pt.y != 0) && (pt.x != cx || pt.y != cy))
.collect();
let mut xmin = ctx.x;
let mut xmax = ctx.x;
let mut ymin = ctx.y;
let mut ymax = ctx.y;
for &pt in nowater.iter().chain(noobstacles.iter()) {
xmin = xmin.min(pt.x);
xmax = xmax.max(pt.x);
ymin = ymin.min(pt.y);
ymax = ymax.max(pt.y);
}
noobstacles.sort_unstable();
Path {
moves,
nowater,
noobstacles,
xmin,
xmax,
ymin,
ymax,
}
}
fn ff(state: &mut State, ctx: &Context, dist: usize) {
let c = (ctx.x, ctx.y);
if ctx.x != 0 || ctx.y != 0 {
state.paths.entry(c).or_insert(Vec::new()).push(ctx.clone());
}
if dist == 0 {
return;
}
for d in 0..4 {
if let Some(c) = ctx.apply_move(d, dist) {
ff(state, &c, dist - 1);
}
}
}
struct State {
paths: HashMap<(i32, i32), Vec<Context>>,
}
#[derive(Clone)]
struct Context {
x: i32,
y: i32,
used: HashSet<Pt>, // (x, y)
moves: Vec<(usize, usize)>, // (dir, dist)
nowater: Vec<Pt>, // (x, y)
}
impl Context {
fn new() -> Self {
let mut used = HashSet::new();
used.insert(Pt { x: 0, y: 0 });
Self {
x: 0,
y: 0,
used,
moves: Vec::new(),
nowater: Vec::new(),
}
}
fn apply_move(&self, dir: usize, dist: usize) -> Option<Self> {
let mut x = self.x;
let mut y = self.y;
let dx = DIRS[dir].0;
let dy = DIRS[dir].1;
let mut used = self.used.clone();
if dist == 0 {
return None;
}
for _ in 0..dist {
x += dx;
y += dy;
if used.contains(&Pt { x, y }) {
return None;
}
used.insert(Pt { x, y });
}
let mut moves = self.moves.clone();
moves.push((dir, dist));
let mut nowater = self.nowater.clone();
nowater.push(Pt { x, y });
Some(Self {
x,
y,
used,
moves,
nowater,
})
}
}
const DIRS: &[(i32, i32)] = &[(-1, 0), (1, 0), (0, 1), (0, -1)];
}
| Path | identifier_name |
my.rs | use std::io;
use std::collections::{HashSet, HashMap};
use std::iter::FromIterator;
use paths_builder::Path;
macro_rules! parse_input {
($x:expr, $t:ident) => ($x.trim().parse::<$t>().unwrap())
}
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct Pt {
x: i32,
y: i32,
}
mod paths_builder {
pub use crate::paths_builder_impl::build_paths;
use crate::*;
#[derive(Debug, Clone)]
pub struct Path {
pub moves: Vec<(i8, i8)>, // (dx, dy)
pub nowater: Vec<Pt>, // (x, y) sorted
pub noobstacles: Vec<Pt>, // (x, y) sorted
pub xmin: i32,
pub xmax: i32,
pub ymin: i32, | pub struct Paths {
pub paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>>,
}
}
struct BallPaths {
count: usize,
ball: usize,
paths: Vec<(paths_builder::Path,usize)>
}
struct Main {
width : usize,
height: usize,
field : Vec<u8>,
holes : Vec<Pt>,
balls : Vec<(Pt, u8)>,
water : HashSet<Pt>,
obstacles : HashSet<Pt>,
ball_paths : Vec<BallPaths>,
}
impl Main {
fn new(width: i32, height: i32) -> Self {
eprintln!("field size: {} x {}", width, height);
Self {
width: width as usize,
height: height as usize,
field: vec![0u8; (width * height) as usize],
balls: Vec::new(),
holes: Vec::new(),
water: HashSet::new(),
obstacles: HashSet::new(),
ball_paths: Vec::new(),
}
}
fn set_row(&mut self, row : usize, data : &[u8]) {
assert!(row < self.height);
assert!(data.len() == self.width);
let base = self.width * row;
for (col, &c) in data.iter().enumerate() {
self.field[base + col] = match c {
b'.' => 1,
b'X' => {//water
let coords = Pt{ x: col as i32, y: row as i32};
self.water.insert(coords);
0
},
b'H' => {//hole
let coords = Pt{ x: col as i32, y: row as i32};
self.holes.push(coords);
self.obstacles.insert(coords);
0
}
b'0'..=b'9' => {//ball
let coords = Pt{ x: col as i32, y: row as i32};
self.balls.push((coords, c - b'0'));
0
}
_=>panic!()
}
}
}
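// Added commentary (ours, not in the original): for every ball, translate each
// precomputed path shape whose displacement lands exactly on some hole onto
// the board, and keep it only if its intermediate cells avoid holes and its
// landing cells avoid water. The product of the per-ball candidate counts
// bounds the brute-force search space reported in `run`.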
fn build_paths_for_brute_force(&mut self) -> u32 {
let max_ball = *self.balls.iter().map(|(_,sc)|sc).max().unwrap();
let paths = paths_builder::build_paths(max_ball as usize);
let mut ball_paths : Vec<BallPaths> = Vec::new();
for (ball_idx, ball) in self.balls.iter().enumerate() {
let mut v = BallPaths{ count: 0, ball: ball_idx, paths: Vec::new() };
let paths_by_shot_count = &paths.paths[&(ball.1 as usize)];
for (hole_id, hole) in self.holes.iter().enumerate() {
let dx = hole.x - ball.0.x;
let dy = hole.y - ball.0.y;
if let Some(paths_to_hole) = paths_by_shot_count.get(&(dx, dy)) {
for path in paths_to_hole.iter() {
if let Some(p) = path.from_point(ball.0.x, ball.0.y, self.width, self.height) {
if p.noobstacles.iter().any(|pt|self.obstacles.contains(&pt)) {
continue;
}
if p.nowater.iter().any(|pt|self.water.contains(&pt)) {
continue;
}
v.paths.push((p, hole_id));
v.count += 1;
}
}
}
}
ball_paths.push(v);
}
ball_paths.sort_unstable_by_key(|bp|bp.count);
let bc = ball_paths.iter().fold(1u32, |acc, bp|acc * (bp.count as u32));
self.ball_paths = ball_paths;
bc
}
fn r (&self, used_points : &HashSet<Pt>, used_holes: &HashSet<usize>, pos : usize) -> Option<Vec<(usize, Path)>> {
let is_leaf = pos + 1 == self.balls.len();
let paths = &self.ball_paths[pos];
'outer: for (path, hole) in paths.paths.iter() {
if used_holes.contains(&hole) {
continue;
}
for pt in path.noobstacles.iter().chain(path.nowater.iter()) {
if used_points.contains(pt) {
continue 'outer;
}
}
if is_leaf {
let mut s : Vec<(usize, Path)> = Vec::new();
s.push((paths.ball, path.clone()));
return Some(s);
}
let mut uh = used_holes.clone();
uh.insert(*hole);
let mut up = used_points.clone();
for &pt in path.noobstacles.iter().chain(path.nowater.iter()) {
up.insert(pt);
}
if let Some(mut s) = self.r(&up, &uh, pos + 1) {
s.push((paths.ball, path.clone()));
return Some(s);
}
}
None
}
fn solve(&self, brute_force_k: u32) -> Vec<(usize, Path)> {
if brute_force_k == 1
{
self.ball_paths.iter().map(|bp| (bp.ball, bp.paths[0].0.clone())).collect()
} else {
self.r(&HashSet::new(), &HashSet::new(), 0).unwrap()
}
}
fn run(&mut self) {
let bc = self.build_paths_for_brute_force();
eprintln!("balls: {}, combinations to brute force: {}", self.balls.len(), bc);
let solution = self.solve(bc);
// finally render paths:
let mut field = (0..self.height).map(|_|vec!['.'; self.width]).collect::<Vec<_>>();
for (ball, path) in solution.into_iter() {
let (mut p, mut d) = self.balls[ball];
for (mdx, mdy) in path.moves {
let c = if mdx < 0 {'<'} else if mdx > 0 {'>'} else if mdy < 0 {'^'} else {'v'};
for _ in 0..d {
field[p.y as usize][p.x as usize] = c;
p.x += mdx as i32;
p.y += mdy as i32;
}
d -= 1;
}
}
for row in field {
println!("{}", row.into_iter().collect::<String>());
}
}
}
fn main() {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let inputs = input_line.split(" ").collect::<Vec<_>>();
let width = parse_input!(inputs[0], i32);
let height = parse_input!(inputs[1], i32);
let mut main = Main::new(width, height);
for i in 0..height as usize {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let row = input_line.trim();//.to_string();
main.set_row(i, row.as_bytes());
}
//eprintln!("field size {} x {}, {} balls, {} holes, {} shots max", main.width, main.height, main.balls.len(), main.holes.len(),
// main.balls.iter().map(|(_,_,n)| *n as u32).sum::<u32>());
main.run();
}
mod paths_builder_impl {
use crate::paths_builder::*;
use crate::*;
impl paths_builder::Path {
pub fn from_point(
&self,
x: i32,
y: i32,
field_width: usize,
field_height: usize,
) -> Option<Path> {
if x + self.xmin < 0
|| x + self.xmax >= (field_width as i32)
|| y + self.ymin < 0
|| y + self.ymax >= (field_height as i32)
{
return None;
}
Some(Path {
moves: self.moves.clone(),
nowater: self
.nowater
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
noobstacles: self
.noobstacles
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
xmin: self.xmin + x,
xmax: self.xmax + x,
ymin: self.ymin + y,
ymax: self.ymax + y,
})
}
}
pub fn build_paths(max_dist: usize) -> Paths {
let mut state = State {
paths: HashMap::new(),
};
for dist in 1..=max_dist {
ff(&mut state, &Context::new(), dist);
}
let mut paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>> = HashMap::new();
for (coords, contexts) in state.paths.into_iter() {
for ctx in contexts.into_iter() {
let k0 = ctx.moves[0].1;
paths
.entry(k0)
.or_insert(HashMap::new())
.entry(coords)
.or_insert(Vec::new())
.push(context_to_path(ctx));
}
}
Paths { paths }
}
fn context_to_path(ctx: Context) -> Path {
let moves = ctx
.moves
.into_iter()
.map(|(dir, _)| (DIRS[dir].0 as i8, DIRS[dir].1 as i8))
.collect();
let cx = ctx.x;
let cy = ctx.y;
let mut nowater: Vec<Pt> = ctx
.nowater
.into_iter()
.filter_map(|pt| {
if pt.x == cx && pt.y == cy {
None
} else {
Some(pt)
}
})
.collect();
nowater.sort_unstable();
let mut noobstacles: Vec<Pt> = ctx
.used
.difference(&HashSet::from_iter(nowater.iter().cloned()))
.map(|&x| x)
.filter(|&pt| (pt.x != 0 || pt.y != 0) && (pt.x != cx || pt.y != cy))
.collect();
let mut xmin = ctx.x;
let mut xmax = ctx.x;
let mut ymin = ctx.y;
let mut ymax = ctx.y;
for &pt in nowater.iter().chain(noobstacles.iter()) {
xmin = xmin.min(pt.x);
xmax = xmax.max(pt.x);
ymin = ymin.min(pt.y);
ymax = ymax.max(pt.y);
}
noobstacles.sort_unstable();
Path {
moves,
nowater,
noobstacles,
xmin,
xmax,
ymin,
ymax,
}
}
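// Added commentary (ours, not in the original): `ff` enumerates every shot
// sequence whose first shot travels exactly `dist` cells and each following
// shot one cell fewer, recording every reachable offset; `apply_move` rejects
// a shot that would re-cross a cell the path has already used.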
fn ff(state: &mut State, ctx: &Context, dist: usize) {
let c = (ctx.x, ctx.y);
if ctx.x != 0 || ctx.y != 0 {
state.paths.entry(c).or_insert(Vec::new()).push(ctx.clone());
}
if dist == 0 {
return;
}
for d in 0..4 {
if let Some(c) = ctx.apply_move(d, dist) {
ff(state, &c, dist - 1);
}
}
}
struct State {
paths: HashMap<(i32, i32), Vec<Context>>,
}
#[derive(Clone)]
struct Context {
x: i32,
y: i32,
used: HashSet<Pt>, // (x, y)
moves: Vec<(usize, usize)>, // (dir, dist)
nowater: Vec<Pt>, // (x, y)
}
impl Context {
fn new() -> Self {
let mut used = HashSet::new();
used.insert(Pt { x: 0, y: 0 });
Self {
x: 0,
y: 0,
used,
moves: Vec::new(),
nowater: Vec::new(),
}
}
fn apply_move(&self, dir: usize, dist: usize) -> Option<Self> {
let mut x = self.x;
let mut y = self.y;
let dx = DIRS[dir].0;
let dy = DIRS[dir].1;
let mut used = self.used.clone();
if dist == 0 {
return None;
}
for _ in 0..dist {
x += dx;
y += dy;
if used.contains(&Pt { x, y }) {
return None;
}
used.insert(Pt { x, y });
}
let mut moves = self.moves.clone();
moves.push((dir, dist));
let mut nowater = self.nowater.clone();
nowater.push(Pt { x, y });
Some(Self {
x,
y,
used,
moves,
nowater,
})
}
}
const DIRS: &[(i32, i32)] = &[(-1, 0), (1, 0), (0, 1), (0, -1)];
} | pub ymax: i32,
}
#[derive(Debug)] | random_line_split |
my.rs | use std::io;
use std::collections::{HashSet, HashMap};
use std::iter::FromIterator;
use paths_builder::Path;
macro_rules! parse_input {
($x:expr, $t:ident) => ($x.trim().parse::<$t>().unwrap())
}
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct Pt {
x: i32,
y: i32,
}
mod paths_builder {
pub use crate::paths_builder_impl::build_paths;
use crate::*;
#[derive(Debug, Clone)]
pub struct Path {
pub moves: Vec<(i8, i8)>, // (dx, dy)
pub nowater: Vec<Pt>, // (x, y) sorted
pub noobstacles: Vec<Pt>, // (x, y) sorted
pub xmin: i32,
pub xmax: i32,
pub ymin: i32,
pub ymax: i32,
}
#[derive(Debug)]
pub struct Paths {
pub paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>>,
}
}
struct BallPaths {
count: usize,
ball: usize,
paths: Vec<(paths_builder::Path,usize)>
}
struct Main {
width : usize,
height: usize,
field : Vec<u8>,
holes : Vec<Pt>,
balls : Vec<(Pt, u8)>,
water : HashSet<Pt>,
obstacles : HashSet<Pt>,
ball_paths : Vec<BallPaths>,
}
impl Main {
fn new(width: i32, height: i32) -> Self {
eprintln!("field size: {} x {}", width, height);
Self {
width: width as usize,
height: height as usize,
field: vec![0u8; (width * height) as usize],
balls: Vec::new(),
holes: Vec::new(),
water: HashSet::new(),
obstacles: HashSet::new(),
ball_paths: Vec::new(),
}
}
fn set_row(&mut self, row : usize, data : &[u8]) {
assert!(row < self.height);
assert!(data.len() == self.width);
let base = self.width * row;
for (col, &c) in data.iter().enumerate() {
self.field[base + col] = match c {
b'.' => 1,
b'X' => {//water
let coords = Pt{ x: col as i32, y: row as i32};
self.water.insert(coords);
0
},
b'H' => {//hole
let coords = Pt{ x: col as i32, y: row as i32};
self.holes.push(coords);
self.obstacles.insert(coords);
0
}
b'0'..=b'9' => {//ball
let coords = Pt{ x: col as i32, y: row as i32};
self.balls.push((coords, c - b'0'));
0
}
_=>panic!()
}
}
}
fn build_paths_for_brute_force(&mut self) -> u32 {
let max_ball = *self.balls.iter().map(|(_,sc)|sc).max().unwrap();
let paths = paths_builder::build_paths(max_ball as usize);
let mut ball_paths : Vec<BallPaths> = Vec::new();
for (ball_idx, ball) in self.balls.iter().enumerate() {
let mut v = BallPaths{ count: 0, ball: ball_idx, paths: Vec::new() };
let paths_by_shot_count = &paths.paths[&(ball.1 as usize)];
for (hole_id, hole) in self.holes.iter().enumerate() {
let dx = hole.x - ball.0.x;
let dy = hole.y - ball.0.y;
if let Some(paths_to_hole) = paths_by_shot_count.get(&(dx, dy)) {
for path in paths_to_hole.iter() {
if let Some(p) = path.from_point(ball.0.x, ball.0.y, self.width, self.height) {
if p.noobstacles.iter().any(|pt| self.obstacles.contains(pt)) {
continue;
}
if p.nowater.iter().any(|pt| self.water.contains(pt)) {
continue;
}
v.paths.push((p, hole_id));
v.count += 1;
}
}
}
}
ball_paths.push(v);
}
ball_paths.sort_unstable_by_key(|bp|bp.count);
let bc = ball_paths.iter().fold(1u32, |acc, bp|acc * (bp.count as u32));
self.ball_paths = ball_paths;
bc
}
fn r (&self, used_points : &HashSet<Pt>, used_holes: &HashSet<usize>, pos : usize) -> Option<Vec<(usize, Path)>> {
let is_leaf = pos + 1 == self.balls.len();
let paths = &self.ball_paths[pos];
'outer: for (path, hole) in paths.paths.iter() {
if used_holes.contains(hole) |
for pt in path.noobstacles.iter().chain(path.nowater.iter()) {
if used_points.contains(pt) {
continue 'outer;
}
}
if is_leaf {
let mut s : Vec<(usize, Path)> = Vec::new();
s.push((paths.ball, path.clone()));
return Some(s);
}
let mut uh = used_holes.clone();
uh.insert(*hole);
let mut up = used_points.clone();
for &pt in path.noobstacles.iter().chain(path.nowater.iter()) {
up.insert(pt);
}
if let Some(mut s) = self.r(&up, &uh, pos + 1) {
s.push((paths.ball, path.clone()));
return Some(s);
}
}
None
}
fn solve(&self, brute_force_k: u32) -> Vec<(usize, Path)> {
if brute_force_k == 1
{
self.ball_paths.iter().map(|bp| (bp.ball, bp.paths[0].0.clone())).collect()
} else {
self.r(&HashSet::new(), &HashSet::new(), 0).unwrap()
}
}
fn run(&mut self) {
let bc = self.build_paths_for_brute_force();
eprintln!("balls: {}, combinations to brute force: {}", self.balls.len(), bc);
let solution = self.solve(bc);
// finally render paths:
let mut field = (0..self.height).map(|_|vec!['.'; self.width]).collect::<Vec<_>>();
for (ball, path) in solution.into_iter() {
let (mut p, mut d) = self.balls[ball];
for (mdx, mdy) in path.moves {
let c = if mdx < 0 {'<'} else if mdx > 0 {'>'} else if mdy < 0 {'^'} else {'v'};
for _ in 0..d {
field[p.y as usize][p.x as usize] = c;
p.x += mdx as i32;
p.y += mdy as i32;
}
d -= 1;
}
}
for row in field {
println!("{}", row.into_iter().collect::<String>());
}
}
}
fn main() {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let inputs = input_line.split(" ").collect::<Vec<_>>();
let width = parse_input!(inputs[0], i32);
let height = parse_input!(inputs[1], i32);
let mut main = Main::new(width, height);
for i in 0..height as usize {
let mut input_line = String::new();
io::stdin().read_line(&mut input_line).unwrap();
let row = input_line.trim();//.to_string();
main.set_row(i, row.as_bytes());
}
//eprintln!("field size {} x {}, {} balls, {} holes, {} shots max", main.width, main.height, main.balls.len(), main.holes.len(),
// main.balls.iter().map(|(_,_,n)| *n as u32).sum::<u32>());
main.run();
}
mod paths_builder_impl {
use crate::paths_builder::*;
use crate::*;
impl paths_builder::Path {
pub fn from_point(
&self,
x: i32,
y: i32,
field_width: usize,
field_height: usize,
) -> Option<Path> {
if x + self.xmin < 0
|| x + self.xmax >= (field_width as i32)
|| y + self.ymin < 0
|| y + self.ymax >= (field_height as i32)
{
return None;
}
Some(Path {
moves: self.moves.clone(),
nowater: self
.nowater
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
noobstacles: self
.noobstacles
.iter()
.map(|&pt| Pt {
x: pt.x + x,
y: pt.y + y,
})
.collect(),
xmin: self.xmin + x,
xmax: self.xmax + x,
ymin: self.ymin + y,
ymax: self.ymax + y,
})
}
}
pub fn build_paths(max_dist: usize) -> Paths {
let mut state = State {
paths: HashMap::new(),
};
for dist in 1..=max_dist {
ff(&mut state, &Context::new(), dist);
}
let mut paths: HashMap<usize, HashMap<(i32, i32), Vec<Path>>> = HashMap::new();
for (coords, contexts) in state.paths.into_iter() {
for ctx in contexts.into_iter() {
let k0 = ctx.moves[0].1;
paths
.entry(k0)
.or_insert(HashMap::new())
.entry(coords)
.or_insert(Vec::new())
.push(context_to_path(ctx));
}
}
Paths { paths }
}
fn context_to_path(ctx: Context) -> Path {
let moves = ctx
.moves
.into_iter()
.map(|(dir, _)| (DIRS[dir].0 as i8, DIRS[dir].1 as i8))
.collect();
let cx = ctx.x;
let cy = ctx.y;
let mut nowater: Vec<Pt> = ctx
.nowater
.into_iter()
.filter_map(|pt| {
if pt.x == cx && pt.y == cy {
None
} else {
Some(pt)
}
})
.collect();
nowater.sort_unstable();
let mut noobstacles: Vec<Pt> = ctx
.used
.difference(&HashSet::from_iter(nowater.iter().cloned()))
.map(|&x| x)
.filter(|&pt| (pt.x != 0 || pt.y != 0) && (pt.x != cx || pt.y != cy))
.collect();
let mut xmin = ctx.x;
let mut xmax = ctx.x;
let mut ymin = ctx.y;
let mut ymax = ctx.y;
for &pt in nowater.iter().chain(noobstacles.iter()) {
xmin = xmin.min(pt.x);
xmax = xmax.max(pt.x);
ymin = ymin.min(pt.y);
ymax = ymax.max(pt.y);
}
noobstacles.sort_unstable();
Path {
moves,
nowater,
noobstacles,
xmin,
xmax,
ymin,
ymax,
}
}
fn ff(state: &mut State, ctx: &Context, dist: usize) {
let c = (ctx.x, ctx.y);
if ctx.x != 0 || ctx.y != 0 {
state.paths.entry(c).or_insert(Vec::new()).push(ctx.clone());
}
if dist == 0 {
return;
}
for d in 0..4 {
if let Some(c) = ctx.apply_move(d, dist) {
ff(state, &c, dist - 1);
}
}
}
struct State {
paths: HashMap<(i32, i32), Vec<Context>>,
}
#[derive(Clone)]
struct Context {
x: i32,
y: i32,
used: HashSet<Pt>, // (x, y)
moves: Vec<(usize, usize)>, // (dir, dist)
nowater: Vec<Pt>, // (x, y)
}
impl Context {
fn new() -> Self {
let mut used = HashSet::new();
used.insert(Pt { x: 0, y: 0 });
Self {
x: 0,
y: 0,
used,
moves: Vec::new(),
nowater: Vec::new(),
}
}
fn apply_move(&self, dir: usize, dist: usize) -> Option<Self> {
let mut x = self.x;
let mut y = self.y;
let dx = DIRS[dir].0;
let dy = DIRS[dir].1;
let mut used = self.used.clone();
if dist == 0 {
return None;
}
for _ in 0..dist {
x += dx;
y += dy;
if used.contains(&Pt { x, y }) {
return None;
}
used.insert(Pt { x, y });
}
let mut moves = self.moves.clone();
moves.push((dir, dist));
let mut nowater = self.nowater.clone();
nowater.push(Pt { x, y });
Some(Self {
x,
y,
used,
moves,
nowater,
})
}
}
const DIRS: &[(i32, i32)] = &[(-1, 0), (1, 0), (0, 1), (0, -1)];
}
| {
continue;
} | conditional_block |
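// The solver above orders balls by ascending candidate-path count and then backtracks,
// rejecting any candidate that reuses a hole or crosses a cell an earlier path already
// occupies. A JavaScript paraphrase of that recursion; the data shapes (ballPaths as
// [{ ball, paths: [[path, hole], ...] }] with path.cells as "x,y" strings) are
// illustrative assumptions.
function search (ballPaths, usedCells, usedHoles, pos) {
  if (pos === ballPaths.length) return [] // every ball has been assigned a path
  for (const [path, hole] of ballPaths[pos].paths) {
    if (usedHoles.has(hole)) continue // one ball per hole
    if (path.cells.some(c => usedCells.has(c))) continue // paths may not cross
    const cells = new Set(usedCells)
    path.cells.forEach(c => cells.add(c))
    const holes = new Set(usedHoles).add(hole)
    const rest = search(ballPaths, cells, holes, pos + 1)
    if (rest !== null) return [[ballPaths[pos].ball, path], ...rest]
  }
  return null // dead end; the caller advances to its next candidate
}
// Entry point: search(ballPaths, new Set(), new Set(), 0)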
FireBehaviorForecaster.js | /**
* @file FireBehaviorForecaster.js provides site specific terrain, weather, and fire behavior
* for a 48-hour period.
* @copyright 2021 Systems for Environmental Management
* @author Collin D. Bevins, <[email protected]>
* @license MIT
*/
import { Sim } from '@cbevins/fire-behavior-simulator'
import moment from 'moment'
import { getForecast as getWeatherapi } from './wxQuery-weatherapi.js'
import { getTimelines as getTomorrow } from './wxQuery-tomorrow.js'
import { mapquestElevSlopeAspect as mapQuest } from '../Globe/elevQuery-mapquest.js'
import { usgsElevSlopeAspect as usgs } from '../Globe/elevQuery-usgs.js'
export class FireBehaviorForecaster {
constructor () {
this.elevationApi = 'mapquest.com' // or 'usgs.gov'
this.weatherApi = 'tomorrow.io' // or 'weatherapi.com'
this.sim = new Sim()
this.dag = this.sim.createDag('FireForecast')
this.dag.configure([
['configure.fuel.primary', 'catalog'], // The primary fuel is specified by a fuel model catalog key
['configure.fuel.secondary', 'none'], // There are no secondary fuels
['configure.fuel.moisture', 'fosberg'], // 3 dead moisture classes and a single live category moisture
['configure.fuel.curedHerbFraction', 'input'],
['configure.wind.speed', 'at10m'],
['configure.wind.direction', 'sourceFromNorth'],
['configure.slope.steepness', 'ratio'],
['configure.fuel.windSpeedAdjustmentFactor', 'input'],
['configure.fire.vector', 'fromHead'],
['configure.temperature.humidity', 'humidity'], // enter dry bulb and humidity
['configure.fuel.chaparralTotalLoad', 'input'], // unimportant
['configure.fire.weightingMethod', 'arithmetic'], // unimportant
['configure.fire.effectiveWindSpeedLimit', 'ignored'],
['configure.fire.firelineIntensity', 'flameLength'],
['configure.fire.lengthToWidthRatio', 'lengthToWidthRatio']
])
this.dag.select([
'surface.primary.fuel.model.behave.parms.cured.herb.fraction', // ratio
'surface.primary.fuel.fire.effectiveWindSpeed', // ft/min
'surface.primary.fuel.fire.flameResidenceTime', // min
'surface.primary.fuel.fire.heading.fromUpslope', // degrees
'surface.primary.fuel.fire.heading.fromNorth', // degrees
'surface.primary.fuel.fire.heatPerUnitArea', // btu/ft2 |
'surface.primary.fuel.fire.reactionIntensity', // btu/ft2/min
'surface.fire.ellipse.axis.lengthToWidthRatio', // ratio
'surface.fire.ellipse.back.firelineIntensity', // Btu/ft/s
'surface.fire.ellipse.back.flameLength', // ft
'surface.fire.ellipse.back.scorchHeight', // ft
'surface.fire.ellipse.back.spreadDistance', // ft
'surface.fire.ellipse.back.spreadRate', // ft/min
'surface.fire.ellipse.flank.firelineIntensity',
'surface.fire.ellipse.flank.flameLength',
'surface.fire.ellipse.flank.scorchHeight',
'surface.fire.ellipse.flank.spreadDistance',
'surface.fire.ellipse.flank.spreadRate',
'surface.fire.ellipse.head.firelineIntensity',
'surface.fire.ellipse.head.flameLength',
'surface.fire.ellipse.head.scorchHeight',
'surface.fire.ellipse.head.spreadDistance',
'surface.fire.ellipse.head.spreadRate',
'surface.fire.ellipse.size.area', // ft2
'surface.fire.ellipse.size.length', // ft
'surface.fire.ellipse.size.perimeter', // ft
'surface.fire.ellipse.size.width', // ft
'site.moisture.dead.tl1h', // ratio
'site.moisture.dead.tl10h',
'site.moisture.dead.tl100h'
])
}
/**
* Adds fire behavior to the weather records
* @param {object} parms Input parameters object
* @param {array} wxArray Array of hourly weather forecast objects
* @returns {array} wxArray with fire behavior properties added to each record
*/
| (parms, wxArray) {
wxArray.forEach(wx => {
const input = {
fuel: parms.fuel,
curedHerb: 0.01 * parms.cured,
month: +(wx.date).substr(5, 2),
hour: +(wx.time).substr(0, 2),
elevDiff: parms.elevdiff,
aspect: parms.aspect,
slope: 0.01 * parms.slope,
dryBulb: wx.dryBulb,
humidity: 0.01 * wx.humidity,
shading: 0.01 * wx.cloudCover,
liveMoisture: 0.01 * parms.live,
windAt10m: 88 * wx.windSpeed,
windGust: 88 * wx.windGust,
windAdj: parms.waf,
windFrom: wx.windFrom,
elapsed: 60
}
const output = this.run(input)
wx.tl1h = 100 * output.moisture.fosberg.tl1h // ratio
wx.tl10h = 100 * output.moisture.tl10h // ratio
wx.tl100h = 100 * output.moisture.tl100h // ratio
wx.spreadRate = output.heading.spreadRate // ft/min
wx.flameLength = output.heading.flameLength // ft
wx.scorchHeight = output.heading.scorchHeight // ft
wx.headingFromNorth = output.fire.headingFromNorth // degrees
wx.gust = {
spreadRate: output.heading.gust.spreadRate, // ft/min
flameLength: output.heading.gust.flameLength, // ft
scorchHeight: output.heading.gust.scorchHeight, // ft
headingFromNorth: output.fire.gust.headingFromNorth // degrees
}
})
return wxArray
}
/**
* Gets fire behavior for the supplied inputs
* @param {object} inp Fire behavior input values
* @returns {object} Fire behavior object
*/
run (inp) {
this.dag.input([
['surface.primary.fuel.model.catalogKey', [inp.fuel]],
['surface.primary.fuel.model.behave.parms.cured.herb.fraction', [inp.curedHerb]], // fraction
['site.date.month', [inp.month]],
['site.time.hour', [inp.hour]],
['site.location.elevation.diff', [inp.elevDiff]],
['site.slope.direction.aspect', [inp.aspect]], // degrees clockwise from north
['site.slope.steepness.ratio', [inp.slope]], // vertical rise / horizontal reach
['site.temperature.air', [inp.dryBulb]], // oF
['site.temperature.relativeHumidity', [inp.humidity]], // fraction (0-1)
['site.temperature.shading', [inp.shading]], // fraction (0-1)
['site.moisture.live.herb', [inp.liveMoisture]], // fraction of fuel ovendry weight
['site.moisture.live.stem', [inp.liveMoisture]], // fraction of fuel ovendry weight
['site.wind.speed.at10m', [inp.windAt10m]], // feet per minute (1 mph = 88 ft/min)
['site.windSpeedAdjustmentFactor', [inp.windAdj]], // fraction of 10m wind speed
['site.wind.direction.source.fromNorth', [inp.windFrom]], // direction of wind origin, degrees clockwise from north
['site.fire.time.sinceIgnition', [inp.elapsed]] // minutes
]).run()
const output = {
fire: {
effectiveWindSpeed: this.dag.node('surface.primary.fuel.fire.effectiveWindSpeed').value(), // ft/min
flameResidenceTime: this.dag.node('surface.primary.fuel.fire.flameResidenceTime').value(), // min
headingFromUpslope: this.dag.node('surface.primary.fuel.fire.heading.fromUpslope').value(), // degrees
headingFromNorth: this.dag.node('surface.primary.fuel.fire.heading.fromNorth').value(), // degrees
heatPerUnitArea: this.dag.node('surface.primary.fuel.fire.heatPerUnitArea').value(), // btu/ft2 |
reactionIntensity: this.dag.node('surface.primary.fuel.fire.reactionIntensity').value() // btu/ft2/min
},
moisture: {
tl1h: this.dag.node('site.moisture.dead.tl1h').value(),
tl10h: this.dag.node('site.moisture.dead.tl10h').value(),
tl100h: this.dag.node('site.moisture.dead.tl100h').value(),
fosberg: {
reference: this.dag.node('site.moisture.dead.fosberg.reference').value(),
correction: this.dag.node('site.moisture.dead.fosberg.correction').value(),
tl1h: this.dag.node('site.moisture.dead.fosberg.tl1h').value(),
tl10h: this.dag.node('site.moisture.dead.fosberg.tl10h').value(),
tl100h: this.dag.node('site.moisture.dead.fosberg.tl100h').value()
}
},
ellipse: {
lwRatio: this.dag.node('surface.fire.ellipse.axis.lengthToWidthRatio').value(), // ratio
area: this.dag.node('surface.fire.ellipse.size.area').value(), // ft2
length: this.dag.node('surface.fire.ellipse.size.length').value(), // ft
perimeter: this.dag.node('surface.fire.ellipse.size.perimeter').value(), // ft
width: this.dag.node('surface.fire.ellipse.size.width').value() // ft
},
backing: {
firelineIntensity: this.dag.node('surface.fire.ellipse.back.firelineIntensity').value(), // Btu/ft/s
flameLength: this.dag.node('surface.fire.ellipse.back.flameLength').value(), // ft
scorchHeight: this.dag.node('surface.fire.ellipse.back.scorchHeight').value(), // ft
spreadDistance: this.dag.node('surface.fire.ellipse.back.spreadDistance').value(), // ft
spreadRate: this.dag.node('surface.fire.ellipse.back.spreadRate').value() // ft/min
},
flanking: {
firelineIntensity: this.dag.node('surface.fire.ellipse.flank.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.flank.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.flank.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.flank.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.flank.spreadRate').value()
},
heading: {
firelineIntensity: this.dag.node('surface.fire.ellipse.head.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.head.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.head.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.head.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.head.spreadRate').value()
}
}
// Add fire behavior during wind gusts
this.dag.input([
['site.wind.speed.at10m', [inp.windGust]] // feet per minute (1 mph = 88 ft/min)
]).run()
output.heading.gust = {
firelineIntensity: this.dag.node('surface.fire.ellipse.head.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.head.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.head.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.head.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.head.spreadRate').value()
}
output.fire.gust = {
headingFromUpslope: this.dag.node('surface.primary.fuel.fire.heading.fromUpslope').value(), // degrees
headingFromNorth: this.dag.node('surface.primary.fuel.fire.heading.fromNorth').value() // degrees
}
return output
}
/**
* Display the required configuration nodes
*/
showConfigs () {
const activeConfigs = this.dag.requiredConfigNodes() // returns an array of DagNode references
console.log('ACTIVE CONFIGS:')
activeConfigs.forEach(cfg => { console.log(cfg.key(), cfg.value()) })
}
/**
* Display the required input nodes
*/
showInputs () {
const requiredInputs = this.dag.requiredInputNodes() // returns an array of DagNode references
console.log('REQUIRED INPUTS:')
requiredInputs.forEach(node => { console.log(node.key()) })
}
/**
* Gets the weather and fire forecast - MAIN ENTRY POINT
*
* @param {object} parms
* - name {string} Location name
* - lat {number} Location latitude north (+) or south (-)
* - lon {number} Location longitude east (+) or west (-)
* - timezone {string} Timezone of time values, according to IANA Timezone Names (defaults to 'UTC')
* (see https://docs.tomorrow.io/reference/api-formats#timezone)
* - fuel {string} fuel model key
* - waf {number} wind speed adjustment factor from 20-ft to midflame height (fraction)
* - cured {number} herb cured fraction (%)
* - live {number} live (herb and stem) fuel moisture (%)
* - elevdiff {number} Elevation difference between forecast location and site (ft)
*
* @returns {object} { parms, wx }, where wx is the array of hourly forecast objects
*/
async getForecast (parms) {
// configure the time frame: from the start of the current hour to 48 hours out
const now = moment.utc()
parms.start = moment.utc(now).startOf('hour').toISOString() // "2019-03-20T14:09:50Z"
parms.end = moment.utc(now).add(48, 'hours').toISOString()
// First get elevation, slope, and aspect and add it to the parms
const sampleRes = 1 / (60 * 60 * 3) // 1/3 arc-second in decimal degrees
const cellWidth = 2 // Double sample distance to ensure adjacent cells have different sample
let _esa
if (this.elevationApi === 'usgs.gov') {
_esa = usgs(parms.lat, parms.lon, sampleRes, cellWidth)
} else { // mapquest.com
_esa = mapQuest(parms.lat, parms.lon, sampleRes, cellWidth)
}
// Next get weather data from tomorrow.io or weatherapi.com
let _wx
if (this.weatherApi === 'weatherapi.com') {
_wx = getWeatherapi(parms.lat, parms.lon, 1, 'fire')
} else { // tomorrow.io
_wx = getTomorrow(parms.lat, parms.lon, parms.start, parms.end, parms.timezone)
}
// Run requests in parallel...
const esa = await _esa
const wx = await _wx
parms.elev = esa.elev
parms.slope = 100 * esa.slopeRatio
parms.aspect = esa.aspect
// Add fire behavior to the weather record and return
this.addFireBehavior(parms, wx)
return { parms, wx }
}
}
| addFireBehavior | identifier_name |
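// A usage sketch for the class above, following the getForecast() JSDoc; the location,
// the 'gs1' fuel-model key, and the numeric values are illustrative assumptions.
const forecaster = new FireBehaviorForecaster()
forecaster.getForecast({
  name: 'Example Site',
  lat: 46.85, // north latitude
  lon: -113.99, // west longitude is negative
  timezone: 'America/Denver', // IANA timezone name
  fuel: 'gs1', // fuel model catalog key (assumed valid for the catalog in use)
  waf: 0.4, // 20-ft-to-midflame wind speed adjustment factor
  cured: 50, // herb cured fraction (%)
  live: 100, // live fuel moisture (%)
  elevdiff: 0 // forecast-to-site elevation difference (ft)
}).then(({ parms, wx }) => {
  wx.forEach(h => console.log(h.date, h.time, h.spreadRate, h.flameLength))
})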
FireBehaviorForecaster.js | /**
* @file FireBehaviorForecaster.js provides site specific terrain, weather, and fire behavior
* for a 48-hour period.
* @copyright 2021 Systems for Environmental Management
* @author Collin D. Bevins, <[email protected]>
* @license MIT
*/
import { Sim } from '@cbevins/fire-behavior-simulator'
import moment from 'moment'
import { getForecast as getWeatherapi } from './wxQuery-weatherapi.js'
import { getTimelines as getTomorrow } from './wxQuery-tomorrow.js'
import { mapquestElevSlopeAspect as mapQuest } from '../Globe/elevQuery-mapquest.js'
import { usgsElevSlopeAspect as usgs } from '../Globe/elevQuery-usgs.js'
export class FireBehaviorForecaster {
constructor () {
this.elevationApi = 'mapquest.com' // or 'usgs.gov'
this.weatherApi = 'tomorrow.io' // or 'weatherapi.com'
this.sim = new Sim()
this.dag = this.sim.createDag('FireForecast')
this.dag.configure([
['configure.fuel.primary', 'catalog'], // The primary fuel is specified by a fuel model catalog key
['configure.fuel.secondary', 'none'], // There are no secondary fuels
['configure.fuel.moisture', 'fosberg'], // 3 dead moisture classes and a single live category moisture
['configure.fuel.curedHerbFraction', 'input'],
['configure.wind.speed', 'at10m'],
['configure.wind.direction', 'sourceFromNorth'],
['configure.slope.steepness', 'ratio'],
['configure.fuel.windSpeedAdjustmentFactor', 'input'],
['configure.fire.vector', 'fromHead'],
['configure.temperature.humidity', 'humidity'], // enter dry bulb and humidity
['configure.fuel.chaparralTotalLoad', 'input'], // unimportant
['configure.fire.weightingMethod', 'arithmetic'], // unimportant
['configure.fire.effectiveWindSpeedLimit', 'ignored'],
['configure.fire.firelineIntensity', 'flameLength'],
['configure.fire.lengthToWidthRatio', 'lengthToWidthRatio']
])
this.dag.select([
'surface.primary.fuel.model.behave.parms.cured.herb.fraction', // ratio
'surface.primary.fuel.fire.effectiveWindSpeed', // ft/min
'surface.primary.fuel.fire.flameResidenceTime', // min
'surface.primary.fuel.fire.heading.fromUpslope', // degrees
'surface.primary.fuel.fire.heading.fromNorth', // degrees
'surface.primary.fuel.fire.heatPerUnitArea', // btu/ft2 |
'surface.primary.fuel.fire.reactionIntensity', // btu/ft2/min
'surface.fire.ellipse.axis.lengthToWidthRatio', // ratio
'surface.fire.ellipse.back.firelineIntensity', // Btu/ft/s
'surface.fire.ellipse.back.flameLength', // ft
'surface.fire.ellipse.back.scorchHeight', // ft
'surface.fire.ellipse.back.spreadDistance', // ft
'surface.fire.ellipse.back.spreadRate', // ft/min
'surface.fire.ellipse.flank.firelineIntensity',
'surface.fire.ellipse.flank.flameLength',
'surface.fire.ellipse.flank.scorchHeight',
'surface.fire.ellipse.flank.spreadDistance',
'surface.fire.ellipse.flank.spreadRate',
'surface.fire.ellipse.head.firelineIntensity',
'surface.fire.ellipse.head.flameLength',
'surface.fire.ellipse.head.scorchHeight',
'surface.fire.ellipse.head.spreadDistance',
'surface.fire.ellipse.head.spreadRate',
'surface.fire.ellipse.size.area', // ft2
'surface.fire.ellipse.size.length', // ft
'surface.fire.ellipse.size.perimeter', // ft
'surface.fire.ellipse.size.width', // ft
'site.moisture.dead.tl1h', // ratio
'site.moisture.dead.tl10h',
'site.moisture.dead.tl100h'
])
}
/**
* Adds fire behavior to the weather records
* @param {object} parms Input parameters object
* @param {array} wxArray Array of hourly weather forecast objects
* @returns {array} wxArray with fire behavior properties added to each record
*/
addFireBehavior (parms, wxArray) {
wxArray.forEach(wx => {
const input = {
fuel: parms.fuel,
curedHerb: 0.01 * parms.cured,
month: +(wx.date).substr(5, 2),
hour: +(wx.time).substr(0, 2),
elevDiff: parms.elevdiff,
aspect: parms.aspect,
slope: 0.01 * parms.slope,
dryBulb: wx.dryBulb,
humidity: 0.01 * wx.humidity,
shading: 0.01 * wx.cloudCover,
liveMoisture: 0.01 * parms.live,
windAt10m: 88 * wx.windSpeed,
windGust: 88 * wx.windGust,
windAdj: parms.waf,
windFrom: wx.windFrom,
elapsed: 60
}
const output = this.run(input)
wx.tl1h = 100 * output.moisture.fosberg.tl1h // ratio
wx.tl10h = 100 * output.moisture.tl10h // ratio
wx.tl100h = 100 * output.moisture.tl100h // ratio
wx.spreadRate = output.heading.spreadRate // ft/min
wx.flameLength = output.heading.flameLength // ft
wx.scorchHeight = output.heading.scorchHeight // ft
wx.headingFromNorth = output.fire.headingFromNorth // degrees
wx.gust = {
spreadRate: output.heading.gust.spreadRate, // ft/min
flameLength: output.heading.gust.flameLength, // ft
scorchHeight: output.heading.gust.scorchHeight, // ft
headingFromNorth: output.fire.gust.headingFromNorth // degrees
}
})
return wxArray
}
/**
* Gets fire behavior for the supplied inputs
* @param {object} inp Fire behavior input values
* @returns {object} Fire behavior object
*/
run (inp) {
this.dag.input([
['surface.primary.fuel.model.catalogKey', [inp.fuel]],
['surface.primary.fuel.model.behave.parms.cured.herb.fraction', [inp.curedHerb]], // fraction
['site.date.month', [inp.month]],
['site.time.hour', [inp.hour]],
['site.location.elevation.diff', [inp.elevDiff]],
['site.slope.direction.aspect', [inp.aspect]], // degrees clockwise from north
['site.slope.steepness.ratio', [inp.slope]], // vertical rise / horizontal reach
['site.temperature.air', [inp.dryBulb]], // oF
['site.temperature.relativeHumidity', [inp.humidity]], // fraction (0-1)
['site.temperature.shading', [inp.shading]], // fraction (0-1)
['site.moisture.live.herb', [inp.liveMoisture]], // fraction of fuel ovendry weight
['site.moisture.live.stem', [inp.liveMoisture]], // fraction of fuel ovendry weight
['site.wind.speed.at10m', [inp.windAt10m]], // feet per minute (1 mph = 88 ft/min)
['site.windSpeedAdjustmentFactor', [inp.windAdj]], // fraction of 10m wind speed
['site.wind.direction.source.fromNorth', [inp.windFrom]], // direction of wind origin, degrees clockwise from north
['site.fire.time.sinceIgnition', [inp.elapsed]] // minutes
]).run()
const output = {
fire: {
effectiveWindSpeed: this.dag.node('surface.primary.fuel.fire.effectiveWindSpeed').value(), // ft/min
flameResidenceTime: this.dag.node('surface.primary.fuel.fire.flameResidenceTime').value(), // min
headingFromUpslope: this.dag.node('surface.primary.fuel.fire.heading.fromUpslope').value(), // degrees
headingFromNorth: this.dag.node('surface.primary.fuel.fire.heading.fromNorth').value(), // degrees
heatPerUnitArea: this.dag.node('surface.primary.fuel.fire.heatPerUnitArea').value(), // btu/ft2 |
reactionIntensity: this.dag.node('surface.primary.fuel.fire.reactionIntensity').value() // btu/ft2/min
},
moisture: {
tl1h: this.dag.node('site.moisture.dead.tl1h').value(),
tl10h: this.dag.node('site.moisture.dead.tl10h').value(),
tl100h: this.dag.node('site.moisture.dead.tl100h').value(),
fosberg: {
reference: this.dag.node('site.moisture.dead.fosberg.reference').value(),
correction: this.dag.node('site.moisture.dead.fosberg.correction').value(),
tl1h: this.dag.node('site.moisture.dead.fosberg.tl1h').value(),
tl10h: this.dag.node('site.moisture.dead.fosberg.tl10h').value(),
tl100h: this.dag.node('site.moisture.dead.fosberg.tl100h').value()
}
},
ellipse: {
lwRatio: this.dag.node('surface.fire.ellipse.axis.lengthToWidthRatio').value(), // ratio
area: this.dag.node('surface.fire.ellipse.size.area').value(), // ft2
length: this.dag.node('surface.fire.ellipse.size.length').value(), // ft
perimeter: this.dag.node('surface.fire.ellipse.size.perimeter').value(), // ft
width: this.dag.node('surface.fire.ellipse.size.width').value() // ft
},
backing: {
firelineIntensity: this.dag.node('surface.fire.ellipse.back.firelineIntensity').value(), // Btu/ft/s
flameLength: this.dag.node('surface.fire.ellipse.back.flameLength').value(), // ft
scorchHeight: this.dag.node('surface.fire.ellipse.back.scorchHeight').value(), // ft
spreadDistance: this.dag.node('surface.fire.ellipse.back.spreadDistance').value(), // ft
spreadRate: this.dag.node('surface.fire.ellipse.back.spreadRate').value() // ft/min
},
flanking: {
firelineIntensity: this.dag.node('surface.fire.ellipse.flank.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.flank.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.flank.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.flank.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.flank.spreadRate').value()
},
heading: {
firelineIntensity: this.dag.node('surface.fire.ellipse.head.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.head.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.head.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.head.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.head.spreadRate').value()
}
}
// Add fire behavior during wind gusts
this.dag.input([
['site.wind.speed.at10m', [inp.windGust]] // feet per minute (1 mph = 88 ft/min)
]).run()
output.heading.gust = {
firelineIntensity: this.dag.node('surface.fire.ellipse.head.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.head.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.head.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.head.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.head.spreadRate').value()
}
output.fire.gust = {
headingFromUpslope: this.dag.node('surface.primary.fuel.fire.heading.fromUpslope').value(), // degrees
headingFromNorth: this.dag.node('surface.primary.fuel.fire.heading.fromNorth').value() // degrees
}
return output
}
/**
* Display the required configuration nodes
*/
showConfigs () {
const activeConfigs = this.dag.requiredConfigNodes() // returns an array of DagNode references
console.log('ACTIVE CONFIGS:')
activeConfigs.forEach(cfg => { console.log(cfg.key(), cfg.value()) })
}
/**
* Display the required input nodes
*/
showInputs () {
const requiredInputs = this.dag.requiredInputNodes() // returns an array of DagNode references
console.log('REQUIRED INPUTS:')
requiredInputs.forEach(node => { console.log(node.key()) })
}
/**
* Gets the weather and fire forecast - MAIN ENTRY POINT
*
* @param {object} parms
* - name {string} Location name
* - lat {number} Location latitude north (+) or south (-)
* - lon {number} Location longitude east (+) or west (-)
* - timezone {string} Timezone of time values, according to IANA Timezone Names (defaults to 'UTC')
* (see https://docs.tomorrow.io/reference/api-formats#timezone)
* - fuel {string} fuel model key
* - waf {number} wind speed adjustment factor from 20-ft to midflame height (fraction)
* - cured {number} herb cured fraction (%)
* - live {number} live (herb and stem) fuel moisture (%)
* - elevdiff {number} Elevation difference between forecast location and site (ft)
*
* @returns {object} { parms, wx }, where wx is the array of hourly forecast objects
*/
async getForecast (parms) |
}
| {
// configure the time frame: from the start of the current hour to 48 hours out
const now = moment.utc()
parms.start = moment.utc(now).startOf('hour').toISOString() // "2019-03-20T14:09:50Z"
parms.end = moment.utc(now).add(48, 'hours').toISOString()
// First get elevation, slope, and aspect and add it to the parms
const sampleRes = 1 / (60 * 60 * 3) // 1/3 arc-second in decimal degrees
const cellWidth = 2 // Double sample distance to ensure adjacent cells have different sample
let _esa
if (this.elevationApi === 'usgs.gov') {
_esa = usgs(parms.lat, parms.lon, sampleRes, cellWidth)
} else { // mapquest.com
_esa = mapQuest(parms.lat, parms.lon, sampleRes, cellWidth)
}
// Next get weather data from tomorrow.io or weatherapi.com
let _wx
if (this.weatherApi === 'weatherapi.com') {
_wx = getWeatherapi(parms.lat, parms.lon, 1, 'fire')
} else { // tomorrow.io
_wx = getTomorrow(parms.lat, parms.lon, parms.start, parms.end, parms.timezone)
}
// Run requests in parallel...
const esa = await _esa
const wx = await _wx
parms.elev = esa.elev
parms.slope = 100 * esa.slopeRatio
parms.aspect = esa.aspect
// Add fire behavior to the weather record and return
this.addFireBehavior(parms, wx)
return { parms, wx }
} | identifier_body |
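// addFireBehavior() above converts user-facing units into the English units the
// simulator expects: percentages become fractions (x 0.01) and wind speeds in mi/h
// become ft/min (x 88, since 5280 ft/mi divided by 60 min/h is 88). The same
// conversions in isolation; the helper names are illustrative assumptions.
const pctToFraction = pct => 0.01 * pct // e.g. 45 (%) -> 0.45
const mphToFtPerMin = mph => 88 * mph // e.g. 10 mi/h -> 880 ft/min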
FireBehaviorForecaster.js | /**
* @file FireBehaviorForecaster.js provides site specific terrain, weather, and fire behavior
* for a 48-hour period.
* @copyright 2021 Systems for Environmental Management
* @author Collin D. Bevins, <[email protected]>
* @license MIT
*/
import { Sim } from '@cbevins/fire-behavior-simulator'
import moment from 'moment'
import { getForecast as getWeatherapi } from './wxQuery-weatherapi.js'
import { getTimelines as getTomorrow } from './wxQuery-tomorrow.js'
import { mapquestElevSlopeAspect as mapQuest } from '../Globe/elevQuery-mapquest.js'
import { usgsElevSlopeAspect as usgs } from '../Globe/elevQuery-usgs.js'
export class FireBehaviorForecaster {
constructor () {
this.elevationApi = 'mapquest.com' // or 'usgs.gov'
this.weatherApi = 'tomorrow.io' // or 'weatherapi.com'
this.sim = new Sim()
this.dag = this.sim.createDag('FireForecast')
this.dag.configure([
['configure.fuel.primary', 'catalog'], // The primary fuel is specified by a fuel model catalog key
['configure.fuel.secondary', 'none'], // There are no secondary fuels
['configure.fuel.moisture', 'fosberg'], // 3 dead moisture classes and a single live category moisture
['configure.fuel.curedHerbFraction', 'input'],
['configure.wind.speed', 'at10m'],
['configure.wind.direction', 'sourceFromNorth'],
['configure.slope.steepness', 'ratio'],
['configure.fuel.windSpeedAdjustmentFactor', 'input'],
['configure.fire.vector', 'fromHead'],
['configure.temperature.humidity', 'humidity'], // enter dry bulb and humidity
['configure.fuel.chaparralTotalLoad', 'input'], // unimportant
['configure.fire.weightingMethod', 'arithmetic'], // unimportant
['configure.fire.effectiveWindSpeedLimit', 'ignored'],
['configure.fire.firelineIntensity', 'flameLength'],
['configure.fire.lengthToWidthRatio', 'lengthToWidthRatio']
])
this.dag.select([
'surface.primary.fuel.model.behave.parms.cured.herb.fraction', // ratio
'surface.primary.fuel.fire.effectiveWindSpeed', // ft/min
'surface.primary.fuel.fire.flameResidenceTime', // min
'surface.primary.fuel.fire.heading.fromUpslope', // degrees
'surface.primary.fuel.fire.heading.fromNorth', // degrees
'surface.primary.fuel.fire.heatPerUnitArea', // btu/ft2 |
'surface.primary.fuel.fire.reactionIntensity', // btu/ft2/min
'surface.fire.ellipse.axis.lengthToWidthRatio', // ratio
'surface.fire.ellipse.back.firelineIntensity', // Btu/ft/s
'surface.fire.ellipse.back.flameLength', // ft
'surface.fire.ellipse.back.scorchHeight', // ft
'surface.fire.ellipse.back.spreadDistance', // ft
'surface.fire.ellipse.back.spreadRate', // ft/min
'surface.fire.ellipse.flank.firelineIntensity',
'surface.fire.ellipse.flank.flameLength',
'surface.fire.ellipse.flank.scorchHeight',
'surface.fire.ellipse.flank.spreadDistance',
'surface.fire.ellipse.flank.spreadRate',
'surface.fire.ellipse.head.firelineIntensity',
'surface.fire.ellipse.head.flameLength',
'surface.fire.ellipse.head.scorchHeight',
'surface.fire.ellipse.head.spreadDistance',
'surface.fire.ellipse.head.spreadRate',
'surface.fire.ellipse.size.area', // ft2
'surface.fire.ellipse.size.length', // ft
'surface.fire.ellipse.size.perimeter', // ft
'surface.fire.ellipse.size.width', // ft
'site.moisture.dead.tl1h', // ratio
'site.moisture.dead.tl10h',
'site.moisture.dead.tl100h'
])
}
/**
* Adds fire behavior to the weather records
* @param {object} parms Input parameters object
* @param {array} wxArray Array of hourly weather forecast objects
* @returns {array} wxArray with fire behavior properties added to each record
*/
addFireBehavior (parms, wxArray) {
wxArray.forEach(wx => {
const input = {
fuel: parms.fuel,
curedHerb: 0.01 * parms.cured,
month: +(wx.date).substr(5, 2),
hour: +(wx.time).substr(0, 2),
elevDiff: parms.elevdiff,
aspect: parms.aspect,
slope: 0.01 * parms.slope,
dryBulb: wx.dryBulb,
humidity: 0.01 * wx.humidity,
shading: 0.01 * wx.cloudCover,
liveMoisture: 0.01 * parms.live,
windAt10m: 88 * wx.windSpeed,
windGust: 88 * wx.windGust,
windAdj: parms.waf,
windFrom: wx.windFrom,
elapsed: 60
}
const output = this.run(input)
wx.tl1h = 100 * output.moisture.fosberg.tl1h // ratio
wx.tl10h = 100 * output.moisture.tl10h // ratio
wx.tl100h = 100 * output.moisture.tl100h // ratio
wx.spreadRate = output.heading.spreadRate // ft/min
wx.flameLength = output.heading.flameLength // ft
wx.scorchHeight = output.heading.scorchHeight // ft
wx.headingFromNorth = output.fire.headingFromNorth // degrees
wx.gust = {
spreadRate: output.heading.gust.spreadRate, // ft/min
flameLength: output.heading.gust.flameLength, // ft
scorchHeight: output.heading.gust.scorchHeight, // ft
headingFromNorth: output.fire.gust.headingFromNorth // degrees
}
})
return wxArray
}
/**
* Gets fire behavior for the supplied inputs
* @param {object} inp Fire behavior input values
* @returns {object} Fire behavior object
*/
run (inp) {
this.dag.input([
['surface.primary.fuel.model.catalogKey', [inp.fuel]],
['surface.primary.fuel.model.behave.parms.cured.herb.fraction', [inp.curedHerb]], // fraction
['site.date.month', [inp.month]],
['site.time.hour', [inp.hour]],
['site.location.elevation.diff', [inp.elevDiff]],
['site.slope.direction.aspect', [inp.aspect]], // degrees clockwise from north
['site.slope.steepness.ratio', [inp.slope]], // vertical rise / horizontal reach
['site.temperature.air', [inp.dryBulb]], // oF
['site.temperature.relativeHumidity', [inp.humidity]], // fraction (0-1)
['site.temperature.shading', [inp.shading]], // fraction (0-1)
['site.moisture.live.herb', [inp.liveMoisture]], // fraction of fuel ovendry weight
['site.moisture.live.stem', [inp.liveMoisture]], // fraction of fuel ovendry weight
['site.wind.speed.at10m', [inp.windAt10m]], // feet per minute (1 mph = 88 ft/min)
['site.windSpeedAdjustmentFactor', [inp.windAdj]], // fraction of 10m wind speed
['site.wind.direction.source.fromNorth', [inp.windFrom]], // direction of wind origin, degrees clockwise from north
['site.fire.time.sinceIgnition', [inp.elapsed]] // minutes
]).run()
const output = {
fire: {
effectiveWindSpeed: this.dag.node('surface.primary.fuel.fire.effectiveWindSpeed').value(), // ft/min
flameResidenceTime: this.dag.node('surface.primary.fuel.fire.flameResidenceTime').value(), // min
headingFromUpslope: this.dag.node('surface.primary.fuel.fire.heading.fromUpslope').value(), // degrees
headingFromNorth: this.dag.node('surface.primary.fuel.fire.heading.fromNorth').value(), // degrees
heatPerUnitArea: this.dag.node('surface.primary.fuel.fire.heatPerUnitArea').value(), // btu/ft2 |
reactionIntensity: this.dag.node('surface.primary.fuel.fire.reactionIntensity').value() // btu/ft2/min
},
moisture: {
tl1h: this.dag.node('site.moisture.dead.tl1h').value(),
tl10h: this.dag.node('site.moisture.dead.tl10h').value(),
tl100h: this.dag.node('site.moisture.dead.tl100h').value(),
fosberg: {
reference: this.dag.node('site.moisture.dead.fosberg.reference').value(),
correction: this.dag.node('site.moisture.dead.fosberg.correction').value(),
tl1h: this.dag.node('site.moisture.dead.fosberg.tl1h').value(),
tl10h: this.dag.node('site.moisture.dead.fosberg.tl10h').value(),
tl100h: this.dag.node('site.moisture.dead.fosberg.tl100h').value()
}
},
ellipse: {
lwRatio: this.dag.node('surface.fire.ellipse.axis.lengthToWidthRatio').value(), // ratio
area: this.dag.node('surface.fire.ellipse.size.area').value(), // ft2
length: this.dag.node('surface.fire.ellipse.size.length').value(), // ft
perimeter: this.dag.node('surface.fire.ellipse.size.perimeter').value(), // ft
width: this.dag.node('surface.fire.ellipse.size.width').value() // ft
},
backing: {
firelineIntensity: this.dag.node('surface.fire.ellipse.back.firelineIntensity').value(), // Btu/ft/s
flameLength: this.dag.node('surface.fire.ellipse.back.flameLength').value(), // ft
scorchHeight: this.dag.node('surface.fire.ellipse.back.scorchHeight').value(), // ft
spreadDistance: this.dag.node('surface.fire.ellipse.back.spreadDistance').value(), // ft
spreadRate: this.dag.node('surface.fire.ellipse.back.spreadRate').value() // ft/min
},
flanking: {
firelineIntensity: this.dag.node('surface.fire.ellipse.flank.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.flank.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.flank.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.flank.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.flank.spreadRate').value()
},
heading: {
firelineIntensity: this.dag.node('surface.fire.ellipse.head.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.head.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.head.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.head.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.head.spreadRate').value()
}
}
// Add fire behavior during wind gusts
this.dag.input([
['site.wind.speed.at10m', [inp.windGust]] // feet per minute (1 mph = 88 ft/min)
]).run()
output.heading.gust = {
firelineIntensity: this.dag.node('surface.fire.ellipse.head.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.head.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.head.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.head.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.head.spreadRate').value()
}
output.fire.gust = {
headingFromUpslope: this.dag.node('surface.primary.fuel.fire.heading.fromUpslope').value(), // degrees
headingFromNorth: this.dag.node('surface.primary.fuel.fire.heading.fromNorth').value() // degrees
}
return output
}
/**
* Display the required configuration nodes
*/
showConfigs () {
const activeConfigs = this.dag.requiredConfigNodes() // returns an array of DagNode references
console.log('ACTIVE CONFIGS:')
activeConfigs.forEach(cfg => { console.log(cfg.key(), cfg.value()) })
}
/**
* Display the required input nodes
*/
showInputs () {
const requiredInputs = this.dag.requiredInputNodes() // returns an array of DagNode references
console.log('REQUIRED INPUTS:')
requiredInputs.forEach(node => { console.log(node.key()) })
}
/**
* Gets the weather and fire forecast - MAIN ENTRY POINT
*
* @param {object} parms
* - name {string} Location name
* - lat {number} Location latitude north (+) or south (-)
* - lon {number} Location longitude east (+) or west (-)
* - timezone {string} Timezone of time values, according to IANA Timezone Names (defaults to 'UTC')
* (see https://docs.tomorrow.io/reference/api-formats#timezone)
* - fuel {string} fuel model key
* - waf {number} wind speed adjustment factor from 20-ft to midflame height (fraction)
* - cured {number} herb cured fraction (%)
* - live {number} live (herb and stem) fuel moisture (%)
* - elevdiff {number} Elevation difference between forecast location and site (ft)
*
* @returns {object} { parms, wx }, where wx is the array of hourly forecast objects
*/
async getForecast (parms) {
// configure the time frame: from the start of the current hour to 48 hours out
const now = moment.utc()
parms.start = moment.utc(now).startOf('hour').toISOString() // "2019-03-20T14:09:50Z"
parms.end = moment.utc(now).add(48, 'hours').toISOString()
// First get elevation, slope, and aspect and add it to the parms
const sampleRes = 1 / (60 * 60 * 3) // 1/3 arc-second in decimal degrees
const cellWidth = 2 // Double sample distance to ensure adjacent cells have different sample
let _esa
if (this.elevationApi === 'usgs.gov') {
_esa = usgs(parms.lat, parms.lon, sampleRes, cellWidth)
} else { // mapquest.com
_esa = mapQuest(parms.lat, parms.lon, sampleRes, cellWidth)
}
// Next get weather data from tomorrow.io or weatherapi.com
let _wx
if (this.weatherApi === 'weatherapi.com') | else { // tomorrow.io
_wx = getTomorrow(parms.lat, parms.lon, parms.start, parms.end, parms.timezone)
}
// Run requests in parallel...
const esa = await _esa
const wx = await _wx
parms.elev = esa.elev
parms.slope = 100 * esa.slopeRatio
parms.aspect = esa.aspect
// Add fire behavior to the weather record and return
this.addFireBehavior(parms, wx)
return { parms, wx }
}
}
| {
_wx = getWeatherapi(parms.lat, parms.lon, 1, 'fire')
} | conditional_block |
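// getForecast() above starts the terrain and weather requests before awaiting either,
// so the two network calls overlap instead of running back to back. The same pattern in
// isolation; fetchTerrain and fetchWeather are placeholder stubs standing in for the
// real API calls.
const fetchTerrain = async () => ({ elev: 3200 }) // placeholder stub
const fetchWeather = async () => ([{ dryBulb: 72 }]) // placeholder stub
async function inParallel () {
  const _terrain = fetchTerrain() // request starts immediately
  const _weather = fetchWeather() // starts concurrently with the first
  const terrain = await _terrain // both were already in flight while we waited
  const weather = await _weather
  return { terrain, weather }
}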
FireBehaviorForecaster.js | /**
* @file FireBehaviorForecaster.js provides site specific terrain, weather, and fire behavior
* for a 48-hour period.
* @copyright 2021 Systems for Environmental Management
* @author Collin D. Bevins, <[email protected]>
* @license MIT
*/
import { Sim } from '@cbevins/fire-behavior-simulator'
import moment from 'moment'
import { getForecast as getWeatherapi } from './wxQuery-weatherapi.js'
import { getTimelines as getTomorrow } from './wxQuery-tomorrow.js'
import { mapquestElevSlopeAspect as mapQuest } from '../Globe/elevQuery-mapquest.js'
import { usgsElevSlopeAspect as usgs } from '../Globe/elevQuery-usgs.js'
export class FireBehaviorForecaster {
constructor () {
this.elevationApi = 'mapquest.com' // or 'usgs.gov'
this.weatherApi = 'tomorrow.io' // or 'weatherapi.com'
this.sim = new Sim()
this.dag = this.sim.createDag('FireForecast')
this.dag.configure([
['configure.fuel.primary', 'catalog'], // The primary fuel is specified by a fuel model catalog key
['configure.fuel.secondary', 'none'], // There are no secondary fuels
['configure.fuel.moisture', 'fosberg'], // 3 dead moisture classes and a single live category moisture
['configure.fuel.curedHerbFraction', 'input'],
['configure.wind.speed', 'at10m'],
['configure.wind.direction', 'sourceFromNorth'],
['configure.slope.steepness', 'ratio'],
['configure.fuel.windSpeedAdjustmentFactor', 'input'],
['configure.fire.vector', 'fromHead'],
['configure.temperature.humidity', 'humidity'], // enter dry bulb and humidity
['configure.fuel.chaparralTotalLoad', 'input'], // unimportant
['configure.fire.weightingMethod', 'arithmetic'], // unimportant
['configure.fire.effectiveWindSpeedLimit', 'ignored'],
['configure.fire.firelineIntensity', 'flameLength'],
['configure.fire.lengthToWidthRatio', 'lengthToWidthRatio']
])
this.dag.select([
'surface.primary.fuel.model.behave.parms.cured.herb.fraction', // ratio
'surface.primary.fuel.fire.effectiveWindSpeed', // ft/min
'surface.primary.fuel.fire.flameResidenceTime', // min
'surface.primary.fuel.fire.heading.fromUpslope', // degrees | 'surface.primary.fuel.fire.heatPerUnitArea', // btu/ft2 |
'surface.primary.fuel.fire.reactionIntensity', // btu/ft2/min
'surface.fire.ellipse.axis.lengthToWidthRatio', // ratio
'surface.fire.ellipse.back.firelineIntensity', // Btu/ft/s
'surface.fire.ellipse.back.flameLength', // ft
'surface.fire.ellipse.back.scorchHeight', // ft
'surface.fire.ellipse.back.spreadDistance', // ft
'surface.fire.ellipse.back.spreadRate', // ft/min
'surface.fire.ellipse.flank.firelineIntensity',
'surface.fire.ellipse.flank.flameLength',
'surface.fire.ellipse.flank.scorchHeight',
'surface.fire.ellipse.flank.spreadDistance',
'surface.fire.ellipse.flank.spreadRate',
'surface.fire.ellipse.head.firelineIntensity',
'surface.fire.ellipse.head.flameLength',
'surface.fire.ellipse.head.scorchHeight',
'surface.fire.ellipse.head.spreadDistance',
'surface.fire.ellipse.head.spreadRate',
'surface.fire.ellipse.size.area', // ft2
'surface.fire.ellipse.size.length', // ft
'surface.fire.ellipse.size.perimeter', // ft
'surface.fire.ellipse.size.width', // ft
'site.moisture.dead.tl1h', // ratio
'site.moisture.dead.tl10h',
'site.moisture.dead.tl100h'
])
}
/**
* Adds fire behavior to the weather records
* @param {object} parms Input parameters object
* @param {array} wxArray Array of hourly weather forecast objects
* @returns {array} wxArray with fire behavior properties added to each record
*/
addFireBehavior (parms, wxArray) {
wxArray.forEach(wx => {
const input = {
fuel: parms.fuel,
curedHerb: 0.01 * parms.cured,
month: +(wx.date).substr(5, 2),
hour: +(wx.time).substr(0, 2),
elevDiff: parms.elevdiff,
aspect: parms.aspect,
slope: 0.01 * parms.slope,
dryBulb: wx.dryBulb,
humidity: 0.01 * wx.humidity,
shading: 0.01 * wx.cloudCover,
liveMoisture: 0.01 * parms.live,
windAt10m: 88 * wx.windSpeed,
windGust: 88 * wx.windGust,
windAdj: parms.waf,
windFrom: wx.windFrom,
elapsed: 60
}
const output = this.run(input)
wx.tl1h = 100 * output.moisture.fosberg.tl1h // ratio
wx.tl10h = 100 * output.moisture.tl10h // ratio
wx.tl100h = 100 * output.moisture.tl100h // ratio
wx.spreadRate = output.heading.spreadRate // ft/min
wx.flameLength = output.heading.flameLength // ft
wx.scorchHeight = output.heading.scorchHeight // ft
wx.headingFromNorth = output.fire.headingFromNorth // degrees
wx.gust = {
spreadRate: output.heading.gust.spreadRate, // ft/min
flameLength: output.heading.gust.flameLength, // ft
scorchHeight: output.heading.gust.scorchHeight, // ft
headingFromNorth: output.fire.gust.headingFromNorth // degrees
}
})
return wxArray
}
/**
* Gets fire behavior for the supplied inputs
* @param {object} inp Fire behavior input values
* @returns {object} Fire behavior object
*/
run (inp) {
this.dag.input([
['surface.primary.fuel.model.catalogKey', [inp.fuel]],
['surface.primary.fuel.model.behave.parms.cured.herb.fraction', [inp.curedHerb]], // fraction
['site.date.month', [inp.month]],
['site.time.hour', [inp.hour]],
['site.location.elevation.diff', [inp.elevDiff]],
['site.slope.direction.aspect', [inp.aspect]], // degrees clockwise from north
['site.slope.steepness.ratio', [inp.slope]], // vertical rise / horizontal reach
['site.temperature.air', [inp.dryBulb]], // oF
['site.temperature.relativeHumidity', [inp.humidity]], // fraction (0-1)
['site.temperature.shading', [inp.shading]], // fraction (0-1)
['site.moisture.live.herb', [inp.liveMoisture]], // fraction of fuel ovendry weight
['site.moisture.live.stem', [inp.liveMoisture]], // fraction of fuel ovendry weight
['site.wind.speed.at10m', [inp.windAt10m]], // feet per minute (1 mph = 88 ft/min)
['site.windSpeedAdjustmentFactor', [inp.windAdj]], // fraction of 10m wind speed
['site.wind.direction.source.fromNorth', [inp.windFrom]], // direction of wind origin, degrees clockwise from north
['site.fire.time.sinceIgnition', [inp.elapsed]] // minutes
]).run()
const output = {
fire: {
effectiveWindSpeed: this.dag.node('surface.primary.fuel.fire.effectiveWindSpeed').value(), // ft/min
flameResidenceTime: this.dag.node('surface.primary.fuel.fire.flameResidenceTime').value(), // min
headingFromUpslope: this.dag.node('surface.primary.fuel.fire.heading.fromUpslope').value(), // degrees
headingFromNorth: this.dag.node('surface.primary.fuel.fire.heading.fromNorth').value(), // degrees
heatPerUnitArea: this.dag.node('surface.primary.fuel.fire.heatPerUnitArea').value(), // btu/ft2 |
reactionIntensity: this.dag.node('surface.primary.fuel.fire.reactionIntensity').value() // btu/ft2/min
},
moisture: {
tl1h: this.dag.node('site.moisture.dead.tl1h').value(),
tl10h: this.dag.node('site.moisture.dead.tl10h').value(),
tl100h: this.dag.node('site.moisture.dead.tl100h').value(),
fosberg: {
reference: this.dag.node('site.moisture.dead.fosberg.reference').value(),
correction: this.dag.node('site.moisture.dead.fosberg.correction').value(),
tl1h: this.dag.node('site.moisture.dead.fosberg.tl1h').value(),
tl10h: this.dag.node('site.moisture.dead.fosberg.tl10h').value(),
tl100h: this.dag.node('site.moisture.dead.fosberg.tl100h').value()
}
},
ellipse: {
lwRatio: this.dag.node('surface.fire.ellipse.axis.lengthToWidthRatio').value(), // ratio
area: this.dag.node('surface.fire.ellipse.size.area').value(), // ft2
length: this.dag.node('surface.fire.ellipse.size.length').value(), // ft
perimeter: this.dag.node('surface.fire.ellipse.size.perimeter').value(), // ft
width: this.dag.node('surface.fire.ellipse.size.width').value() // ft
},
backing: {
firelineIntensity: this.dag.node('surface.fire.ellipse.back.firelineIntensity').value(), // Btu/ft/s
flameLength: this.dag.node('surface.fire.ellipse.back.flameLength').value(), // ft
scorchHeight: this.dag.node('surface.fire.ellipse.back.scorchHeight').value(), // ft
spreadDistance: this.dag.node('surface.fire.ellipse.back.spreadDistance').value(), // ft
spreadRate: this.dag.node('surface.fire.ellipse.back.spreadRate').value() // ft/min
},
flanking: {
firelineIntensity: this.dag.node('surface.fire.ellipse.flank.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.flank.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.flank.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.flank.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.flank.spreadRate').value()
},
heading: {
firelineIntensity: this.dag.node('surface.fire.ellipse.head.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.head.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.head.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.head.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.head.spreadRate').value()
}
}
// Add fire behavior during wind gusts
this.dag.input([
['site.wind.speed.at10m', [inp.windGust]] // feet per minute (1 mph = 88 ft/min)
]).run()
output.heading.gust = {
firelineIntensity: this.dag.node('surface.fire.ellipse.head.firelineIntensity').value(),
flameLength: this.dag.node('surface.fire.ellipse.head.flameLength').value(),
scorchHeight: this.dag.node('surface.fire.ellipse.head.scorchHeight').value(),
spreadDistance: this.dag.node('surface.fire.ellipse.head.spreadDistance').value(),
spreadRate: this.dag.node('surface.fire.ellipse.head.spreadRate').value()
}
output.fire.gust = {
headingFromUpslope: this.dag.node('surface.primary.fuel.fire.heading.fromUpslope').value(), // degrees
headingFromNorth: this.dag.node('surface.primary.fuel.fire.heading.fromNorth').value() // degrees
}
return output
}
/**
* Display the required configuration nodes
*/
showConfigs () {
const activeConfigs = this.dag.requiredConfigNodes() // returns an array of DagNode references
console.log('ACTIVE CONFIGS:')
activeConfigs.forEach(cfg => { console.log(cfg.key(), cfg.value()) })
}
/**
* Display the required input nodes
*/
showInputs () {
const requiredInputs = this.dag.requiredInputNodes() // returns an array of DagNode references
console.log('REQUIRED INPUTS:')
requiredInputs.forEach(node => { console.log(node.key()) })
}
/**
* Gets the weather and fire forecast - MAIN ENTRY POINT
*
* @param {object} parms
* - name {string} Location name
* - lat {number} Location latitude north (+) or south (-)
* - lon {number} Location longitude east (+) or west (-)
* - timezone {string} Timezone of time values, according to IANA Timezone Names (defaults to 'UTC')
* (see https://docs.tomorrow.io/reference/api-formats#timezone)
* - fuel {string} fuel model key
* - waf {number} wind speed adjustment factor from 20-ft to midflame height (fraction)
* - cured {number} herb cured fraction (%)
* - live {number} live (herb and stem) fuel moisture (%)
* - elevdiff {number} Elevation difference between forecast location and site (ft)
*
* @returns {object} { parms, wx }, where wx is the array of hourly forecast objects
*/
async getForecast (parms) {
// configure the time frame: from the start of the current hour to 48 hours out
const now = moment.utc()
parms.start = moment.utc(now).startOf('hour').toISOString() // "2019-03-20T14:09:50Z"
parms.end = moment.utc(now).add(48, 'hours').toISOString()
// First get elevation, slope, and aspect and add it to the parms
const sampleRes = 1 / (60 * 60 * 3) // 1/3 arc-second in decimal degrees
const cellWidth = 2 // Double sample distance to ensure adjacent cells have different sample
let _esa
if (this.elevationApi === 'usgs.gov') {
_esa = usgs(parms.lat, parms.lon, sampleRes, cellWidth)
} else { // mapquest.com
_esa = mapQuest(parms.lat, parms.lon, sampleRes, cellWidth)
}
// Next get weather data from tomorrow.io or weatherapi.com
let _wx
if (this.weatherApi === 'weatherapi.com') {
_wx = getWeatherapi(parms.lat, parms.lon, 1, 'fire')
} else { // tomorrow.io
_wx = getTomorrow(parms.lat, parms.lon, parms.start, parms.end, parms.timezone)
}
// Run requests in parallel...
const esa = await _esa
const wx = await _wx
parms.elev = esa.elev
parms.slope = 100 * esa.slopeRatio
parms.aspect = esa.aspect
// Add fire behavior to the weather record and return
this.addFireBehavior(parms, wx)
return { parms, wx }
}
} | 'surface.primary.fuel.fire.heading.fromNorth', // degrees | random_line_split |
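// Across the rows above, fim_type names how the middle was carved out of the file:
// random_line_split removes an arbitrary run of lines, identifier_name elides just a
// name, identifier_body a whole function body, conditional_block an if/else arm. One
// common way to turn such a row into a fill-in-the-middle training prompt is the
// prefix-suffix-middle layout sketched below; the sentinel token strings are an
// assumption of this sketch, not something this dataset prescribes.
function toPsmPrompt (row) {
  return '<fim_prefix>' + row.prefix +
         '<fim_suffix>' + row.suffix +
         '<fim_middle>' + row.middle
}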
slack.go | package main
import (
"encoding/json"
"errors"
"fmt"
"log"
"strings"
"github.com/nlopes/slack"
)
// SlackClient is a thin wrapper around the Slack API client that also carries the
// request verification token and the target channel ID.
type SlackClient struct {
client *slack.Client
verificationToken string
channelID string
}
type slackMsg struct {
text string
ts string
channel string
reaction string
translated string
source string
target string
}
// automatically generated using the following
// https://mholt.github.io/json-to-go/
type slackEvent struct {
// only url_verification event
// https://api.slack.com/events/url_verification
Challenge string `json:"challenge"`
Token string `json:"token"`
TeamID string `json:"team_id"`
APIAppID string `json:"api_app_id"`
Event struct {
Type string `json:"type"`
User string `json:"user"`
Item struct {
Type string `json:"type"`
Channel string `json:"channel"`
Ts string `json:"ts"`
} `json:"item"`
Reaction string `json:"reaction"`
ItemUser string `json:"item_user"`
EventTs string `json:"event_ts"`
} `json:"event"`
Type string `json:"type"`
EventID string `json:"event_id"`
EventTime int `json:"event_time"`
AuthedUsers []string `json:"authed_users"`
}
// https://api.slack.com/events/url_verification
/*
{
"token": "Jhj5dZrVaK7ZwHHjRyZWjbDl",
"challenge": "3eZbrw1aBm2rZgRNFdxV2595E9CY3gmdALWMmHkvFXO7tYXAYM8P",
"type": "url_verification"
}
*/
// https://api.slack.com/events-api#receiving_events
// https://api.slack.com/events/reaction_added
/*
{
"token": "Jhj5dZrVaK7ZwHHjRyZWjbDl",
"team_id": "T0123086H",
"api_app_id": "A1231KF9R",
"event": {
"type": "reaction_added",
"user": "U0G9QF9C6",
"item": {
"type": "message",
"channel": "C0G9QF9GZ",
"ts": "1518504389.000166"
},
"reaction": "eyes",
"item_user": "U0G9QF9C6",
"event_ts": "1518507482.000119"
},
"type": "event_callback",
"event_id": "Ev97FS5N0Y",
"event_time": 1518507482,
"authed_users": [
"U0G9QF9C6"
]
}
*/
// flag emoji to language
var flagMap = map[string]string{
"flag-ac": "English",
"flag-ad": "Catalan",
"flag-ae": "Arabic",
"flag-af": "Pashto",
"flag-ag": "English",
"flag-ai": "English",
"flag-al": "Albanian",
"flag-am": "Armenian",
"flag-ao": "Portuguese",
"flag-ar": "Spanish",
"flag-as": "English",
"flag-at": "German",
"flag-au": "English",
"flag-aw": "Dutch",
"flag-ax": "Swedish",
"flag-az": "Spanish",
"flag-ba": "Bosnian",
"flag-bb": "English",
"flag-bd": "Bengali",
"flag-be": "Dutch",
"flag-bf": "French",
"flag-bg": "Bulgarian",
"flag-bh": "Arabic",
"flag-bi": "French",
"flag-bj": "French",
"flag-bl": "French",
"flag-bn": "English",
"flag-bm": "Malay",
"flag-bo": "Spanish",
"flag-bq": "Dutch",
"flag-br": "Portuguese",
"flag-bs": "English",
"flag-bt": "Dzongkha",
"flag-bv": "Norwegian",
"flag-bw": "English",
"flag-by": "Belarusian",
"flag-bz": "English",
"flag-ca": "English",
"flag-cc": "Malay",
"flag-cd": "French",
"flag-cf": "French",
"flag-cg": "French",
"flag-ch": "German",
"flag-ci": "French",
"flag-ck": "English",
"flag-cl": "Spanish",
"flag-cm": "French",
"flag-cn": "Chinese Simplified",
"flag-co": "Spanish",
"flag-cp": "French",
"flag-cr": "Spanish",
"flag-cu": "Spanish",
"flag-cv": "Portuguese",
"flag-cw": "Dutch",
"flag-cx": "English",
"flag-cy": "Greek",
"flag-cz": "Czech",
"flag-de": "German",
"flag-dg": "English",
"flag-dj": "French",
"flag-dk": "Danish",
"flag-dm": "English",
"flag-do": "Spanish",
"flag-dz": "Arabic",
"flag-ea": "Spanish",
"flag-ec": "Spanish",
"flag-ee": "Estonian",
"flag-eg": "Arabic",
"flag-eh": "Arabic",
"flag-er": "Arabic",
"flag-es": "Spanish",
"flag-et": "Oromo",
"flag-fi": "Finnish",
"flag-fj": "English",
"flag-fk": "English",
"flag-fm": "English",
"flag-fr": "French",
"flag-ga": "French",
"flag-gb": "English",
"flag-gd": "English",
"flag-ge": "Georgian",
"flag-gf": "French",
"flag-gg": "English",
"flag-gh": "English",
"flag-gi": "English",
"flag-gl": "Danish",
"flag-gm": "English",
"flag-gn": "French",
"flag-gp": "French",
"flag-gq": "Spanish",
"flag-gr": "Greek",
"flag-gs": "English",
"flag-gt": "Spanish",
"flag-gu": "English",
"flag-gw": "Portuguese",
"flag-gy": "English",
"flag-hk": "Chinese Traditional",
"flag-hn": "Spanish",
"flag-hr": "Croatian",
"flag-ht": "Haitian Creole",
"flag-hu": "Hungarian",
"flag-ic": "Spanish",
"flag-id": "Indonesian",
"flag-ie": "Irish",
"flag-il": "Hebrew",
"flag-im": "English",
"flag-in": "Hindi",
"flag-io": "English",
"flag-iq": "Arabic",
"flag-ir": "Persian",
"flag-is": "Icelandic",
"flag-it": "Italian",
"flag-je": "English",
"flag-jm": "English",
"flag-jo": "Arabic",
"flag-jp": "Japanese",
"flag-ke": "English",
"flag-kg": "Kyrgyz",
"flag-kh": "Khmer",
"flag-ki": "English",
"flag-kn": "English",
"flag-kp": "Korean",
"flag-kr": "Korean",
"flag-kw": "Arabic",
"flag-ky": "English",
"flag-kz": "Kazakh",
"flag-la": "Lao",
"flag-lb": "Arabic",
"flag-lc": "English",
"flag-li": "German",
"flag-lk": "Sinhala",
"flag-lr": "English",
"flag-ls": "Sesotho",
"flag-lt": "Lithuanian",
"flag-lu": "Luxembourgish",
"flag-lv": "Latvian",
"flag-ly": "Arabic",
"flag-ma": "Arabic",
"flag-mc": "French",
"flag-md": "Romanian",
"flag-mg": "Malagasy",
"flag-mh": "Marshallese",
"flag-mk": "Macedonian",
"flag-ml": "French",
"flag-mm": "Burmese",
"flag-mn": "Mongolian",
"flag-mo": "Chinese Traditional",
"flag-mp": "English",
"flag-mq": "French",
"flag-mr": "Arabic",
"flag-ms": "English",
"flag-mt": "Maltese",
"flag-mu": "English",
"flag-mv": "Dhivehi",
"flag-mw": "English",
"flag-mx": "Spanish",
"flag-my": "Malay",
"flag-mz": "Portuguese",
"flag-na": "English",
"flag-nc": "French",
"flag-ne": "French",
"flag-nf": "English",
"flag-ng": "English",
"flag-ni": "Spanish",
"flag-nl": "Dutch",
"flag-no": "Norwegian",
"flag-np": "Nepali",
"flag-nr": "Nauru",
"flag-nu": "Niuean",
"flag-nz": "English",
"flag-om": "Arabic",
"flag-pa": "Spanish",
"flag-pe": "Spanish",
"flag-pf": "French",
"flag-pg": "English",
"flag-ph": "Tagalog",
"flag-pk": "Urdu",
"flag-pl": "Polish",
"flag-pm": "French",
"flag-pn": "English",
"flag-pr": "Spanish",
"flag-ps": "Arabic",
"flag-pt": "Portuguese",
"flag-pw": "English",
"flag-py": "Spanish",
"flag-qa": "Arabic",
"flag-re": "French",
"flag-ro": "Romanian",
"flag-rs": "Serbian",
"flag-ru": "Russian",
"flag-rw": "Kinyarwanda",
"flag-sa": "Arabic",
"flag-sb": "English",
"flag-sc": "English",
"flag-sd": "Arabic",
"flag-se": "Swedish",
"flag-sg": "English",
"flag-sh": "English",
"flag-si": "Slovenian",
"flag-sj": "Norwegian",
"flag-sk": "Slovak",
"flag-sl": "English",
"flag-sm": "Italian",
"flag-sn": "French",
"flag-so": "Somali",
"flag-sr": "Dutch",
"flag-ss": "English",
"flag-st": "Portuguese",
"flag-sv": "Spanish",
"flag-sx": "Dutch",
"flag-sw": "Arabic",
"flag-sz": "Swati",
"flag-ta": "English",
"flag-tc": "English",
"flag-td": "French",
"flag-tf": "French",
"flag-tg": "French",
"flag-th": "Thai",
"flag-tj": "Tajik",
"flag-tk": "Tokelau",
"flag-tl": "Tetum",
"flag-tm": "Turkmen",
"flag-tn": "Arabic",
"flag-tr": "Turkish",
"flag-tt": "English",
"flag-tv": "Tuvalua",
"flag-tw": "Chinese Traditional",
"flag-tz": "Swahili",
"flag-ua": "Ukrainian",
"flag-ug": "English",
"flag-um": "English",
"flag-us": "English",
"flag-uy": "Spanish",
"flag-uz": "Uzbek",
"flag-va": "Italian",
"flag-vc": "English",
"flag-ve": "Spanish",
"flag-vg": "English",
"flag-vi": "English",
"flag-vn": "Vietnamese",
"flag-vu": "English",
"flag-wf": "French",
"flag-ws": "Samoan",
"flag-xk": "Albanian",
"flag-ye": "Arabic",
"flag-yt": "French",
"flag-za": "Afrikaans",
"flag-zm": "English",
"flag-zw": "English",
"flag-to": "",
"flag-me": "",
"flag-km": "",
"flag-hm": "",
"flag-mf": "Saint Martin",
"flag-fo": "Faroe Islands",
"flag-eu": "EU",
"flag-aq": "Antarctica",
}
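// Lookup sketch: a raw reaction such as "jp" is normalized to "flag-jp" in handleEvent below,
// mapped to a language name here ("Japanese"), and then resolved to a translation language
// code by GetLanguageCode (defined elsewhere in this package); for Amazon Translate that
// code would be "ja". Entries that map to "" have no supported target language and cause
// the reaction event to be rejected.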
func (c *SlackClient) handleEvent(data string) (string, error) {
var se slackEvent
if err := json.Unmarshal([]byte(data), &se); err != nil {
log.Println("[Error] JSON unmarshal error:", err)
return "", err
}
// check verification token
	if se.Token != c.verificationToken {
		log.Println("[Error] slack verification token does not match: ", se.Token)
		return "", errors.New("slack verification token does not match")
	}
// url verification
if se.Type == "url_verification" {
log.Println("[Accepted] url_verification event")
		return fmt.Sprintf(`{"challenge": %q}`, se.Challenge), nil
}
if se.Event.Type != "reaction_added" {
log.Println("[Rejected] slack event type do not 'reaction_added': ", se.Event.Type)
return "", nil
}
	// optionally filter events to a single channel
	if c.channelID != "" {
		if c.channelID != se.Event.Item.Channel {
			log.Println("[Rejected] slack channel ID does not match: ", se.Event.Item.Channel)
			return "", nil
		}
	}
// determine the language from the flag emoji
reactionText := se.Event.Reaction
if !strings.HasPrefix(reactionText, "flag-") {
reactionText = "flag-" + reactionText
}
targetCode := GetLanguageCode(flagMap[reactionText])
if targetCode == "" {
log.Println("[Rejected] it does not correspond to that emoji reaction: ", se.Event.Reaction)
return "", nil
}
// get slack message
msg, err := c.getMessage(se.Event.Item.Channel, se.Event.Item.Ts)
if err != nil {
log.Println("[Error] failed to get slack messages: ", err)
return "", errors.New("failed to get slack messages/n")
}
// estimate language from original text
awsClient := NewAwsClient()
sourceCode, err := awsClient.detectLanguageCode(msg.text)
if err != nil {
log.Println("[Error] failed to get language code: ", err)
return "", err
}
// translate text
translatedText, err := awsClient.translate(msg.text, sourceCode, targetCode)
if err != nil {
log.Println("[Error] failed to translate message: ", err)
return "", err
}
// return translation result to slack
msg.channel = se.Event.Item.Channel
msg.reaction = se.Event.Reaction
msg.translated = translatedText
msg.source = sourceCode
msg.target = targetCode
err = c.postMessage(msg)
if err != nil {
log.Println("[Error] failed to post slack message: ", err)
return "", err
}
return "", nil
}
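// Example wiring (a sketch, not part of the original file): handleEvent expects the raw JSON
// body of a Slack Events API request. Assuming a SlackClient constructed elsewhere, a
// minimal net/http handler could look like this (requires "io" and "net/http" imports):
//
//	func eventsHandler(c *SlackClient) http.HandlerFunc {
//		return func(w http.ResponseWriter, r *http.Request) {
//			body, err := io.ReadAll(r.Body) // read the raw event payload
//			if err != nil {
//				http.Error(w, "bad request", http.StatusBadRequest)
//				return
//			}
//			resp, err := c.handleEvent(string(body))
//			if err != nil {
//				http.Error(w, "internal error", http.StatusInternalServerError)
//				return
//			}
//			w.Header().Set("Content-Type", "application/json")
//			w.Write([]byte(resp)) // empty for most events; JSON challenge for url_verification
//		}
//	}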
// https://api.slack.com/methods/chat.postMessage
func (c *SlackClient) postMessage(msg *slackMsg) error {
attachment := slack.Attachment{}
	attachment.Pretext = fmt.Sprintf("_The message is translated into_ :%s: _(%s-%s)_", msg.reaction, msg.source, msg.target)
attachment.Text = msg.translated
attachment.Footer = msg.text
attachment.MarkdownIn = []string{"text", "pretext"}
params := slack.NewPostMessageParameters()
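	// post as a threaded reply under the original message; AsUser=false makes it appear as the app/bot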
params.ThreadTimestamp = msg.ts
params.AsUser = false
params.Attachments = []slack.Attachment{attachment}
_, _, err := c.client.PostMessage(msg.channel, "", params)
if err != nil {
log.Println("[Error] failed to post slack messages: ", err)
return err
}
return nil
}
// https://api.slack.com/methods/conversations.replies
func (c *SlackClient) getMessage(id string, ts string) (*slackMsg, error) {
params := &slack.GetConversationRepliesParameters{}
params.ChannelID = id
params.Timestamp = ts
params.Inclusive = true
params.Limit = 1
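	// Inclusive with Limit=1 is intended to fetch just the reacted-to message itself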
// get slack messages
msg, _, _, err := c.client.GetConversationReplies(params)
if err != nil {
log.Println("[Error] failed to get slack messages: ", err)
return nil, err
}
// get message text
slMsg := &slackMsg{}
for _, i := range msg {
slMsg.ts = i.Timestamp
if slMsg.ts == "" {
slMsg.ts = i.ThreadTimestamp
}
slMsg.text = i.Text
if slMsg.text == "" {
for _, j := range i.Attachments {
slMsg.text = j.Text
break
}
}
break
}
return slMsg, nil
}
c01_mainChangeover.py
# --------CHANGE OVERS
######################################### 0 Intro #########################################
# Naming Convention: first letter of variable indicates the type
# a = array
# b = binary / boolean
# c = code, for .py files only
# d = dictionary
# f = float
# g = graph
# i = integer
# l = list
# lim = limit
# s = string
# file = file generated or modified through code
import pyodbc as db
import random
import pandas as pd
import numpy as np
import c02_geneticAlgorithmFunctions as gak
import c03_globalVariables as glob
import sys
import matplotlib.pyplot as plt
import math
import datetime
import os
fMinFitness = 10000000000
iGenerationCount = 0
lMinFitness = [10000000000, 'START', [],[],""]
lMinFitness_history = [10000000000]
lFitness_history=[]
lIllegal_history=[]
######################################### 1 DATA IMPORT #########################################
### 1.1 Get Material Family Data
# for now based on Excel; check c13_ImportFromSQL.py for SQL import code
dFamilyCO = {}
dMaterialFamily = {}
dWcList = {}
dMachineConfig = {}
dMaterialCO ={}
glob.lFamilyAtlas_0 = []
glob.lMaterialAtlas_0 = []
#import from Excel
dfWCImport = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="order")
dfFamilies = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="families")
dfFamilyCO = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="familyCO")
dfMachineConfig = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="notOnMachine")
dfMaterialCO = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="materialCO")
#fill WC List
for index, row in dfWCImport.iterrows():
if index >= glob.iBreakImport:
break
dWcList[row.orderNumber]={}
dWcList[row.orderNumber]['material'] = row.materialCode
dWcList[row.orderNumber]['quantity'] = row.quantity
dWcList[row.orderNumber]['priority'] = row.priority
#Create TimeMatrix dictionary from Query Results
for index, row in dfFamilyCO.iterrows():
dFamilyCO[row.relID]= row['time']
glob.lFamilyAtlas_0.append(row["familyAtlas"])
#Create materialFamily dictionary from Query Results
for index, row in dfFamilies.iterrows():
dMaterialFamily[row.Material] = {}
dMaterialFamily[row.Material]['family'] = row.materialFamily
dMaterialFamily[row.Material]['cycleTime'] = row.cycleTime
#Create MachineConfig >> ILLEGAL MACHINE CONFIG, machines the family is not allowed on
for index, row in dfMachineConfig.iterrows():
dMachineConfig[row.family] = [int(x) for x in str(row.notOnMachine).split(",")]
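# e.g. notOnMachine = "1,3" gives dMachineConfig[family] == [1, 3], i.e. the family must not run on machines 1 and 3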
#Create Material changeover time mapping
for index, row in dfMaterialCO.iterrows():
    dMaterialCO[row.materialRel] = row["timeCO"]
    glob.lMaterialAtlas_0.append(row["materialAtlas"])
#open file to track usage history
filePopulationHistory = open(os.path.join(glob.sPathToExcels, "90_populationHistory.txt"), "w", encoding="utf-8")
fileFitnessHistory_runs = open(os.path.join(glob.sPathToExcels, "91_fitnessRuns.txt"), "a", encoding="utf-8")
######################################### 2 GA SETUP #########################################
# TO DO
# > use a more intelligent filling for initial population
### 2.1 Iterate over WC list and populate arrays
# initialize population randomly from list
lGenome = []
lPopulation = []
dPopulation = {}
lPopulation_names =[]
glob.lGenome_0 = []
# create original genome with all orders contained
for order in dWcList.keys():
glob.lGenome_0.append(order)
# create a list of 0s to pad the genome to a length of machines x genes, representing all possible machines in one genome
lEmptyAppend = [i*0 for i in range(0, (glob.iNumberMachines-1)*len(glob.lGenome_0))]
lGenome = glob.lGenome_0+lEmptyAppend
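# layout sketch: with 3 orders and 2 machines, lGenome_0 = [o1, o2, o3] becomes
# lGenome = [o1, o2, o3, 0, 0, 0]; the parallel "breaker" list marks where the flat
# genome is cut into per-machine sequences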
# from the filled Genome, create n = limPopulationSize initial parents
for i in range(0,glob.limPopulationSize):
lNewMember, lNewBreaker = gak.udf_makeNewMember(glob.lGenome_0)
gak.udf_listSortByBreak(lNewMember, lNewBreaker, 0)
# populate the Population dictionary
dPopulation["member"+str(i)] = {}
dPopulation["member"+str(i)]["genome"] = lNewMember
dPopulation["member"+str(i)]["breaker"] = lNewBreaker
# write the first population to the history file
filePopulationHistory.write("#"+str(iGenerationCount)+".1------------------------ Original Population ------------------------"+"\n")
for i,w in enumerate(lPopulation):
filePopulationHistory.write(lPopulation_names[i]+": "+str(w)+"\n")
######################################### 3 GA Algorithm #########################################
# ! Arrays ending on "_names" are parallel arrays to track member names
# iterate until break point reached (see below)
iBreakLoop = glob.iBreakGeneration
while iGenerationCount < iBreakLoop:
fIllegalPerc = 0.0
iGenerationCount += 1
print("--------------------------------- GENERATION: "+str(iGenerationCount)+"---------------------------------")
    # determine randomly if a cataclysm should occur; a cataclysm "kills off" the population and fills it with a newly created one
if random.uniform(0.0, 1.0) < glob.iCataclysmicProb and glob.bCataclysm == True:
print("<<<<<<<<<<<<<<<<<<< CATACLYSM TIME <<<<<<<<<<<<<<<<<<<")
dPopulation = gak.udf_cataclysm(dPopulation, glob.lGenome_0)
# Add runs to the overall counter after cataclysm
glob.iCataclysmicProb = glob.iCataclysmicProb/2
iBreakLoop += glob.iBreakGeneration
    # calculate fitness for each member in the population
lFitness, dMembers, lMinFitness, fMinFitness_run, fIllegalPerc = gak.udf_calcFitness3(dPopulation, dWcList, dMaterialFamily, dFamilyCO, dMaterialCO, lMinFitness, dMachineConfig, iGenerationCount)
lFitness_history.append(fMinFitness_run)
lIllegal_history.append(fIllegalPerc)
    # if the fitness is lower than the previous fitness level, update the minimum fitness
if lMinFitness[0] <= fMinFitness:
fMinFitness = lMinFitness[0]
# append calculated fitness for new lowest level
lMinFitness_history.append(fMinFitness)
# create table and calculate selection probabilities
lFitness_sorted = gak.udf_sortByFitness(lFitness)
# initialize population arrays
lPopulation_new = []
lPopulation_new_names = []
dPopulation_new ={}
# select parents randomly to form new population
lPopulation_new, lPopulation_new_names, dPopulation_new = gak.udf_selectParentsFromPool(dMembers, lFitness_sorted, dPopulation)
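    # selection draws from the probability table built above; duplicate parents are
    # possible and are de-duplicated when dPopulation is rebuilt below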
# Mating time - execute mating functions and initialize offspring arrays
lPopulation_offspring = []
lPopulation_offspring_names = []
dPopulation_offspring ={}
# lPopulation_offspring, glob.iChildCounter, lPopulation_offspring_names, dPopulation_offspring = gak.udf_matingPMX(lPopulation_new, glob.iChildCounter, lPopulation_new_names, dPopulation_new, dMembers, glob.fMutationRate)
dPopulation_offspring = gak.udf_cloneMutate(dPopulation_new, dMembers, dMaterialFamily, dMachineConfig, dWcList, lGenome)
# Mutating Time - execute swap-mutate function
gak.udf_mutateSwap(glob.fMutationRate, dPopulation_offspring)
# recreate the population array with the selected parents from previous iteration
dPopulation={}
for i,member in dPopulation_new.items():
# avoid double entries, which are technically possible due to selection method
if member["member"] not in dPopulation:
dPopulation[member["member"]]={}
dPopulation[member["member"]]["genome"]=member["genome"]
dPopulation[member["member"]]["breaker"]=member["breaker"]
# deconstruct newly created parent array and the mutated offspring array
dPopulation = {**dPopulation, **dPopulation_offspring}
# calculate starting point for trailing average
iAvgStart = len(lMinFitness_history)-glob.iPastAverage
if iAvgStart < 5:
iAvgStart = 0
    # break the while loop if the average over the last iPastAverage generations equals the current minimum, i.e. no improvement was found
if sum(lMinFitness_history[(iAvgStart):(len(lMinFitness_history))])/((len(lMinFitness_history))-iAvgStart) == fMinFitness:
break
# close file
filePopulationHistory.close()
# terminal end prints
print("===============================================================================================")
print("RESULT: ", lMinFitness[0])
print("MEMBER: ", lMinFitness[1])
print( lMinFitness[4])
print(np.corrcoef(lFitness_history, lIllegal_history)[1])
# print machines in termial
gak.udf_printMachinesCMD(lMinFitness[2], lMinFitness[3], lMinFitness[1])
print("__________________________________________")
# print machines with familes not materials
gak.udf_printMachinesFamCMD(lMinFitness[2], lMinFitness[3], lMinFitness[1], dMaterialFamily, dWcList)
######################################### 4 Graphing it #########################################
# set min and max for the y axes
y1Min = math.floor(min(lFitness_history)/1000)*1000
y1Max = math.ceil(min(lFitness_history)/1000)*2000
y2Min = math.floor(min(lIllegal_history))-0.1
y2Max = math.ceil(min(lIllegal_history))+0.1
# set parameters for saving the plot
dateTime = datetime.datetime.now()
iMilliseconds = int(round(dateTime.timestamp() * 1000))
sFileNamePlot = str(iMilliseconds)+"__RESULT_"+str(math.floor(lMinFitness[0]))+"__orders_"+str(len(glob.lGenome_0))+"--machines_"+str(glob.iNumberMachines)+"--Runs_"+str(glob.iBreakGeneration)+"--popSize_"+str(glob.limPopulationSize)+"--mut_"+str(glob.fMutationRate)+"--King_"+str(glob.bKingPrevails)+"--fAlloc_"+str(glob.iForceAllocation_G)+"--CAT_"+str(glob.bCataclysm)+"_"+str(glob.iCataclysmicProb)+"_"+str(glob.iDeletionProb)
sPlotPath = os.path.join(glob.sPathToExcels, "99_Output",sFileNamePlot+".png")
# create subplot
gFitness, ax1 = plt.subplots()
# set options
color = "tab:blue"
ax1.set_ylabel("Fitness")
ax1.set_xlabel("Runs")
ax1.set_ylim(y1Min, y1Max)
ax1.plot(lFitness_history, color=color)
# create twin axis and set
ax2 = plt.twinx()
color ="tab:green"
ax2.set_ylabel("Illegal Percentage")
ax2.set_ylim(y2Min, y2Max)
ax2.plot(lIllegal_history, color=color, linestyle="--")
gFitness.tight_layout()
#save and plot
plt.savefig(sPlotPath)
plt.show()
fileFitnessHistory_runs.write(str(lMinFitness[0])+"\n")
############################################## THE END ##############################################
c01_mainChangeover.py | # --------CHANGE OVERS
######################################### 0 Intro #########################################
# Naming Convention: first letter of variable indicates the type
# a = array
# b = binary / boolean
# c = code, for .py files only
# d = dictionary
# f = float
# g = graph
# i = integer
# l = list
# lim = limit
# s = string
# file = file generated or modified through code
import pyodbc as db
import random
import pandas as pd
import numpy as np
import c02_geneticAlgorithmFunctions as gak
import c03_globalVariables as glob
import sys
import matplotlib.pyplot as plt
import math
import datetime
import os
fMinFitness = 10000000000
iGenerationCount = 0
lMinFitness = [10000000000, 'START', [],[],""]
lMinFitness_history = [10000000000]
lFitness_history=[]
lIllegal_history=[]
######################################### 1 DATA IMPORT #########################################
### 1.1 Get Material Family Data
# for now based on excel; check c13_ImportFromSQL.py for SQL import cide
dFamilyCO = {}
dMaterialFamily = {}
dWcList = {}
dMachineConfig = {}
dMaterialCO ={}
glob.lFamilyAtlas_0 = []
glob.lMaterialAtlas_0 = []
#import from Excel
dfWCImport = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="order")
dfFamilies = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="families")
dfFamilyCO = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="familyCO")
dfMachineConfig = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="notOnMachine")
dfMaterialCO = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="materialCO")
#fill WC List
for index, row in dfWCImport.iterrows():
if index >= glob.iBreakImport:
break
dWcList[row.orderNumber]={}
dWcList[row.orderNumber]['material'] = row.materialCode
dWcList[row.orderNumber]['quantity'] = row.quantity
dWcList[row.orderNumber]['priority'] = row.priority
#Create TimeMatrix dictionary from Query Results
for index, row in dfFamilyCO.iterrows():
dFamilyCO[row.relID]= row['time']
glob.lFamilyAtlas_0.append(row["familyAtlas"])
#Create materialFamily dictionary from Query Results
for index, row in dfFamilies.iterrows():
dMaterialFamily[row.Material] = {}
dMaterialFamily[row.Material]['family'] = row.materialFamily
dMaterialFamily[row.Material]['cycleTime'] = row.cycleTime
#Create MachineConfig >> ILLEGAL MACHINE CONFIG, machines the family is not allowed on
for index, row in dfMachineConfig.iterrows():
dMachineConfig[row.family] = [int(x) for x in str(row.notOnMachine).split(",")]
#Create Material changeover time mapping
for index, row in dfMaterialCO.iterrows():
dMaterialCO[row.materialRel] = row["timeCO"]
glob.lMaterialAtlas_0.append(row["materialAtlas"])
#open file to track usage history
filePopulationHistory = open(os.path.join(glob.sPathToExcels, "90_populationHistory.txt"), "w", encoding="utf-8")
fileFitnessHistory_runs = open(os.path.join(glob.sPathToExcels, "91_fitnessRuns.txt"), "a", encoding="utf-8")
######################################### 2 GA SETUP #########################################
# TO DO
# > use a more intelligent filling for initial population
### 2.1 Iterate over WC list and populate arrays
# initialize population randomly from list
lGenome = []
lPopulation = []
dPopulation = {}
lPopulation_names =[]
glob.lGenome_0 = []
# create original genome with all orders contained
for order in dWcList.keys():
glob.lGenome_0.append(order)
# create list of 0s to fill fill the genome to a length of machines x genes to represent all possible machines in one genome
lEmptyAppend = [i*0 for i in range(0, (glob.iNumberMachines-1)*len(glob.lGenome_0))]
lGenome = glob.lGenome_0+lEmptyAppend
# from the filled Genome, create n = limPopulationSize initial parents
for i in range(0,glob.limPopulationSize):
lNewMember, lNewBreaker = gak.udf_makeNewMember(glob.lGenome_0)
gak.udf_listSortByBreak(lNewMember, lNewBreaker, 0)
# populate the Population dictionary
dPopulation["member"+str(i)] = {}
dPopulation["member"+str(i)]["genome"] = lNewMember
dPopulation["member"+str(i)]["breaker"] = lNewBreaker
# write the first population to the history file
filePopulationHistory.write("#"+str(iGenerationCount)+".1------------------------ Original Population ------------------------"+"\n")
for i,w in enumerate(lPopulation):
filePopulationHistory.write(lPopulation_names[i]+": "+str(w)+"\n")
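# --- Editor's illustration (not part of the original script) ---
# A minimal sketch of how a member of this encoding could be built: the
# genome is the order list padded with 0s to machines x orders entries, and
# a "breaker" list cuts it into one contiguous slice per machine. The real
# construction lives in gak.udf_makeNewMember, which is not shown here, so
# the helper below is a hypothetical stand-in, not the project's code.
def make_random_member_sketch(lOrders, iNumberMachines):
    genome = lOrders + [0] * ((iNumberMachines - 1) * len(lOrders))
    random.shuffle(genome)
    # one cut per machine boundary, sorted so the slices stay contiguous
    breaker = sorted(random.sample(range(1, len(genome)), iNumberMachines - 1))
    return genome, breaker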
######################################### 3 GA Algorithm #########################################
# ! Arrays ending on "_names" are parallel arrays to track member names
# iterate until break point reached (see below)
iBreakLoop = glob.iBreakGeneration
while iGenerationCount < iBreakLoop:
fIllegalPerc = 0.0
iGenerationCount += 1
print("--------------------------------- GENERATION: "+str(iGenerationCount)+"---------------------------------")
# execute function to calculate fitness of population
# determine randomly if a cataclysm should occur; a cataclysm "kills off" the population and refills it with a newly created one
if random.uniform(0.0, 1.0) < glob.iCataclysmicProb and glob.bCataclysm == True:
print("<<<<<<<<<<<<<<<<<<< CATACLYSM TIME <<<<<<<<<<<<<<<<<<<")
dPopulation = gak.udf_cataclysm(dPopulation, glob.lGenome_0)
# Add runs to the overall counter after cataclysm
glob.iCataclysmicProb = glob.iCataclysmicProb/2
iBreakLoop += glob.iBreakGeneration
# calculate fitness for each member in the population
lFitness, dMembers, lMinFitness, fMinFitness_run, fIllegalPerc = gak.udf_calcFitness3(dPopulation, dWcList, dMaterialFamily, dFamilyCO, dMaterialCO, lMinFitness, dMachineConfig, iGenerationCount)
lFitness_history.append(fMinFitness_run)
lIllegal_history.append(fIllegalPerc)
# if the fitness is lower than the previous fitness level, update the minimum fitness | # append calculated fitness for new lowest level
lMinFitness_history.append(fMinFitness)
# create table and calculate selection probabilities
lFitness_sorted = gak.udf_sortByFitness(lFitness)
# initialize population arrays
lPopulation_new = []
lPopulation_new_names = []
dPopulation_new ={}
# select parents randomly to form new population
lPopulation_new, lPopulation_new_names, dPopulation_new = gak.udf_selectParentsFromPool(dMembers, lFitness_sorted, dPopulation)
# Mating time - execute mating functions and initialize offspring arrays
lPopulation_offspring = []
lPopulation_offspring_names = []
dPopulation_offspring ={}
# lPopulation_offspring, glob.iChildCounter, lPopulation_offspring_names, dPopulation_offspring = gak.udf_matingPMX(lPopulation_new, glob.iChildCounter, lPopulation_new_names, dPopulation_new, dMembers, glob.fMutationRate)
dPopulation_offspring = gak.udf_cloneMutate(dPopulation_new, dMembers, dMaterialFamily, dMachineConfig, dWcList, lGenome)
# Mutating Time - execute swap-mutate function
gak.udf_mutateSwap(glob.fMutationRate, dPopulation_offspring)
# recreate the population array with the selected parents from previous iteration
dPopulation={}
for i,member in dPopulation_new.items():
# avoid double entries, which are technically possible due to selection method
if member["member"] not in dPopulation:
dPopulation[member["member"]]={}
dPopulation[member["member"]]["genome"]=member["genome"]
dPopulation[member["member"]]["breaker"]=member["breaker"]
# merge the newly created parent dict with the mutated offspring dict
dPopulation = {**dPopulation, **dPopulation_offspring}
# calculate starting point for trailing average
iAvgStart = len(lMinFitness_history)-glob.iPastAverage
if iAvgStart < 5:
iAvgStart = 0
# break the while loop if no lower fitness was found over the last glob.iPastAverage generations
if sum(lMinFitness_history[(iAvgStart):(len(lMinFitness_history))])/((len(lMinFitness_history))-iAvgStart) == fMinFitness:
break
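# --- Editor's illustration (not part of the original script) ---
# The break above fires when the trailing mean of lMinFitness_history equals
# fMinFitness, i.e. the best fitness has not improved over the lookback
# window (the original zeroes the window start while the history is still
# short). The same test as a standalone predicate:
def has_plateaued_sketch(history, best, lookback):
    start = len(history) - lookback
    if start < 5:
        start = 0
    window = history[start:]
    return sum(window) / len(window) == best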
# close file
filePopulationHistory.close()
# terminal end prints
print("===============================================================================================")
print("RESULT: ", lMinFitness[0])
print("MEMBER: ", lMinFitness[1])
print( lMinFitness[4])
print(np.corrcoef(lFitness_history, lIllegal_history)[1])
# print machines in terminal
gak.udf_printMachinesCMD(lMinFitness[2], lMinFitness[3], lMinFitness[1])
print("__________________________________________")
# print machines with families, not materials
gak.udf_printMachinesFamCMD(lMinFitness[2], lMinFitness[3], lMinFitness[1], dMaterialFamily, dWcList)
######################################### 4 Graphing it #########################################
# set min and max for the y axes
y1Min = math.floor(min(lFitness_history)/1000)*1000
y1Max = math.ceil(min(lFitness_history)/1000)*2000
y2Min = math.floor(min(lIllegal_history))-0.1
y2Max = math.ceil(min(lIllegal_history))+0.1
# set parameters for saving the plot
dateTime = datetime.datetime.now()
iMilliseconds = int(round(dateTime.timestamp() * 1000))
sFileNamePlot = str(iMilliseconds)+"__RESULT_"+str(math.floor(lMinFitness[0]))+"__orders_"+str(len(glob.lGenome_0))+"--machines_"+str(glob.iNumberMachines)+"--Runs_"+str(glob.iBreakGeneration)+"--popSize_"+str(glob.limPopulationSize)+"--mut_"+str(glob.fMutationRate)+"--King_"+str(glob.bKingPrevails)+"--fAlloc_"+str(glob.iForceAllocation_G)+"--CAT_"+str(glob.bCataclysm)+"_"+str(glob.iCataclysmicProb)+"_"+str(glob.iDeletionProb)
sPlotPath = os.path.join(glob.sPathToExcels, "99_Output",sFileNamePlot+".png")
# create subplot
gFitness, ax1 = plt.subplots()
# set options
color = "tab:blue"
ax1.set_ylabel("Fitness")
ax1.set_xlabel("Runs")
ax1.set_ylim(y1Min, y1Max)
ax1.plot(lFitness_history, color=color)
# create twin axis and set
ax2 = plt.twinx()
color ="tab:green"
ax2.set_ylabel("Illegal Percentage")
ax2.set_ylim(y2Min, y2Max)
ax2.plot(lIllegal_history, color=color, linestyle="--")
gFitness.tight_layout()
#save and plot
plt.savefig(sPlotPath)
plt.show()
fileFitnessHistory_runs.write(str(lMinFitness[0])+"\n")
############################################## THE END ############################################## | if lMinFitness[0] <= fMinFitness:
fMinFitness = lMinFitness[0]
| random_line_split |
lib.rs | // Copyright 2013-2014 The gl-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # gl_generator
//!
//! `gl_generator` is an OpenGL bindings generator plugin. It defines a macro named
//! `generate_gl_bindings!` which can be used to generate all constants and functions of a
//! given OpenGL version.
//!
//! ## Example
//!
//! ```rust
//! #[phase(plugin)]
//! extern crate gl_generator;
//! extern crate libc;
//!
//! use std::mem;
//! use self::types::*;
//!
//! generate_gl_bindings!("gl", "core", "4.5", "static", [ "GL_EXT_texture_filter_anisotropic" ])
//! ```
//!
//! ## Parameters
//!
//! * API: Can be `gl`, `wgl`, `glx`, `egl`. Only `gl` is supported for the moment.
//! * Profile: Can be `core` or `compatibility`. `core` will only include all functions supported
//! by the requested version itself, while `compatibility` will include all the functions from
//! previous versions as well.
//! * Version: The requested OpenGL version in the format `x.x`.
//! * Generator: Can be `static` or `struct`.
//! * Extensions (optional): An array of extensions to include in the bindings.
//!
#![crate_name = "gl_generator"]
#![comment = "OpenGL function loader generator."]
#![license = "ASL2"]
#![crate_type = "dylib"]
#![feature(phase)]
#![feature(globs)]
#![feature(macro_rules)]
#![feature(plugin_registrar)]
#![feature(quote)]
#[phase(plugin, link)]
extern crate log;
extern crate khronos_api;
extern crate rustc;
extern crate syntax;
use std::path::Path;
use std::io::{File, Reader};
use syntax::parse::token;
use syntax::ast::{ Item, TokenTree };
use syntax::ext::base::{expr_to_string, get_exprs_from_tts, DummyResult, ExtCtxt, MacResult};
use syntax::codemap::Span;
use registry::*;
use static_gen::StaticGenerator;
use struct_gen::StructGenerator;
mod common;
pub mod static_gen;
pub mod struct_gen;
pub mod registry;
pub mod ty;
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut ::rustc::plugin::Registry) {
reg.register_macro("generate_gl_bindings", macro_handler);
}
// this is the object that we will return from the "generate_gl_bindings" macro expansion
struct MacroResult {
content: Vec<::std::gc::Gc<Item>>
}
impl MacResult for MacroResult {
fn make_def(&self) -> Option<::syntax::ext::base::MacroDef> { None }
fn make_expr(&self) -> Option<::std::gc::Gc<::syntax::ast::Expr>> { None }
fn make_pat(&self) -> Option<::std::gc::Gc<::syntax::ast::Pat>> { None }
fn make_stmt(&self) -> Option<::std::gc::Gc<::syntax::ast::Stmt>> { None }
fn | (&self) -> Option<::syntax::util::small_vector::SmallVector<::std::gc::Gc<Item>>> {
Some(::syntax::util::small_vector::SmallVector::many(self.content.clone()))
}
}
// handler for generate_gl_bindings!
fn macro_handler(ecx: &mut ExtCtxt, span: Span, token_tree: &[TokenTree]) -> Box<MacResult+'static> {
// getting the arguments from the macro
let (api, profile, version, generator, extensions) = match parse_macro_arguments(ecx, span.clone(), token_tree) {
Some(t) => t,
None => return DummyResult::any(span)
};
let (ns, source) = match api.as_slice() {
"gl" => (Gl, khronos_api::GL_XML),
"glx" => {
ecx.span_err(span, "glx generation unimplemented");
return DummyResult::any(span)
},
"wgl" => {
ecx.span_err(span, "wgl generation unimplemented");
return DummyResult::any(span)
}
ns => {
ecx.span_err(span, format!("Unexpected opengl namespace '{}'", ns).as_slice());
return DummyResult::any(span)
}
};
let filter = Some(Filter {
extensions: extensions,
profile: profile,
version: version,
api: api,
});
// generating the registry of all bindings
let reg = {
use std::io::BufReader;
use std::task;
let result = task::try(proc() {
let reader = BufReader::new(source.as_bytes());
Registry::from_xml(reader, ns, filter)
});
match result {
Ok(reg) => reg,
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while parsing the registry");
}
}
return DummyResult::any(span);
}
}
};
// generating the Rust bindings as a source code into "buffer"
let buffer = {
use std::io::MemWriter;
use std::task;
// calling the generator
let result = match generator.as_slice() {
"static" => task::try(proc() {
let mut buffer = MemWriter::new();
StaticGenerator::write(&mut buffer, ®, ns);
buffer
}),
"struct" => task::try(proc() {
let mut buffer = MemWriter::new();
StructGenerator::write(&mut buffer, ®, ns);
buffer
}),
generator => {
ecx.span_err(span, format!("unknown generator type: {}", generator).as_slice());
return DummyResult::any(span);
},
};
// processing the result
match result {
Ok(buffer) => buffer.unwrap(),
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while generating the bindings");
}
}
return DummyResult::any(span);
}
}
};
// creating a new Rust parser from these bindings
let content = match String::from_utf8(buffer) {
Ok(s) => s,
Err(err) => {
ecx.span_err(span, format!("{}", err).as_slice());
return DummyResult::any(span)
}
};
let mut parser = ::syntax::parse::new_parser_from_source_str(ecx.parse_sess(), ecx.cfg(),
Path::new(ecx.codemap().span_to_filename(span)).display().to_string(), content);
// getting all the items defined by the bindings
let mut items = Vec::new();
loop {
match parser.parse_item_with_outer_attributes() {
None => break,
Some(i) => items.push(i)
}
}
if !parser.eat(&token::EOF) {
ecx.span_err(span, "the rust parser failed to compile all the generated bindings (meaning there is a bug in this library!)");
return DummyResult::any(span)
}
box MacroResult { content: items } as Box<MacResult>
}
fn parse_macro_arguments(ecx: &mut ExtCtxt, span: Span, tts: &[syntax::ast::TokenTree])
-> Option<(String, String, String, String, Vec<String>)>
{
// getting parameters list
let values = match get_exprs_from_tts(ecx, span, tts) {
Some(v) => v,
None => return None
};
if values.len() != 4 && values.len() != 5 {
ecx.span_err(span, format!("expected 4 or 5 arguments but got {}", values.len())
.as_slice());
return None;
}
// computing the extensions (last parameter)
let extensions: Vec<String> = match values.as_slice().get(4) {
None => Vec::new(),
Some(vector) => {
use syntax::ast::ExprVec;
match vector.node {
// only [ ... ] is accepted
ExprVec(ref list) => {
// turning each element into a string
let mut result = Vec::new();
for element in list.iter() {
match expr_to_string(ecx, element.clone(), "expected string literal") {
Some((s, _)) => result.push(s.get().to_string()),
None => return None
}
}
result
},
_ => {
ecx.span_err(span, format!("last argument must be a vector").as_slice());
return None;
}
}
}
};
// computing other parameters
match (
expr_to_string(ecx, values[0].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[1].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[2].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[3].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() })
) {
(Some(a), Some(b), Some(c), Some(d)) => Some((a, b, c, d, extensions)),
_ => None
}
}
| make_items | identifier_name |
lib.rs | // Copyright 2013-2014 The gl-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # gl_generator
//!
//! `gl_generator` is an OpenGL bindings generator plugin. It defines a macro named
//! `generate_gl_bindings!` which can be used to generate all constants and functions of a
//! given OpenGL version.
//!
//! ## Example
//!
//! ```rust
//! #[phase(plugin)]
//! extern crate gl_generator;
//! extern crate libc;
//!
//! use std::mem;
//! use self::types::*;
//!
//! generate_gl_bindings!("gl", "core", "4.5", "static", [ "GL_EXT_texture_filter_anisotropic" ])
//! ```
//!
//! ## Parameters
//!
//! * API: Can be `gl`, `wgl`, `glx`, `egl`. Only `gl` is supported for the moment.
//! * Profile: Can be `core` or `compatibility`. `core` will only include all functions supported
//! by the requested version itself, while `compatibility` will include all the functions from
//! previous versions as well.
//! * Version: The requested OpenGL version in the format `x.x`.
//! * Generator: Can be `static` or `struct`.
//! * Extensions (optional): An array of extensions to include in the bindings.
//!
#![crate_name = "gl_generator"]
#![comment = "OpenGL function loader generator."]
#![license = "ASL2"]
#![crate_type = "dylib"]
#![feature(phase)]
#![feature(globs)]
#![feature(macro_rules)]
#![feature(plugin_registrar)]
#![feature(quote)]
#[phase(plugin, link)]
extern crate log;
extern crate khronos_api;
extern crate rustc;
extern crate syntax;
use std::path::Path;
use std::io::{File, Reader};
use syntax::parse::token;
use syntax::ast::{ Item, TokenTree };
use syntax::ext::base::{expr_to_string, get_exprs_from_tts, DummyResult, ExtCtxt, MacResult};
use syntax::codemap::Span;
use registry::*;
use static_gen::StaticGenerator;
use struct_gen::StructGenerator;
mod common;
pub mod static_gen;
pub mod struct_gen;
pub mod registry;
pub mod ty;
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut ::rustc::plugin::Registry) {
reg.register_macro("generate_gl_bindings", macro_handler);
}
// this is the object that we will return from the "generate_gl_bindings" macro expansion
struct MacroResult {
content: Vec<::std::gc::Gc<Item>>
}
impl MacResult for MacroResult {
fn make_def(&self) -> Option<::syntax::ext::base::MacroDef> { None }
fn make_expr(&self) -> Option<::std::gc::Gc<::syntax::ast::Expr>> { None }
fn make_pat(&self) -> Option<::std::gc::Gc<::syntax::ast::Pat>> { None }
fn make_stmt(&self) -> Option<::std::gc::Gc<::syntax::ast::Stmt>> { None }
fn make_items(&self) -> Option<::syntax::util::small_vector::SmallVector<::std::gc::Gc<Item>>> {
Some(::syntax::util::small_vector::SmallVector::many(self.content.clone()))
}
}
// handler for generate_gl_bindings!
fn macro_handler(ecx: &mut ExtCtxt, span: Span, token_tree: &[TokenTree]) -> Box<MacResult+'static> {
// getting the arguments from the macro
let (api, profile, version, generator, extensions) = match parse_macro_arguments(ecx, span.clone(), token_tree) {
Some(t) => t,
None => return DummyResult::any(span)
};
let (ns, source) = match api.as_slice() {
"gl" => (Gl, khronos_api::GL_XML),
"glx" => {
ecx.span_err(span, "glx generation unimplemented");
return DummyResult::any(span)
},
"wgl" => {
ecx.span_err(span, "wgl generation unimplemented");
return DummyResult::any(span)
}
ns => {
ecx.span_err(span, format!("Unexpected opengl namespace '{}'", ns).as_slice());
return DummyResult::any(span)
}
};
let filter = Some(Filter {
extensions: extensions,
profile: profile,
version: version,
api: api,
});
// generating the registry of all bindings
let reg = {
use std::io::BufReader;
use std::task;
let result = task::try(proc() {
let reader = BufReader::new(source.as_bytes());
Registry::from_xml(reader, ns, filter)
});
match result {
Ok(reg) => reg,
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while parsing the registry");
}
}
return DummyResult::any(span);
}
}
};
// generating the Rust bindings as a source code into "buffer"
let buffer = {
use std::io::MemWriter;
use std::task;
// calling the generator
let result = match generator.as_slice() {
"static" => task::try(proc() {
let mut buffer = MemWriter::new();
StaticGenerator::write(&mut buffer, ®, ns);
buffer
}),
"struct" => task::try(proc() {
let mut buffer = MemWriter::new();
StructGenerator::write(&mut buffer, ®, ns);
buffer
}),
generator => {
ecx.span_err(span, format!("unknown generator type: {}", generator).as_slice());
return DummyResult::any(span);
},
};
// processing the result
match result {
Ok(buffer) => buffer.unwrap(),
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while generating the bindings");
}
}
return DummyResult::any(span);
}
}
};
// creating a new Rust parser from these bindings
let content = match String::from_utf8(buffer) {
Ok(s) => s,
Err(err) => {
ecx.span_err(span, format!("{}", err).as_slice()); | Path::new(ecx.codemap().span_to_filename(span)).display().to_string(), content);
// getting all the items defined by the bindings
let mut items = Vec::new();
loop {
match parser.parse_item_with_outer_attributes() {
None => break,
Some(i) => items.push(i)
}
}
if !parser.eat(&token::EOF) {
ecx.span_err(span, "the rust parser failed to compile all the generated bindings (meaning there is a bug in this library!)");
return DummyResult::any(span)
}
box MacroResult { content: items } as Box<MacResult>
}
fn parse_macro_arguments(ecx: &mut ExtCtxt, span: Span, tts: &[syntax::ast::TokenTree])
-> Option<(String, String, String, String, Vec<String>)>
{
// getting parameters list
let values = match get_exprs_from_tts(ecx, span, tts) {
Some(v) => v,
None => return None
};
if values.len() != 4 && values.len() != 5 {
ecx.span_err(span, format!("expected 4 or 5 arguments but got {}", values.len())
.as_slice());
return None;
}
// computing the extensions (last parameter)
let extensions: Vec<String> = match values.as_slice().get(4) {
None => Vec::new(),
Some(vector) => {
use syntax::ast::ExprVec;
match vector.node {
// only [ ... ] is accepted
ExprVec(ref list) => {
// turning each element into a string
let mut result = Vec::new();
for element in list.iter() {
match expr_to_string(ecx, element.clone(), "expected string literal") {
Some((s, _)) => result.push(s.get().to_string()),
None => return None
}
}
result
},
_ => {
ecx.span_err(span, format!("last argument must be a vector").as_slice());
return None;
}
}
}
};
// computing other parameters
match (
expr_to_string(ecx, values[0].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[1].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[2].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[3].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() })
) {
(Some(a), Some(b), Some(c), Some(d)) => Some((a, b, c, d, extensions)),
_ => None
}
} | return DummyResult::any(span)
}
};
let mut parser = ::syntax::parse::new_parser_from_source_str(ecx.parse_sess(), ecx.cfg(), | random_line_split |
lib.rs | // Copyright 2013-2014 The gl-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # gl_generator
//!
//! `gl_generator` is an OpenGL bindings generator plugin. It defines a macro named
//! `generate_gl_bindings!` which can be used to generate all constants and functions of a
//! given OpenGL version.
//!
//! ## Example
//!
//! ```rust
//! #[phase(plugin)]
//! extern crate gl_generator;
//! extern crate libc;
//!
//! use std::mem;
//! use self::types::*;
//!
//! generate_gl_bindings!("gl", "core", "4.5", "static", [ "GL_EXT_texture_filter_anisotropic" ])
//! ```
//!
//! ## Parameters
//!
//! * API: Can be `gl`, `wgl`, `glx`, `egl`. Only `gl` is supported for the moment.
//! * Profile: Can be `core` or `compatibility`. `core` will only include all functions supported
//! by the requested version itself, while `compatibility` will include all the functions from
//! previous versions as well.
//! * Version: The requested OpenGL version in the format `x.x`.
//! * Generator: Can be `static` or `struct`.
//! * Extensions (optional): An array of extensions to include in the bindings.
//!
#![crate_name = "gl_generator"]
#![comment = "OpenGL function loader generator."]
#![license = "ASL2"]
#![crate_type = "dylib"]
#![feature(phase)]
#![feature(globs)]
#![feature(macro_rules)]
#![feature(plugin_registrar)]
#![feature(quote)]
#[phase(plugin, link)]
extern crate log;
extern crate khronos_api;
extern crate rustc;
extern crate syntax;
use std::path::Path;
use std::io::{File, Reader};
use syntax::parse::token;
use syntax::ast::{ Item, TokenTree };
use syntax::ext::base::{expr_to_string, get_exprs_from_tts, DummyResult, ExtCtxt, MacResult};
use syntax::codemap::Span;
use registry::*;
use static_gen::StaticGenerator;
use struct_gen::StructGenerator;
mod common;
pub mod static_gen;
pub mod struct_gen;
pub mod registry;
pub mod ty;
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut ::rustc::plugin::Registry) {
reg.register_macro("generate_gl_bindings", macro_handler);
}
// this is the object that we will return from the "generate_gl_bindings" macro expansion
struct MacroResult {
content: Vec<::std::gc::Gc<Item>>
}
impl MacResult for MacroResult {
fn make_def(&self) -> Option<::syntax::ext::base::MacroDef> { None }
fn make_expr(&self) -> Option<::std::gc::Gc<::syntax::ast::Expr>> { None }
fn make_pat(&self) -> Option<::std::gc::Gc<::syntax::ast::Pat>> { None }
fn make_stmt(&self) -> Option<::std::gc::Gc<::syntax::ast::Stmt>> { None }
fn make_items(&self) -> Option<::syntax::util::small_vector::SmallVector<::std::gc::Gc<Item>>> {
Some(::syntax::util::small_vector::SmallVector::many(self.content.clone()))
}
}
// handler for generate_gl_bindings!
fn macro_handler(ecx: &mut ExtCtxt, span: Span, token_tree: &[TokenTree]) -> Box<MacResult+'static> |
fn parse_macro_arguments(ecx: &mut ExtCtxt, span: Span, tts: &[syntax::ast::TokenTree])
-> Option<(String, String, String, String, Vec<String>)>
{
// getting parameters list
let values = match get_exprs_from_tts(ecx, span, tts) {
Some(v) => v,
None => return None
};
if values.len() != 4 && values.len() != 5 {
ecx.span_err(span, format!("expected 4 or 5 arguments but got {}", values.len())
.as_slice());
return None;
}
// computing the extensions (last parameter)
let extensions: Vec<String> = match values.as_slice().get(4) {
None => Vec::new(),
Some(vector) => {
use syntax::ast::ExprVec;
match vector.node {
// only [ ... ] is accepted
ExprVec(ref list) => {
// turning each element into a string
let mut result = Vec::new();
for element in list.iter() {
match expr_to_string(ecx, element.clone(), "expected string literal") {
Some((s, _)) => result.push(s.get().to_string()),
None => return None
}
}
result
},
_ => {
ecx.span_err(span, format!("last argument must be a vector").as_slice());
return None;
}
}
}
};
// computing other parameters
match (
expr_to_string(ecx, values[0].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[1].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[2].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() }),
expr_to_string(ecx, values[3].clone(), "expected string literal")
.map(|e| match e { (s, _) => s.get().to_string() })
) {
(Some(a), Some(b), Some(c), Some(d)) => Some((a, b, c, d, extensions)),
_ => None
}
}
| {
// getting the arguments from the macro
let (api, profile, version, generator, extensions) = match parse_macro_arguments(ecx, span.clone(), token_tree) {
Some(t) => t,
None => return DummyResult::any(span)
};
let (ns, source) = match api.as_slice() {
"gl" => (Gl, khronos_api::GL_XML),
"glx" => {
ecx.span_err(span, "glx generation unimplemented");
return DummyResult::any(span)
},
"wgl" => {
ecx.span_err(span, "wgl generation unimplemented");
return DummyResult::any(span)
}
ns => {
ecx.span_err(span, format!("Unexpected opengl namespace '{}'", ns).as_slice());
return DummyResult::any(span)
}
};
let filter = Some(Filter {
extensions: extensions,
profile: profile,
version: version,
api: api,
});
// generating the registry of all bindings
let reg = {
use std::io::BufReader;
use std::task;
let result = task::try(proc() {
let reader = BufReader::new(source.as_bytes());
Registry::from_xml(reader, ns, filter)
});
match result {
Ok(reg) => reg,
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while parsing the registry");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while parsing the registry");
}
}
return DummyResult::any(span);
}
}
};
// generating the Rust bindings as a source code into "buffer"
let buffer = {
use std::io::MemWriter;
use std::task;
// calling the generator
let result = match generator.as_slice() {
"static" => task::try(proc() {
let mut buffer = MemWriter::new();
StaticGenerator::write(&mut buffer, ®, ns);
buffer
}),
"struct" => task::try(proc() {
let mut buffer = MemWriter::new();
StructGenerator::write(&mut buffer, ®, ns);
buffer
}),
generator => {
ecx.span_err(span, format!("unknown generator type: {}", generator).as_slice());
return DummyResult::any(span);
},
};
// processing the result
match result {
Ok(buffer) => buffer.unwrap(),
Err(err) => {
use std::any::{Any, AnyRefExt};
let err: &Any = err;
match err {
err if err.is::<String>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<String>().unwrap().as_slice());
},
err if err.is::<&'static str>() => {
ecx.span_err(span, "error while generating the bindings");
ecx.span_err(span, err.downcast_ref::<&'static str>().unwrap().as_slice());
},
_ => {
ecx.span_err(span, "unknown error while generating the bindings");
}
}
return DummyResult::any(span);
}
}
};
// creating a new Rust parser from these bindings
let content = match String::from_utf8(buffer) {
Ok(s) => s,
Err(err) => {
ecx.span_err(span, format!("{}", err).as_slice());
return DummyResult::any(span)
}
};
let mut parser = ::syntax::parse::new_parser_from_source_str(ecx.parse_sess(), ecx.cfg(),
Path::new(ecx.codemap().span_to_filename(span)).display().to_string(), content);
// getting all the items defined by the bindings
let mut items = Vec::new();
loop {
match parser.parse_item_with_outer_attributes() {
None => break,
Some(i) => items.push(i)
}
}
if !parser.eat(&token::EOF) {
ecx.span_err(span, "the rust parser failed to compile all the generated bindings (meaning there is a bug in this library!)");
return DummyResult::any(span)
}
box MacroResult { content: items } as Box<MacResult>
} | identifier_body |
mdx15_print_gerber.py | #
# Print a gerber file to the MDX-15, optionally setting the home position
#
# Note: Uses RawFileToPrinter.exe as found at http://www.columbia.edu/~em36/windowsrawprint.html
# Note: Might work with other Roland Modela Models (MDX-20), but I don't have access to such machines, so I cannot test.
#
#
# MIT License
#
# Copyright (c) 2018 Charles Donohue
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import os
import time
import sys
import threading
import traceback
import math
import msvcrt
import serial
import cv2
import numpy
class GCode2RmlConverter:
# stateful variables
inputConversionFactor = 1.0 # mm units
X = 0.0
Y = 0.0
Z = 0.0
speedmode = None
feedrate = 0.0
isFirstCommand = True
offset_x = 0.0
offset_y = 0.0
feedspeedfactor = 1.0
# Backlash compensation related
backlashX = 0
backlashY = 0
backlashZ = 0
last_x = 0
last_y = 0
last_z = 0
last_displacement_x = 0.0
last_displacement_y = 0.0
last_displacement_z = 0.0
backlash_compensation_x = 0.0
backlash_compensation_y = 0.0
backlash_compensation_z = 0.0
epsilon = 0.001
levelingData = None
manualLevelingPoints = None
def __init__(self,offset_x,offset_y,feedspeedfactor,backlashX,backlashY,backlashZ,levelingData,manualLevelingPoints):
self.moveCommandParseRegex = re.compile(r'G0([01])\s(X([-+]?\d*\.*\d+\s*))?(Y([-+]?\d*\.*\d+\s*))?(Z([-+]?\d*\.*\d+\s*))?')
self.offset_x = offset_x
self.offset_y = offset_y
self.feedspeedfactor = feedspeedfactor
self.backlashX = backlashX
self.backlashY = backlashY
self.backlashZ = backlashZ
self.levelingData = levelingData
self.manualLevelingPoints = manualLevelingPoints
def digestStream(self, lineIterator):
outputCommands = []
for line in lineIterator :
outputCommands.extend( self.digestLine(line) )
return outputCommands
def digestLine(self,line):
outputCommands = []
if self.isFirstCommand :
self.isFirstCommand = False
# Initialization commands
outputCommands.append('^DF') # set to defaults
#outputCommands.append('! 1;Z 0,0,813') # not sure what this does. Maybe starts the spindle? TODO: Try without.
line = line.rstrip() # strip line endings
#print('cmd: '+line)
if line == None or len(line) == 0 :
pass # empty line
elif line.startswith('(') :
pass # comment line
elif line == 'G20' : # units as inches
self.inputConversionFactor = 25.4
elif line == 'G21' : # units as mm
self.inputConversionFactor = 1.0
elif line == 'G90' : # absolute mode
pass # implied
elif line == 'G94' : # Feed rate units per minute mode
pass # implied
elif line == 'M03' : # spindle on
pass
elif line == 'M05' : # spindle off
outputCommands.append('^DF;!MC0;')
outputCommands.append('H')
elif line.startswith('G01 F'): # in flatcam 2018, the feed rate is set in a move command
self.feedrate = float(line[5:])
elif line.startswith('G00') or line.startswith('G01'): # move
outputCommands.extend( self.processMoveCommand(line) )
elif line.startswith('G4 P'): # dwell
dwelltime = int(line[4:])
outputCommands.append('W {}'.format( dwelltime ) )
elif line.startswith('F'): # feed rate
self.feedrate = float(line[1:])
# ...
else :
print('Unrecognized command: ' + line)
pass
return outputCommands
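# --- Editor's illustration (not part of the original script) ---
# How moveCommandParseRegex above decomposes a move line: group(1) is the
# G-mode (0 = rapid, 1 = feed) and groups 3/5/7 hold the optional X/Y/Z
# words (including any trailing whitespace, which float() tolerates), e.g.:
#   m = re.match(r'G0([01])\s(X([-+]?\d*\.*\d+\s*))?(Y([-+]?\d*\.*\d+\s*))?(Z([-+]?\d*\.*\d+\s*))?',
#                'G01 X1.500 Y-2.25 Z-0.1')
#   m.group(1) == '1'; m.group(3) == '1.500 '; m.group(5) == '-2.25 '; m.group(7) == '-0.1'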
def getHeightFor3PointPlane( self, p1,p2,p3, x, y ):
x1, y1, z1 = p1
x2, y2, z2 = p2
x3, y3, z3 = p3
v1 = [x3 - x1, y3 - y1, z3 - z1]
v2 = [x2 - x1, y2 - y1, z2 - z1]
cp = [v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0]]
a, b, c = cp
d = a * x1 + b * y1 + c * z1
z = (d - a * x - b * y) / float(c)
return z
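# --- Editor's illustration (not part of the original script) ---
# getHeightFor3PointPlane fits the plane through three probed points via the
# cross product of two edge vectors and evaluates z at (x, y). A standalone
# copy with a quick sanity check on the tilted plane z = 0.1*x (hypothetical
# numbers, not measured data):
def plane_height_sketch(p1, p2, p3, x, y):
    (x1, y1, z1), (x2, y2, z2), (x3, y3, z3) = p1, p2, p3
    a = (y3 - y1) * (z2 - z1) - (z3 - z1) * (y2 - y1)
    b = (z3 - z1) * (x2 - x1) - (x3 - x1) * (z2 - z1)
    c = (x3 - x1) * (y2 - y1) - (y3 - y1) * (x2 - x1)
    d = a * x1 + b * y1 + c * z1
    return (d - a * x - b * y) / float(c)
# plane_height_sketch((0, 0, 0.0), (10, 0, 1.0), (0, 10, 0.0), 5, 5) == 0.5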
def processMoveCommand(self, line):
#print(line)
outputCommands = []
g = self.moveCommandParseRegex.match(line)
if self.speedmode != g.group(1) :
self.speedmode = g.group(1)
#print( 'speed changed: ' + self.speedmode )
f = self.feedrate * self.inputConversionFactor * self.feedspeedfactor / 60.0 # convert to mm per second
if self.speedmode == '0' : f = 16.0 # fast mode
outputCommands.append('V {0:.2f};F {0:.2f}'.format(f))
if g.group(3) != None : self.X = float(g.group(3)) * self.inputConversionFactor
if g.group(5) != None : self.Y = float(g.group(5)) * self.inputConversionFactor
if g.group(7) != None : self.Z = float(g.group(7)) * self.inputConversionFactor
#outputScale = 1 / 0.01
outputScale = 1 / 0.025
# Z height correction
z_correction = 0.0
if self.levelingData != None :
n = len( self.levelingData[0] )
px = self.X*outputScale #+self.offset_x
py = self.Y*outputScale #+self.offset_y
# Find quadrant in which point lies
i = 0
j = 0
while i < (n-2) :
if px >= (self.levelingData[i][j][0]-self.epsilon) and px < self.levelingData[i+1][j][0] : break
i = i+1
while j < (n-2) :
if py >= (self.levelingData[i][j][1]-self.epsilon) and py < self.levelingData[i][j+1][1] : break
j = j+1
# interpolate values
px0 = self.levelingData[i][j][0]
px1 = self.levelingData[i+1][j][0]
fx = (px - px0) / (px1 - px0)
h00 = self.levelingData[i][j][2]
h10 = self.levelingData[i+1][j][2]
h0 = h00 + (h10 - h00) * fx
h01 = self.levelingData[i][j+1][2]
h11 = self.levelingData[i+1][j+1][2]
h1 = h01 + (h11 - h01) * fx
py0 = self.levelingData[i][j][1]
py1 = self.levelingData[i][j+1][1]
fy = (py - py0) / (py1 - py0)
h = h0 + (h1 - h0) * fy
#print(px,py,i,j,fx,fy,self.Z,h,h/outputScale)
z_correction = -h
# Apply compensation to Z
#self.Z = self.Z - h/outputScale
# Manual leveling points
elif self.manualLevelingPoints != None :
if len(self.manualLevelingPoints) < 3 :
pass # At least 3 points required
else :
px = self.X*outputScale #+self.offset_x
py = self.Y*outputScale #+self.offset_y
h = self.getHeightFor3PointPlane( self.manualLevelingPoints[0], self.manualLevelingPoints[1], self.manualLevelingPoints[2], px, py )
z_correction = +h
pass
# Backlash handling in X
if abs(self.backlashX) > self.epsilon :
deltaX = self.X - self.last_x
if abs(deltaX) > self.epsilon : # non-zero move in that axis
if deltaX * self.last_displacement_x < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_x = 0.0 if deltaX > 0 else -self.backlashX
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_x = deltaX;
# Backlash handling in Y
if abs(self.backlashY) > self.epsilon :
deltaY = self.Y - self.last_y
if abs(deltaY) > self.epsilon : # non-zero move in that axis
if deltaY * self.last_displacement_y < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_y = 0.0 if deltaY > 0 else -self.backlashY
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_y = deltaY;
# Backlash handling in Z
if abs(self.backlashZ) > self.epsilon :
deltaZ = self.Z - self.last_z
if abs(deltaZ) > self.epsilon : # non-zero move in that axis
if deltaZ * self.last_displacement_z < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_z = 0.0 if deltaZ > 0 else -self.backlashZ
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_z = deltaZ;
self.last_x = self.X
self.last_y = self.Y
self.last_z = self.Z
# Send move command
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.X*outputScale+self.offset_x+self.backlash_compensation_x, self.Y*outputScale+self.offset_y+self.backlash_compensation_y, self.Z*outputScale+self.backlash_compensation_z+z_correction))
return outputCommands
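# --- Editor's illustration (not part of the original script) ---
# The levelingData branch above is plain bilinear interpolation over the
# probed grid: locate the cell containing (px, py), blend the two x-edges,
# then blend those along y. The same arithmetic on a single unit cell:
def bilerp_sketch(h00, h10, h01, h11, fx, fy):
    h0 = h00 + (h10 - h00) * fx   # lower edge, blended in x
    h1 = h01 + (h11 - h01) * fx   # upper edge, blended in x
    return h0 + (h1 - h0) * fy    # blend the two edges in y
# bilerp_sketch(0.0, 1.0, 0.0, 1.0, 0.5, 0.5) == 0.5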
def convertFile(self,infile,outfile):
# TODO: Handle XY offsets
inputdata = open(infile)
outdata = self.digestStream(inputdata)
outfile = open(outfile,'w')
for cmd in outdata :
outfile.write(cmd)
outfile.write('\n')
#print(cmd)
##################################################
class ModelaZeroControl:
# Constants
XY_INCREMENTS = 1
XY_INCREMENTS_LARGE= 100
Z_INCREMENTS = 1
Z_INCREMENTS_MED = 10
Z_INCREMENTS_LARGE = 100
Z_DEFAULT_OFFSET = -1300.0
FAST_TRAVEL_RATE = 600.0
Y_MAX = 4064.0
X_MAX = 6096.0
comport = None
ser = None
z_offset = 0.0
x = 0.0
y = 0.0
z = 0.0
last_x = 0.0
last_y = 0.0
last_z = 0.0
microscope_leveling_startpoint = None
microscope_leveling_endpoint = None
connected = False
hasZeroBeenSet = False
exitRequested = False
xy_zero = (0.0,0.0)
manual_leveling_points = None
def __init__(self,comport):
self.comport = comport
try :
self.ser = serial.Serial(self.comport,9600,rtscts=1)
self.ser.close()
self.ser = None
self.connected = True
except serial.serialutil.SerialException as e :
print('Could not open '+comport)
self.connected = False
#sys.exit(1)
def sendCommand(self,cmd):
#print(cmd)
try :
self.ser = serial.Serial(self.comport,9600,rtscts=1)
txt = cmd + '\n'
self.ser.write(txt.encode('ascii'))
self.ser.close()
self.ser = None
except serial.serialutil.SerialException as e :
#print(e)
print('Error writing to '+self.comport)
self.connected = False
#sys.exit(1)
def sendMoveCommand(self,wait=False):
|
def run(self):
print('If the green light next to the VIEW button is lit, please press the VIEW button.')
print('Usage:')
print('\th - send to home')
print('\tz - Set Z zero')
print('\tZ - send to zero')
print('\twasd - move on the XY plane (+shift for small increments)')
print('\tup/down - move in the Z axis (+CTRL for medium increments, +ALT for large increments)')
print('\t1 - Set microscope-based leveling starting point (both points must be set for autoleveling to happen)')
print('\t2 - Set microscope-based leveling ending point')
print('\tm - Add a manual leveling point (wrt zero, which must be set)')
print('\tq - Quit and move to next step.')
print('\tCTRL-C / ESC - Exit program.')
self.sendCommand('^IN;!MC0;H') # clear errors, disable spindle, return home
self.z_offset = self.Z_DEFAULT_OFFSET
self.sendCommand('^DF;!ZO{:.3f};;'.format(self.z_offset)) # set z zero half way
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.spindleEnabled = False
self.sendMoveCommand(True)
self.xy_zero = (0.0,0.0)
while True : #self.connected :
c = msvcrt.getwche()
n = 0
#print(c)
if c == '\xe0' or c == '\x00' :
c = msvcrt.getwche()
n = ord(c)
#print(c,n)
if ( c == 'q' and n == 0 ) :
if not self.hasZeroBeenSet :
print('Would you like to set the current position as the Zero (y/n)?')
c = msvcrt.getwch()
if c == 'y' or c == 'Y' :
self.setZeroHere()
print('Done')
return self.xy_zero
elif c == 'h' :
self.sendCommand('^DF;!MC0;H')
elif c == 'Z' :
(self.x,self.y) = self.xy_zero
self.z = 0.0
self.sendMoveCommand(True)
elif c == 'w' and n == 0 :
self.y += self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 's' and n == 0 :
self.y -= self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'd' and n == 0 :
self.x += self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'a' and n == 0 :
self.x -= self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'W' and n == 0 :
self.y += self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'S' and n == 0 :
self.y -= self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'D' and n == 0 :
self.x += self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'A' and n == 0 :
self.x -= self.XY_INCREMENTS
self.sendMoveCommand()
elif n == 72 : # up arrow
self.z += self.Z_INCREMENTS
self.sendMoveCommand()
elif n == 80 : # down arrow
self.z -= self.Z_INCREMENTS
self.sendMoveCommand()
elif n == 141 : # ctrl + up arrow
self.z += self.Z_INCREMENTS_MED
self.sendMoveCommand()
elif n == 145 : # ctrl + down arrow
self.z -= self.Z_INCREMENTS_MED
self.sendMoveCommand()
elif n == 152 : # alt + up arrow
self.z += self.Z_INCREMENTS_LARGE
self.sendMoveCommand()
elif n == 160 : # alt + down arrow
self.z -= self.Z_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'z' and n == 0 :
self.setZeroHere()
elif c == 'm' and n == 0 :
self.setLevelingPointHere()
elif n == 75 : # left arrow
#self.sendCommand('^DF;!MC0;') # disable spindle
self.spindleEnabled = False
self.sendMoveCommand()
elif n == 77 : # right arrow
#self.sendCommand('^DF;!MC1;') # enable spindle
self.spindleEnabled = True
self.sendMoveCommand()
elif c == '1' :
self.microscope_leveling_startpoint = (self.x,self.y,self.z)
print('Setting leveling point 1 ({:.3f},{:.3f},{:.3f})'.format(self.x,self.y,self.z))
elif c == '2' :
self.microscope_leveling_endpoint = (self.x,self.y,self.z)
print('Setting leveling point 2 ({:.3f},{:.3f},{:.3f})'.format(self.x,self.y,self.z))
elif ord(c) == 27 : # Esc
self.exitRequested = True
return self.xy_zero
elif ord(c) == 3 : # CTRL-C
self.exitRequested = True
return self.xy_zero
else :
print( 'you entered: ' + str(n if n != 0 else ord(c) ))
pass
return self.xy_zero
def setZeroHere(self) :
print('Setting zero')
self.z_offset = self.z_offset + self.z
self.z = 0.0
self.sendCommand('^DF;!ZO{:.3f};;'.format(self.z_offset)) # set z zero to current
self.xy_zero = (self.x,self.y)
self.hasZeroBeenSet = True
if self.manual_leveling_points != None :
print('Warning: previously set manual leveling points lost.')
self.manual_leveling_points = None
return self.xy_zero
def setLevelingPointHere(self):
if not self.hasZeroBeenSet :
print('Warning: zero must be set before setting the leveling point. Setting it here.')
self.setZeroHere()
else :
if self.manual_leveling_points == None:
self.manual_leveling_points = [ (self.xy_zero[0],self.xy_zero[1],0.0) ]
print('Adding leveling point {} ({:.3f},{:.3f},{:.3f})'.format(len(self.manual_leveling_points),self.x,self.y,self.z))
self.manual_leveling_points.append( (self.x,self.y,self.z) )
def getManualLevelingPoints(self):
return self.manual_leveling_points
def moveTo(self,x,y,z,wait=False):
self.x = x
self.y = y
self.z = z
self.sendMoveCommand(wait)
def getAutolevelingData(self, cam, steps=1, heightpoints=50) :
if self.microscope_leveling_startpoint != None and self.microscope_leveling_endpoint != None :
print(self.microscope_leveling_startpoint,self.microscope_leveling_endpoint)
(x1,y1,z1) = self.microscope_leveling_startpoint
(x2,y2,z2) = self.microscope_leveling_endpoint
startingHeight = z1 + heightpoints/2
self.moveTo(x1,y1,z1,wait=True) # Go to start
#print(p1,p2)
heights = [[(0,0,0) for i in range(steps+1)] for j in range(steps+1)]
for i in range(steps+1) :
for j in range(steps+1) :
#print(i,j)
fx = float(i) / (steps)
fy = float(j) / (steps)
px = x1 + (x2-x1) * fx
py = y1 + (y2-y1) * fy
#print(px,py)
#print(i,j,interpolatedPosition)
focusValues = []
self.moveTo(px,py,startingHeight+5,wait=True)
for k in range(heightpoints):
h = startingHeight - k * 1.0
self.moveTo(px,py,h,wait=False)
time.sleep(0.033) # Take some time for focus value to settle
focusval = cam.getFocusValue()
#print(focusval)
focusValues.append( focusval )
#print(focusValues)
maxrank = numpy.argmax(focusValues)
self.moveTo(px,py,startingHeight-maxrank*1.0,wait=True)
# # TODO: Find max focus height position using curve fit
# poly_rank = 7
# focusValues_indexes = range(len(focusValues))
# polynomial = numpy.poly1d(numpy.polyfit(focusValues_indexes,focusValues,poly_rank))
# numpts = 500
# maxrank_high = numpy.argmax(polynomial(numpy.linspace(0, steps, numpts)))
# maxrank = ( maxrank_high / (numpts-1) ) * steps
# print(px,py,maxrank_high,maxrank)
heights[i][j] = ( px,py, maxrank)
# Bias results relative to initial point, at origin
(x0,y0,home_rank) = heights[0][0]
for i in range(steps+1) :
for j in range(steps+1) :
(x,y,r) = heights[i][j]
x = x - x0
y = y - y0
r = r - home_rank
heights[i][j] = (x,y,r)
#print(heights)
for col in heights :
print(col)
return heights
return None
##################################################
class MicroscopeFeed:
loopthread = None
threadlock = None
endLoopRequest = False
focusValue = 0.0
vidcap = None
connected = False
def __init__(self,channel):
self.channel = channel
self.threadlock = threading.Lock()
self.loopthread = threading.Thread(target=self.loopThread)
self.vidcap = cv2.VideoCapture(self.channel)
if self.vidcap.isOpened() :
self.connected = True
else :
print('Microscope connection could not be established.')
def isConnected(self):
return self.connected
def startLoop(self):
self.loopthread.start()
def loopThread(self):
if not self.vidcap.isOpened() : return
smoothed_laplacian_variance = 0.0
while True :
chk,frame = self.vidcap.read()
height, width = frame.shape[:2]
sz = 0.20 * width
x0 = int(width/2 - sz/2)
x1 = int(width/2 + sz/2)
y0 = int(height/2 - sz/2)
y1 = int(height/2 + sz/2)
center_frame = frame[ y0:y1, x0:x1 ]
center_gray = cv2.cvtColor(center_frame, cv2.COLOR_BGR2GRAY)
#cv2.imshow('center',center_gray)
laplacian = cv2.Laplacian(center_gray,cv2.CV_64F)
#cv2.imshow('laplacian',laplacian)
v = laplacian.var()
#smoothed_v_factor = 0.25
smoothed_v_factor = 0.50
smoothed_laplacian_variance = v * smoothed_v_factor + smoothed_laplacian_variance * (1.0-smoothed_v_factor)
#print('{:.0f} - {:.0f}'.format(v,smoothed_laplacian_variance))
cv2.rectangle(frame, (x0, y0), (x1, y1),(0,255,0), 2)
#textpos = (x0, y0)
textpos = (10, 20)
cv2.putText(frame, 'v = {:.2f} {:.2f}'.format(v,smoothed_laplacian_variance),textpos,cv2.FONT_HERSHEY_DUPLEX,0.8,(225,0,0))
cv2.namedWindow('vidcap', cv2.WINDOW_NORMAL)
cv2.imshow('vidcap',frame)
cv2.waitKey(1) # Required for video to be displayed
with self.threadlock :
self.focusValue = smoothed_laplacian_variance
if self.endLoopRequest :
self.vidcap.release()
cv2.destroyAllWindows()
break
def endLoop(self):
with self.threadlock :
self.endLoopRequest = True
self.loopthread.join()
def getFocusValue(self):
f = 0.0
with self.threadlock :
f = self.focusValue
return f
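# --- Editor's illustration (not part of the original script) ---
# loopThread above scores sharpness with the classic variance-of-Laplacian
# focus measure: edge energy rises as the image comes into focus, so the
# variance peaks at the best-focus height. Minimal form for one BGR frame:
def focus_measure_sketch(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(gray, cv2.CV_64F).var()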
##################################################
def main():
import optparse
parser = optparse.OptionParser('usage%prog -i <input file>')
parser.add_option('-i', '--infile', dest='infile', default='', help='The input gcode file, as exported by FlatCam.')
parser.add_option('-o', '--outfile', dest='outfile', default='', help='The output RML-1 file.')
parser.add_option("-z", '--zero', dest='zero', action="store_true", default=False, help='Zero the print head on the work surface.')
#parser.add_option('-s', '--serialport', dest='serialport', default='', help='The com port for the MDX-15. (Default: obtained from the printer driver)')
parser.add_option("-p", '--print', dest='print', action="store_true", default=False, help='Prints the RML-1 data.')
parser.add_option('-n', '--printerName', dest='printerName', default='Roland MODELA MDX-15', help='The windows printer name. (Default: Roland MODELA MDX-15)')
parser.add_option('-f', '--feedspeedfactor', dest='feedspeedfactor', default=1.0, help='Feed rate scaling factor (Default: 1.0)')
parser.add_option('--backlashX', dest='backlashX', default=0.0, help='Backlash compensation in X direction (in steps).')
parser.add_option('--backlashY', dest='backlashY', default=0.0, help='Backlash compensation in Y direction (in steps).')
parser.add_option('--backlashZ', dest='backlashZ', default=0.0, help='Backlash compensation in Z direction (in steps).')
parser.add_option('--levelingsegments', dest='levelingsegments', default=1, help='Number of segments to split the work area for microscope-based leveling. (Default: 1)')
parser.add_option('-m','--microscope', dest='microscope', default=False, help='Enable microscope on channel N')
(options,args) = parser.parse_args()
#print(options)
debugmode = False
# Find serial port number using the printer driver.
serialport = ''
if options.zero : # Printer driver is only required if we want to set the zero
import subprocess
shelloutput = subprocess.check_output('powershell -Command "(Get-WmiObject Win32_Printer -Filter \\"Name=\'{}\'\\").PortName"'.format(options.printerName))
if len(shelloutput)>0 :
try :
serialport = shelloutput.decode('utf-8').split(':')[0]
print( 'Found {} printer driver ({})'.format(options.printerName,serialport) )
except:
print('Error parsing com port: ' + str(shelloutput) )
else :
print('Could not find the printer driver for: ' + options.printerName)
if not debugmode :
sys.exit(1)
# Start microscope feed if requested
mic = None
if options.microscope != False :
mic = MicroscopeFeed( int(options.microscope) )
mic.startLoop()
#msvcrt.getwch()
#print( mic.getFocusValue() )
try:
# Manually set zero and microscope set points
x_offset = 0.0
y_offset = 0.0
modelaZeroControl = None
manualLevelingPoints = None
if options.zero :
modelaZeroControl = ModelaZeroControl(serialport)
if modelaZeroControl.connected or debugmode :
print('Setting Zero')
(x_offset,y_offset) = modelaZeroControl.run()
manualLevelingPoints = modelaZeroControl.getManualLevelingPoints()
if modelaZeroControl.exitRequested :
print('Terminating program.')
sys.exit(1)
else :
print('Could not connect to the printer to set the zero.')
# Find bed level using microscope focus
levelingData = None
if mic != None and mic.isConnected() and modelaZeroControl != None :
try:
levelingData = modelaZeroControl.getAutolevelingData(mic, steps=int(options.levelingsegments) )
except KeyboardInterrupt :
print('Leveling cancelled, terminating program.')
sys.exit(1)
# gcode to rml conversion
if options.infile != '' :
if options.outfile == '' : options.outfile = options.infile + '.prn'
print('Converting {} to {}'.format(options.infile,options.outfile))
converter = GCode2RmlConverter(x_offset, y_offset, float(options.feedspeedfactor), float(options.backlashX), float(options.backlashY), float(options.backlashZ), levelingData, manualLevelingPoints )
converter.convertFile( options.infile, options.outfile )
# Send RML code to the printer driver.
if options.print :
if options.outfile != '' :
print('Are you ready to print (y/n)?')
c = msvcrt.getwch()
if c == 'y' or c == 'Y' :
print('Printing: '+options.outfile)
os.system('RawFileToPrinter.exe "{}" "{}"'.format(options.outfile,options.printerName))
print('Procedure to cancel printing:')
print('1) Press the VIEW button on the printer.')
print('2) Cancel the print job(s) in windows. (start->Devices and Printers->...)')
print('3) Remove the usb cable to the printer.')
print('4) Press both the UP and Down buttons on the printer.')
print('5) When the VIEW light stops blinking, press the VIEW button.')
print('6) Plug the usb cable back in.')
if mic != None and mic.isConnected() :
# Don't exit now if the camera is connected, in case we want visual feedback
print('Press any key to exit.')
msvcrt.getwch()
else :
print('Error: No file to be printed.')
except Exception as e:
#print(e)
traceback.print_exc()
# Release video stream
if mic != None :
mic.endLoop()
if __name__ == "__main__":
if sys.version_info[0] < 3 :
print("This script requires Python version 3")
sys.exit(1)
main() | if self.x < 0.0 : self.x = 0.0
if self.x > self.X_MAX : self.x = self.X_MAX
if self.y < 0.0 : self.y = 0.0
if self.y > self.Y_MAX : self.y = self.Y_MAX
#print('Moving to {:.0f},{:.0f},{:.0f}'.format(self.x,self.y,self.z))
spindle = '1' if self.spindleEnabled else '0'
# The esoteric syntax was borrowed from https://github.com/Craftweeks/MDX-LabPanel
self.sendCommand('^DF;!MC{0};!PZ0,0;V15.0;Z{1:.3f},{2:.3f},{3:.3f};!MC{0};;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;'.format(spindle,self.x,self.y,self.z))
# Optional wait for move complete
dx = self.x - self.last_x
self.last_x = self.x
dy = self.y - self.last_y
self.last_y = self.y
dz = self.z - self.last_z
self.last_z = self.z
traveldist = math.sqrt(dx*dx+dy*dy+dz*dz)
if wait :
travelTime = traveldist / self.FAST_TRAVEL_RATE
time.sleep(travelTime)
#print('move done') | identifier_body |
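# --- Editor's illustration (not part of the original rows) ---
# The backlash blocks in GCode2RmlConverter.processMoveCommand boil down to:
# when an axis reverses direction, bias every following coordinate by the
# measured slack so the mechanism is re-tensioned before cutting resumes.
# A one-axis reduction of that logic (a sketch, not the project's code):
def backlash_bias_sketch(delta, last_delta, backlash, current_bias):
    if delta * last_delta < 0:                 # direction reversal
        return 0.0 if delta > 0 else -backlash
    return current_bias                        # otherwise keep previous bias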
mdx15_print_gerber.py | #
# Print a gerber file to the MDX-15, optionally setting the home position
#
# Note: Uses RawFileToPrinter.exe as found at http://www.columbia.edu/~em36/windowsrawprint.html
# Note: Might work with other Roland Modela Models (MDX-20), but I don't have access to such machines, so I cannot test.
#
#
# MIT License
#
# Copyright (c) 2018 Charles Donohue
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import os
import time
import sys
import threading
import traceback
import math
import msvcrt
import serial
import cv2
import numpy
class GCode2RmlConverter:
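# Stateful translator from the small G-code dialect emitted by FlatCAM to
# RML-1 commands for the MDX-15. It tracks position, feed rate and unit
# mode across lines, and applies optional Z leveling and per-axis backlash
# compensation to every move.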
# stateful variables
inputConversionFactor = 1.0 # mm units
X = 0.0
Y = 0.0
Z = 0.0
speedmode = None
feedrate = 0.0
isFirstCommand = True
offset_x = 0.0
offset_y = 0.0
feedspeedfactor = 1.0
# Backlash compensation related
backlashX = 0
backlashY = 0
backlashZ = 0
last_x = 0
last_y = 0
last_z = 0
last_displacement_x = 0.0
last_displacement_y = 0.0
last_displacement_z = 0.0
backlash_compensation_x = 0.0
backlash_compensation_y = 0.0
backlash_compensation_z = 0.0
epsilon = 0.001
levelingData = None
manualLevelingPoints = None
def __init__(self,offset_x,offset_y,feedspeedfactor,backlashX,backlashY,backlashZ,levelingData,manualLevelingPoints):
self.moveCommandParseRegex = re.compile(r'G0([01])\s(X([-+]?\d*\.*\d+\s*))?(Y([-+]?\d*\.*\d+\s*))?(Z([-+]?\d*\.*\d+\s*))?')
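# Move parser: group(1) is the rapid/feed flag ('0' for G00, '1' for G01);
# groups 3, 5 and 7 capture the optional X, Y and Z coordinates.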
self.offset_x = offset_x
self.offset_y = offset_y
self.feedspeedfactor = feedspeedfactor
self.backlashX = backlashX
self.backlashY = backlashY
self.backlashZ = backlashZ
self.levelingData = levelingData
self.manualLevelingPoints = manualLevelingPoints
def digestStream(self, lineIterator):
outputCommands = []
for line in lineIterator :
outputCommands.extend( self.digestLine(line) )
return outputCommands
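# Worked example (zero offsets, no leveling or backlash): the FlatCAM
# preamble G21 / G90 / G01 F120.00 / G00 X0.0 Y0.0 Z2.0 digests to
# ['^DF', 'V 16.00;F 16.00', 'Z 0,0,80'] -- the rapid G00 forces the
# 16 mm/s fast rate and 2 mm becomes 80 machine steps.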
def digestLine(self,line):
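# RML-1 mnemonics emitted below, as used consistently in this script:
# ^DF restores defaults, !MC0/!MC1 turns the spindle motor off/on,
# H returns home, W dwells (the G4 P value is passed through unchanged),
# V/F set rapid/cutting velocity, and Z x,y,z moves in machine steps.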
outputCommands = []
if self.isFirstCommand :
self.isFirstCommand = False
# Initialization commands
outputCommands.append('^DF') # set to defaults
#outputCommands.append('! 1;Z 0,0,813') # not sure what this does. Maybe starts the spindle? TODO: Try without.
line = line.rstrip() # strip line endings
#print('cmd: '+line)
if line == None or len(line) == 0 :
pass # empty line
elif line.startswith('(') :
pass # comment line
elif line == 'G20' : # units as inches
self.inputConversionFactor = 25.4
elif line == 'G21' : # units as mm
self.inputConversionFactor = 1.0
elif line == 'G90' : # absolute mode
pass # implied
elif line == 'G94' : # Feed rate units per minute mode
pass # implied
elif line == 'M03' : # spindle on
pass
elif line == 'M05' : # spindle off
outputCommands.append('^DF;!MC0;')
outputCommands.append('H')
elif line.startswith('G01 F'): # in FlatCAM 2018, the feed rate is set on the move command itself
self.feedrate = float(line[5:])
elif line.startswith('G00') or line.startswith('G01'): # move
outputCommands.extend( self.processMoveCommand(line) )
elif line.startswith('G4 P'): # dwell
dwelltime = int(line[4:])
outputCommands.append('W {}'.format( dwelltime ) )
elif line.startswith('F'): # feed rate
self.feedrate = float(line[1:])
# ...
else :
print('Unrecognized command: ' + line)
pass
return outputCommands
def getHeightFor3PointPlane( self, p1,p2,p3, x, y ):
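# Plane through three points: v1 and v2 span the plane, their cross
# product cp = (a,b,c) is its normal, and d = a*x1 + b*y1 + c*z1 fixes the
# plane equation a*x + b*y + c*z = d. Solving for z gives the bed height
# under (x,y). Collinear points make c zero and would divide by zero.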
x1, y1, z1 = p1
x2, y2, z2 = p2
x3, y3, z3 = p3
v1 = [x3 - x1, y3 - y1, z3 - z1]
v2 = [x2 - x1, y2 - y1, z2 - z1]
cp = [v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0]]
a, b, c = cp
d = a * x1 + b * y1 + c * z1
z = (d - a * x - b * y) / float(c)
return z
def processMoveCommand(self, line):
#print(line)
outputCommands = []
g = self.moveCommandParseRegex.match(line)
if g == None : # malformed move line; warn and emit nothing rather than crash on a None match
print('Could not parse move command: ' + line)
return outputCommands
if self.speedmode != g.group(1) :
self.speedmode = g.group(1)
#print( 'speed changed: ' + self.speedmode )
f = self.feedrate * self.inputConversionFactor * self.feedspeedfactor / 60.0 # convert to mm per second
if self.speedmode == '0' : f = 16.0 # fast mode
outputCommands.append('V {0:.2f};F {0:.2f}'.format(f))
if g.group(3) != None : self.X = float(g.group(3)) * self.inputConversionFactor
if g.group(5) != None : self.Y = float(g.group(5)) * self.inputConversionFactor
if g.group(7) != None : self.Z = float(g.group(7)) * self.inputConversionFactor
#outputScale = 1 / 0.01
outputScale = 1 / 0.025
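# Assumption: the MDX-15 addresses moves in steps of 0.025 mm, so this
# factor converts millimetres to machine steps (the backlash options are
# likewise specified in steps).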
# Z height correction
z_correction = 0.0
if self.levelingData != None :
n = len( self.levelingData[0] )
px = self.X*outputScale #+self.offset_x
py = self.Y*outputScale #+self.offset_y
# Find quadrant in which point lies
i = 0
j = 0
while i < (n-2) :
if px >= (self.levelingData[i][j][0]-self.epsilon) and px < self.levelingData[i+1][j][0] : break
i = i+1
while j < (n-2) :
if py >= (self.levelingData[i][j][1]-self.epsilon) and py < self.levelingData[i][j+1][1] : break
j = j+1
# interpolate values
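# Bilinear interpolation: blend the four surrounding grid heights along X
# first (h0 at the lower Y edge, h1 at the upper), then along Y, giving
# the measured bed height h under (px,py).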
px0 = self.levelingData[i][j][0]
px1 = self.levelingData[i+1][j][0]
fx = (px - px0) / (px1 - px0)
h00 = self.levelingData[i][j][2]
h10 = self.levelingData[i+1][j][2]
h0 = h00 + (h10 - h00) * fx
h01 = self.levelingData[i][j+1][2]
h11 = self.levelingData[i+1][j+1][2]
h1 = h01 + (h11 - h01) * fx
py0 = self.levelingData[i][j][1]
py1 = self.levelingData[i][j+1][1]
fy = (py - py0) / (py1 - py0)
h = h0 + (h1 - h0) * fy
#print(px,py,i,j,fx,fy,self.Z,h,h/outputScale)
z_correction = -h
# Apply compensation to Z
#self.Z = self.Z - h/outputScale
# Manual leveling points
elif self.manualLevelingPoints != None :
if len(self.manualLevelingPoints) < 3 :
pass # At least 3 points required
else :
px = self.X*outputScale #+self.offset_x
py = self.Y*outputScale #+self.offset_y
h = self.getHeightFor3PointPlane( self.manualLevelingPoints[0], self.manualLevelingPoints[1], self.manualLevelingPoints[2], px, py )
z_correction = +h
pass
# Backlash handling in X
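# Per-axis backlash strategy: when the commanded direction reverses, first
# re-issue the previous position shifted by the backlash amount in the new
# direction so the mechanism takes up its slack, then keep that offset
# applied to every move until the next reversal.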
if abs(self.backlashX) > self.epsilon :
deltaX = self.X - self.last_x
if abs(deltaX) > self.epsilon : # non-zero move in that axis
if deltaX * self.last_displacement_x < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_x = 0.0 if deltaX > 0 else -self.backlashX
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_x = deltaX
# Backlash handling in Y
if abs(self.backlashY) > self.epsilon :
deltaY = self.Y - self.last_y
if abs(deltaY) > self.epsilon : # non-zero move in that axis
if deltaY * self.last_displacement_y < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_y = 0.0 if deltaY > 0 else -self.backlashY
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_y = deltaY
# Backlash handling in Z
if abs(self.backlashZ) > self.epsilon :
deltaZ = self.Z - self.last_z
if abs(deltaZ) > self.epsilon : # non-zero move in that axis
if deltaZ * self.last_displacement_z < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_z = 0.0 if deltaZ > 0 else -self.backlashZ
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_z = deltaZ
self.last_x = self.X
self.last_y = self.Y
self.last_z = self.Z
# Send move command
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.X*outputScale+self.offset_x+self.backlash_compensation_x, self.Y*outputScale+self.offset_y+self.backlash_compensation_y, self.Z*outputScale+self.backlash_compensation_z+z_correction))
return outputCommands
def convertFile(self,infile,outfile):
# TODO: Handle XY offsets
# Context managers ensure both files are closed (and output flushed) even on error.
with open(infile) as instream :
outdata = self.digestStream(instream)
with open(outfile,'w') as outstream :
for cmd in outdata :
outstream.write(cmd)
outstream.write('\n')
#print(cmd)
##################################################
class ModelaZeroControl:
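# Interactive jog console for the MDX-15 over its serial port. It sets the
# XY/Z zero, records manual leveling points, and captures the two corner
# points used by the microscope-based autoleveling scan.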
# Constants
XY_INCREMENTS = 1
XY_INCREMENTS_LARGE= 100
Z_INCREMENTS = 1
Z_INCREMENTS_MED = 10
Z_INCREMENTS_LARGE = 100
Z_DEFAULT_OFFSET = -1300.0
FAST_TRAVEL_RATE = 600.0
Y_MAX = 4064.0
X_MAX = 6096.0
comport = None
ser = None
z_offset = 0.0
x = 0.0
y = 0.0
z = 0.0
last_x = 0.0
last_y = 0.0
last_z = 0.0
microscope_leveling_startpoint = None
microscope_leveling_endpoint = None
connected = False
hasZeroBeenSet = False
exitRequested = False
spindleEnabled = False # default off; toggled with the left/right arrow keys in run()
xy_zero = (0.0,0.0)
manual_leveling_points = None
def __init__(self,comport):
self.comport = comport
try :
self.ser = serial.Serial(self.comport,9600,rtscts=1)
self.ser.close()
self.ser = None
self.connected = True
except serial.serialutil.SerialException as e :
print('Could not open '+comport)
self.connected = False
#sys.exit(1)
def sendCommand(self,cmd):
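# The port is reopened for every command and closed immediately after;
# with rtscts=1, writes are throttled by the machine's hardware flow control.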
#print(cmd)
try :
self.ser = serial.Serial(self.comport,9600,rtscts=1)
txt = cmd + '\n'
self.ser.write(txt.encode('ascii'))
self.ser.close()
self.ser = None
except serial.serialutil.SerialException as e :
#print(e)
print('Error writing to '+self.comport)
self.connected = False
#sys.exit(1)
def sendMoveCommand(self,wait=False):
if self.x < 0.0 : self.x = 0.0
if self.x > self.X_MAX : self.x = self.X_MAX
if self.y < 0.0 : self.y = 0.0
if self.y > self.Y_MAX : self.y = self.Y_MAX
#print('Moving to {:.0f},{:.0f},{:.0f}'.format(self.x,self.y,self.z))
spindle = '1' if self.spindleEnabled else '0'
# The esoteric syntax was borrowed from https://github.com/Craftweeks/MDX-LabPanel
self.sendCommand('^DF;!MC{0};!PZ0,0;V15.0;Z{1:.3f},{2:.3f},{3:.3f};!MC{0};;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;'.format(spindle,self.x,self.y,self.z))
# Optional wait for move complete
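# There is no completion handshake: travel time is estimated from the
# straight-line distance at FAST_TRAVEL_RATE and simply slept through.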
dx = self.x - self.last_x
self.last_x = self.x
dy = self.y - self.last_y
self.last_y = self.y
dz = self.z - self.last_z
self.last_z = self.z
traveldist = math.sqrt(dx*dx+dy*dy+dz*dz)
if wait :
travelTime = traveldist / self.FAST_TRAVEL_RATE
time.sleep(travelTime)
#print('move done')
def run(self):
print('If the green light next to the VIEW button is lit, please press the VIEW button.')
print('Usage:')
print('\th - send to home')
print('\tz - Set Z zero')
print('\tZ - send to zero')
print('\twasd - move on the XY plane (+shift for small increments)')
print('\tup/down - move in the Z axis (+CTRL for medium increments, +ALT for large increments)')
print('\t1 - Set Microscope-based levelling starting point (both points must be set for autolevelling to happen)')
print('\t2 - Set Microscope-based levelling ending point')
print('\tm - Add a manual levelling point (wrt zero, which must be set)')
print('\tq - Quit and move to next step.')
print('\tCTRL-C / ESC - Exit program.')
self.sendCommand('^IN;!MC0;H') # clear errors, disable spindle, return home
self.z_offset = self.Z_DEFAULT_OFFSET
self.sendCommand('^DF;!ZO{:.3f};;'.format(self.z_offset)) # set z zero half way
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.spindleEnabled = False
self.sendMoveCommand(True)
self.xy_zero = (0.0,0.0)
while True : #self.connected :
c = msvcrt.getwche()
n = 0
#print(c)
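# msvcrt returns '\xe0' or '\x00' as a prefix byte for extended keys
# (arrows, Ctrl/Alt combinations); the actual scan code follows in the
# next read and is exposed below as n.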
if c == '\xe0' or c == '\x00' :
c = msvcrt.getwche()
n = ord(c)
#print(c,n)
if ( c == 'q' and n == 0 ) :
if not self.hasZeroBeenSet :
print('Would you like to set the current position as the Zero (y/n)?')
c = msvcrt.getwch()
if c == 'y' or c == 'Y' :
self.setZeroHere()
print('Done')
return self.xy_zero
elif c == 'h' :
self.sendCommand('^DF;!MC0;H')
elif c == 'Z' :
(self.x,self.y) = self.xy_zero
self.z = 0.0
self.sendMoveCommand(True)
elif c == 'w' and n == 0 :
self.y += self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 's' and n == 0 :
self.y -= self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'd' and n == 0 :
self.x += self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'a' and n == 0 :
self.x -= self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'W' and n == 0 :
self.y += self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'S' and n == 0 :
self.y -= self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'D' and n == 0 :
self.x += self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'A' and n == 0 :
self.x -= self.XY_INCREMENTS
self.sendMoveCommand()
elif n == 72 : # up arrow
self.z += self.Z_INCREMENTS
self.sendMoveCommand()
elif n == 80 : # down arrow
self.z -= self.Z_INCREMENTS
self.sendMoveCommand()
elif n == 141 : # ctrl + up arrow
self.z += self.Z_INCREMENTS_MED
self.sendMoveCommand()
elif n == 145 : # ctrl + down arrow
self.z -= self.Z_INCREMENTS_MED
self.sendMoveCommand()
elif n == 152 : # alt + up arrow
self.z += self.Z_INCREMENTS_LARGE
self.sendMoveCommand()
elif n == 160 : # alt + down arrow
self.z -= self.Z_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'z' and n == 0 :
self.setZeroHere()
elif c == 'm' and n == 0 :
self.setLevelingPointHere()
elif n == 75 : # left arrow
#self.sendCommand('^DF;!MC0;') # disable spindle
self.spindleEnabled = False
self.sendMoveCommand()
elif n == 77 : # right arrow
#self.sendCommand('^DF;!MC1;') # enable spindle
self.spindleEnabled = True
self.sendMoveCommand()
elif c == '1' :
self.microscope_leveling_startpoint = (self.x,self.y,self.z)
print('Setting leveling point 1 ({:.3f},{:.3f},{:.3f})'.format(self.x,self.y,self.z))
elif c == '2' :
self.microscope_leveling_endpoint = (self.x,self.y,self.z)
print('Setting leveling point 2 ({:.3f},{:.3f},{:.3f})'.format(self.x,self.y,self.z))
elif ord(c) == 27 : # Esc
self.exitRequested = True
return self.xy_zero
elif ord(c) == 3 : # CTRL-C
self.exitRequested = True
return self.xy_zero
else :
print( 'you entered: ' + str(n if n != 0 else ord(c) ))
pass
return self.xy_zero
def setZeroHere(self) :
print('Setting zero')
self.z_offset = self.z_offset + self.z
self.z = 0.0
self.sendCommand('^DF;!ZO{:.3f};;'.format(self.z_offset)) # set z zero to current
self.xy_zero = (self.x,self.y)
self.hasZeroBeenSet = True
if self.manual_leveling_points != None :
print('Warning: previously set manual leveling points lost.')
self.manual_leveling_points = None
return self.xy_zero
def setLevelingPointHere(self):
if not self.hasZeroBeenSet :
print('Warning: zero must be set before setting the leveling point. Setting it here.')
self.setZeroHere()
else :
if self.manual_leveling_points == None:
self.manual_leveling_points = [ (self.xy_zero[0],self.xy_zero[1],0.0) ]
print('Adding leveling point {} ({:.3f},{:.3f},{:.3f})'.format(len(self.manual_leveling_points),self.x,self.y,self.z))
self.manual_leveling_points.append( (self.x,self.y,self.z) )
def getManualLevelingPoints(self):
return self.manual_leveling_points
def moveTo(self,x,y,z,wait=False):
self.x = x
self.y = y
self.z = z
self.sendMoveCommand(wait)
def getAutolevelingData(self, cam, steps=1, heightpoints=50) :
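# Scans a (steps+1) x (steps+1) grid spanning the two microscope points.
# At each node the head steps down through 'heightpoints' positions while
# sampling the camera focus value; the height with maximum focus is taken
# as the bed surface. Heights are then rebased so the first node sits at
# (0,0) with rank 0.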
if self.microscope_leveling_startpoint != None and self.microscope_leveling_endpoint != None :
print(self.microscope_leveling_startpoint,self.microscope_leveling_endpoint)
(x1,y1,z1) = self.microscope_leveling_startpoint
(x2,y2,z2) = self.microscope_leveling_endpoint
startingHeight = z1 + heightpoints/2
self.moveTo(x1,y1,z1,wait=True) # Go to start
#print(p1,p2)
heights = [[(0,0,0) for i in range(steps+1)] for j in range(steps+1)]
for i in range(steps+1) :
for j in range(steps+1) :
#print(i,j)
fx = float(i) / (steps)
fy = float(j) / (steps)
px = x1 + (x2-x1) * fx
py = y1 + (y2-y1) * fy
#print(px,py)
#print(i,j,interpolatedPosition)
focusValues = []
self.moveTo(px,py,startingHeight+5,wait=True)
for k in range(heightpoints):
h = startingHeight - k * 1.0
self.moveTo(px,py,h,wait=False)
time.sleep(0.033) # Take some time for focus value to settle
focusval = cam.getFocusValue()
#print(focusval)
focusValues.append( focusval )
#print(focusValues)
maxrank = numpy.argmax(focusValues)
self.moveTo(px,py,startingHeight-maxrank*1.0,wait=True)
# # TODO: Find max focus height position using curve fit
# poly_rank = 7
# focusValues_indexes = range(len(focusValues))
# polynomial = numpy.poly1d(numpy.polyfit(focusValues_indexes,focusValues,poly_rank))
# numpts = 500
# maxrank_high = numpy.argmax(polynomial(numpy.linspace(0, steps, numpts)))
# maxrank = ( maxrank_high / (numpts-1) ) * steps
# print(px,py,maxrank_high,maxrank)
heights[i][j] = ( px,py, maxrank)
# Bias results relative to initial point, at origin
(x0,y0,home_rank) = heights[0][0]
for i in range(steps+1) :
for j in range(steps+1) :
(x,y,r) = heights[i][j]
x = x - x0
y = y - y0
r = r - home_rank
heights[i][j] = (x,y,r)
#print(heights)
for col in heights :
print(col)
return heights
return None
##################################################
class MicroscopeFeed:
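# Background thread that reads frames from a USB microscope via OpenCV,
# shows them with a focus overlay, and publishes a lock-protected smoothed
# focus value for the autoleveling scan.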
loopthread = None
threadlock = None
endLoopRequest = False
focusValue = 0.0
vidcap = None
connected = False
def __init__(self,channel):
self.channel = channel
self.threadlock = threading.Lock()
self.loopthread = threading.Thread(target=self.loopThread)
self.vidcap = cv2.VideoCapture(self.channel)
if self.vidcap.isOpened() :
self.connected = True
else :
print('Microscope connection could not be established.')
def isConnected(self):
return self.connected
def startLoop(self):
self.loopthread.start()
def loopThread(self):
if not self.vidcap.isOpened() : return
smoothed_laplacian_variance = 0.0
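# Focus metric: variance of the Laplacian of a grayscale centre crop.
# Sharp, in-focus frames have strong edges and hence high variance; an
# exponential moving average s = a*v + (1-a)*s (a = smoothed_v_factor)
# damps frame-to-frame noise.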
while True :
chk,frame = self.vidcap.read()
if not chk : # read failed (camera unplugged?); shut the feed down rather than crash on a None frame
self.vidcap.release()
cv2.destroyAllWindows()
break
height, width = frame.shape[:2]
sz = 0.20 * width
x0 = int(width/2 - sz/2)
x1 = int(width/2 + sz/2)
y0 = int(height/2 - sz/2)
y1 = int(height/2 + sz/2)
center_frame = frame[ y0:y1, x0:x1 ]
center_gray = cv2.cvtColor(center_frame, cv2.COLOR_BGR2GRAY)
#cv2.imshow('center',center_gray)
laplacian = cv2.Laplacian(center_gray,cv2.CV_64F)
#cv2.imshow('laplacian',laplacian)
v = laplacian.var()
#smoothed_v_factor = 0.25
smoothed_v_factor = 0.50
smoothed_laplacian_variance = v * smoothed_v_factor + smoothed_laplacian_variance * (1.0-smoothed_v_factor)
#print('{:.0f} - {:.0f}'.format(v,smoothed_laplacian_variance))
cv2.rectangle(frame, (x0, y0), (x1, y1),(0,255,0), 2)
#textpos = (x0, y0)
textpos = (10, 20)
cv2.putText(frame, 'v = {:.2f} {:.2f}'.format(v,smoothed_laplacian_variance),textpos,cv2.FONT_HERSHEY_DUPLEX,0.8,(225,0,0))
cv2.namedWindow('vidcap', cv2.WINDOW_NORMAL)
cv2.imshow('vidcap',frame)
cv2.waitKey(1) # Required for video to be displayed
with self.threadlock :
self.focusValue = smoothed_laplacian_variance
if self.endLoopRequest :
self.vidcap.release()
cv2.destroyAllWindows()
break
def endLoop(self):
with self.threadlock :
self.endLoopRequest = True
self.loopthread.join()
def getFocusValue(self):
f = 0.0
with self.threadlock :
f = self.focusValue
return f
##################################################
def main():
import optparse
parser = optparse.OptionParser('usage: %prog -i <input file>')
parser.add_option('-i', '--infile', dest='infile', default='', help='The input gcode file, as exported by FlatCam.')
parser.add_option('-o', '--outfile', dest='outfile', default='', help='The output RML-1 file.')
parser.add_option("-z", '--zero', dest='zero', action="store_true", default=False, help='Zero the print head on the work surface.')
#parser.add_option('-s', '--serialport', dest='serialport', default='', help='The com port for the MDX-15. (Default: obtained from the printer driver)')
parser.add_option("-p", '--print', dest='print', action="store_true", default=False, help='Prints the RML-1 data.')
parser.add_option('-n', '--printerName', dest='printerName', default='Roland MODELA MDX-15', help='The windows printer name. (Default: Roland MODELA MDX-15)')
parser.add_option('-f', '--feedspeedfactor', dest='feedspeedfactor', default=1.0, help='Feed rate scaling factor (Default: 1.0)')
parser.add_option('--backlashX', dest='backlashX', default=0.0, help='Backlash compensation in X direction (in steps).')
parser.add_option('--backlashY', dest='backlashY', default=0.0, help='Backlash compensation in y direction (in steps).')
parser.add_option('--backlashZ', dest='backlashZ', default=0.0, help='Backlash compensation in z direction (in steps).')
parser.add_option('--levelingsegments', dest='levelingsegments', default=1, help='Number of segments to split the work area for microscope-based leveling. (Default: 1)')
parser.add_option('-m','--microscope', dest='microscope', default=False, help='Enable microscope on channel N')
(options,args) = parser.parse_args()
#print(options)
debugmode = False
# Find serial port number using the printer driver.
serialport = ''
if options.zero : # Printer driver is only required if we want to set the zero
import subprocess
shelloutput = subprocess.check_output('powershell -Command "(Get-WmiObject Win32_Printer -Filter \\"Name=\'{}\'\\").PortName"'.format(options.printerName))
if len(shelloutput)>0 :
try :
serialport = shelloutput.decode('utf-8').split(':')[0]
print( 'Found {} printer driver ({})'.format(options.printerName,serialport) )
except:
print('Error parsing com port: ' + str(shelloutput) )
else :
print('Could not find the printer driver for: ' + options.printerName)
if not debugmode :
sys.exit(1)
# Start microscope feed if requested
mic = None
if options.microscope != False :
mic = MicroscopeFeed( int(options.microscope) )
mic.startLoop()
#msvcrt.getwch()
#print( mic.getFocusValue() )
try:
# Manually set zero and microscope set points
x_offset = 0.0
y_offset = 0.0
modelaZeroControl = None
manualLevelingPoints = None
if options.zero :
modelaZeroControl = ModelaZeroControl(serialport)
if modelaZeroControl.connected or debugmode :
print('Setting Zero')
(x_offset,y_offset) = modelaZeroControl.run()
manualLevelingPoints = modelaZeroControl.getManualLevelingPoints()
if modelaZeroControl.exitRequested :
print('Terminating program.')
sys.exit(1)
else :
print('Could not connect to the printer to set the zero.')
# Find bed level using microscope focus
levelingData = None
if mic != None and mic.isConnected() and modelaZeroControl != None :
try:
levelingData = modelaZeroControl.getAutolevelingData(mic, steps=int(options.levelingsegments) )
except KeyboardInterrupt :
print('Leveling cancelled, terminating program.')
sys.exit(1)
# gcode to rml conversion
if options.infile != '' :
if options.outfile == '' : options.outfile = options.infile + '.prn'
print('Converting {} to {}'.format(options.infile,options.outfile))
converter = GCode2RmlConverter(x_offset, y_offset, float(options.feedspeedfactor), float(options.backlashX), float(options.backlashY), float(options.backlashZ), levelingData, manualLevelingPoints )
converter.convertFile( options.infile, options.outfile )
# Send RML code to the printer driver.
if options.print :
if options.outfile != '' :
print('Are you ready to print (y/n)?')
c = msvcrt.getwch()
if c == 'y' or c == 'Y' :
print('Printing: '+options.outfile)
os.system('RawFileToPrinter.exe "{}" "{}"'.format(options.outfile,options.printerName))
print('Procedure to cancel printing:')
print('1) Press the VIEW button on the printer.')
print('2) Cancel the print job(s) in Windows. (Start -> Devices and Printers -> ...)')
print('3) Remove the USB cable to the printer.')
print('4) Press both the UP and DOWN buttons on the printer.')
print('5) When the VIEW light stops blinking, press the VIEW button.')
print('6) Plug the USB cable back in.')
if mic != None and mic.isConnected() :
# Don't exit now if the camera is connected, in case we want visual feedback
print('Press any key to exit.')
msvcrt.getwch()
else :
print('Error: No file to be printed.')
except Exception as e:
#print(e)
traceback.print_exc()
# Release video stream
if mic != None :
mic.endLoop()
if __name__ == "__main__":
if sys.version_info[0] < 3 :
print("This script requires Python version 3")
sys.exit(1)
main()
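# Example invocations (hypothetical file names):
#   python mdx15_print_gerber.py -z -i board_iso.gcode -o board.prn
#   python mdx15_print_gerber.py -z -m 0 --levelingsegments 2 -i board_iso.gcode -p
# The first jogs to set the zero and converts; the second additionally
# runs the microscope autoleveling scan on camera channel 0 (after the two
# corner points are set with the 1 and 2 keys) and then prints.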
|
mdx15_print_gerber.py | #
# Print a gerber file to the MDX-15, optionally setting the home position
#
# Note: Uses RawFileToPrinter.exe as found at http://www.columbia.edu/~em36/windowsrawprint.html
# Note: Might work with other Roland Modela Models (MDX-20), but I don't have access to such machines, so I cannot test.
#
#
# MIT License
#
# Copyright (c) 2018 Charles Donohue
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import os
import time
import sys
import threading
import traceback
import math
import msvcrt
import serial
import cv2
import numpy
class GCode2RmlConverter:
# stateful variables
inputConversionFactor = 1.0 # mm units
X = 0.0
Y = 0.0
Z = 0.0
speedmode = None
feedrate = 0.0
isFirstCommand = True
offset_x = 0.0
offset_y = 0.0
feedspeedfactor = 1.0
# Backlash compensation related
backlashX = 0
backlashY = 0
backlashZ = 0
last_x = 0
last_y = 0
last_z = 0
last_displacement_x = 0.0
last_displacement_y = 0.0
last_displacement_z = 0.0
backlash_compensation_x = 0.0
backlash_compensation_y = 0.0
backlash_compensation_z = 0.0
epsilon = 0.001
levelingData = None
manualLevelingPoints = None
def __init__(self,offset_x,offset_y,feedspeedfactor,backlashX,backlashY,backlashZ,levelingData,manualLevelingPoints):
self.moveCommandParseRegex = re.compile(r'G0([01])\s(X([-+]?\d*\.*\d+\s*))?(Y([-+]?\d*\.*\d+\s*))?(Z([-+]?\d*\.*\d+\s*))?')
self.offset_x = offset_x
self.offset_y = offset_y
self.feedspeedfactor = feedspeedfactor
self.backlashX = backlashX
self.backlashY = backlashY
self.backlashZ = backlashZ
self.levelingData = levelingData
self.manualLevelingPoints = manualLevelingPoints
def digestStream(self, lineIterator):
outputCommands = []
for line in lineIterator :
outputCommands.extend( self.digestLine(line) )
return outputCommands
def digestLine(self,line):
outputCommands = []
if self.isFirstCommand :
self.isFirstCommand = False
# Initialization commands
outputCommands.append('^DF') # set to defaults
#outputCommands.append('! 1;Z 0,0,813') # not sure what this does. Maybe starts the spindle? TODO: Try without.
line = line.rstrip() # strip line endings
#print('cmd: '+line)
if line == None or len(line) == 0 :
pass # empty line
elif line.startswith('(') :
pass # comment line
elif line == 'G20' : # units as inches
self.inputConversionFactor = 25.4
elif line == 'G21' : # units as mm
self.inputConversionFactor = 1.0
elif line == 'G90' : # absolute mode
pass # implied
elif line == 'G94' : # Feed rate units per minute mode
pass # implied
elif line == 'M03' : # spindle on
pass
elif line == 'M05' : # spindle off
outputCommands.append('^DF;!MC0;')
outputCommands.append('H')
elif line.startswith('G01 F'): # in flatcam 2018, the feed rate is set in a move command
self.feedrate = float(line[5:])
elif line.startswith('G00') or line.startswith('G01'): # move
outputCommands.extend( self.processMoveCommand(line) )
elif line.startswith('G4 P'): # dwell
dwelltime = int(line[4:])
outputCommands.append('W {}'.format( dwelltime ) )
elif line.startswith('F'): # feed rate
self.feedrate = float(line[1:])
# ...
else :
print('Unrecognized command: ' + line)
pass
return outputCommands
def getHeightFor3PointPlane( self, p1,p2,p3, x, y ):
x1, y1, z1 = p1
x2, y2, z2 = p2
x3, y3, z3 = p3
v1 = [x3 - x1, y3 - y1, z3 - z1]
v2 = [x2 - x1, y2 - y1, z2 - z1]
cp = [v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0]]
a, b, c = cp
d = a * x1 + b * y1 + c * z1
z = (d - a * x - b * y) / float(c)
return z
def processMoveCommand(self, line):
#print(line)
outputCommands = []
g = self.moveCommandParseRegex.match(line)
if self.speedmode != g.group(1) :
self.speedmode = g.group(1)
#print( 'speed changed: ' + self.speedmode )
f = self.feedrate * self.inputConversionFactor * self.feedspeedfactor / 60.0 # convert to mm per second
if self.speedmode == '0' : f = 16.0 # fast mode
outputCommands.append('V {0:.2f};F {0:.2f}'.format(f))
if g.group(3) != None : self.X = float(g.group(3)) * self.inputConversionFactor
if g.group(5) != None : self.Y = float(g.group(5)) * self.inputConversionFactor
if g.group(7) != None : self.Z = float(g.group(7)) * self.inputConversionFactor
#outputScale = 1 / 0.01
outputScale = 1 / 0.025
# Z height correction
z_correction = 0.0
if self.levelingData != None :
n = len( self.levelingData[0] )
px = self.X*outputScale #+self.offset_x
py = self.Y*outputScale #+self.offset_y
# Find quadrant in which point lies
i = 0
j = 0
while i < (n-2) :
if px >= (self.levelingData[i][j][0]-self.epsilon) and px < self.levelingData[i+1][j][0] : break
i = i+1
while j < (n-2) :
if py >= (self.levelingData[i][j][1]-self.epsilon) and py < self.levelingData[i][j+1][1] : break
j = j+1
# interpolate values
px0 = self.levelingData[i][j][0]
px1 = self.levelingData[i+1][j][0]
fx = (px - px0) / (px1 - px0)
h00 = self.levelingData[i][j][2]
h10 = self.levelingData[i+1][j][2]
h0 = h00 + (h10 - h00) * fx
h01 = self.levelingData[i][j+1][2]
h11 = self.levelingData[i+1][j+1][2]
h1 = h01 + (h11 - h01) * fx
py0 = self.levelingData[i][j][1]
py1 = self.levelingData[i][j+1][1]
fy = (py - py0) / (py1 - py0)
h = h0 + (h1 - h0) * fy
#print(px,py,i,j,fx,fy,self.Z,h,h/outputScale)
z_correction = -h
# Apply compensation to Z
#self.Z = self.Z - h/outputScale
# Manual leveling points
elif self.manualLevelingPoints != None :
if len(self.manualLevelingPoints) < 3 :
pass # At least 3 points required
else :
px = self.X*outputScale #+self.offset_x
py = self.Y*outputScale #+self.offset_y
h = self.getHeightFor3PointPlane( self.manualLevelingPoints[0], self.manualLevelingPoints[1], self.manualLevelingPoints[2], px, py )
z_correction = +h
pass
# Backlash handling in X
if abs(self.backlashX) > self.epsilon :
deltaX = self.X - self.last_x
if abs(deltaX) > self.epsilon : # non-zero move in that axis
if deltaX * self.last_displacement_x < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_x = 0.0 if deltaX > 0 else -self.backlashX
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_x = deltaX;
# Backlash handling in Y
if abs(self.backlashY) > self.epsilon :
deltaY = self.Y - self.last_y
if abs(deltaY) > self.epsilon : # non-zero move in that axis
if deltaY * self.last_displacement_y < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_y = 0.0 if deltaY > 0 else -self.backlashY
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_y = deltaY;
# Backlash handling in Z
if abs(self.backlashZ) > self.epsilon :
deltaZ = self.Z - self.last_z
if abs(deltaZ) > self.epsilon : # non-zero move in that axis
if deltaZ * self.last_displacement_z < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_z = 0.0 if deltaZ > 0 else -self.backlashZ
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_+z_correction))
self.last_displacement_z = deltaZ;
self.last_x = self.X
self.last_y = self.Y
self.last_z = self.Z
# Send move command
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.X*outputScale+self.offset_x+self.backlash_compensation_x, self.Y*outputScale+self.offset_y+self.backlash_compensation_y, self.Z*outputScale+self.backlash_compensation_z+z_correction))
return outputCommands
def convertFile(self,infile,outfile):
# TODO: Handle XY offsets
inputdata = open(infile)
outdata = self.digestStream(inputdata)
outfile = open(outfile,'w')
for cmd in outdata :
outfile.write(cmd)
outfile.write('\n')
#print(cmd)
##################################################
class ModelaZeroControl:
# Constants
XY_INCREMENTS = 1
XY_INCREMENTS_LARGE= 100
Z_INCREMENTS = 1
Z_INCREMENTS_MED = 10
Z_INCREMENTS_LARGE = 100
Z_DEFAULT_OFFSET = -1300.0
FAST_TRAVEL_RATE = 600.0
Y_MAX = 4064.0
X_MAX = 6096.0
comport = None
ser = None
z_offset = 0.0
x = 0.0
y = 0.0
z = 0.0
last_x = 0.0
last_y = 0.0
last_z = 0.0
microscope_leveling_startpoint = None
microscope_leveling_endpoint = None
connected = False
hasZeroBeenSet = False
exitRequested = False
xy_zero = (0.0,0.0)
manual_leveling_points = None
def __init__(self,comport):
self.comport = comport
try :
self.ser = serial.Serial(self.comport,9600,rtscts=1)
self.ser.close()
self.ser = None
self.connected = True
except serial.serialutil.SerialException as e :
print('Could not open '+comport)
self.connected = False
#sys.exit(1)
def sendCommand(self,cmd):
#print(cmd)
try :
self.ser = serial.Serial(self.comport,9600,rtscts=1)
txt = cmd + '\n'
self.ser.write(txt.encode('ascii'))
self.ser.close()
self.ser = None
except serial.serialutil.SerialException as e :
#print(e)
print('Error writing to '+self.comport)
self.connected = False
#sys.exit(1)
def sendMoveCommand(self,wait=False):
if self.x < 0.0 : self.x = 0.0
if self.x > self.X_MAX : self.x = self.X_MAX
if self.y < 0.0 : self.y = 0.0
if self.y > self.Y_MAX : self.y = self.Y_MAX
#print('Moving to {:.0f},{:.0f},{:.0f}'.format(self.x,self.y,self.z))
spindle = '1' if self.spindleEnabled else '0'
# The esoteric syntax was borrowed from https://github.com/Craftweeks/MDX-LabPanel
self.sendCommand('^DF;!MC{0};!PZ0,0;V15.0;Z{1:.3f},{2:.3f},{3:.3f};!MC{0};;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;'.format(spindle,self.x,self.y,self.z))
# Optional wait for move complete
dx = self.x - self.last_x
self.last_x = self.x
dy = self.y - self.last_y
self.last_y = self.y
dz = self.z - self.last_z
self.last_z = self.z
traveldist = math.sqrt(dx*dx+dy*dy+dz*dz)
if wait :
travelTime = traveldist / self.FAST_TRAVEL_RATE
time.sleep(travelTime)
#print('move done')
def run(self):
print('If the green light next to the VIEW button is lit, please press the VIEW button.')
print('Usage:')
print('\th - send to home')
print('\tz - Set Z zero')
print('\tZ - send to zero')
print('\twasd - move on the XY plane (+shift for small increments)')
print('\tup/down - move in the Z axis (+CTRL for medium increments, +ALT for large increments)')
print('\t1 - Set Microscope-based levelling starting point (both points must be set for autolevelling to happen)')
print('\t2 - Set Microscope-based levelling ending point')
print('\tm - Add manual levelling ending point (wrt zero, which must be set)')
print('\tq - Quit and move to next step.')
print('\tCTRL-C / ESC - Exit program.')
self.sendCommand('^IN;!MC0;H') # clear errors, disable spindle, return home
self.z_offset = self.Z_DEFAULT_OFFSET
self.sendCommand('^DF;!ZO{:.3f};;'.format(self.z_offset)) # set z zero half way
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.spindleEnabled = False
self.sendMoveCommand(True)
self.xy_zero = (0.0,0.0)
while True : #self.connected :
c = msvcrt.getwche()
n = 0
#print(c)
if c == '\xe0' or c == '\x00' :
c = msvcrt.getwche()
n = ord(c)
#print(c,n)
if ( c == 'q' and n == 0 ) :
if not self.hasZeroBeenSet :
print('Would you like to set the current position as the Zero (y/n)?')
c = msvcrt.getwch()
if c == 'y' or c == 'Y' :
self.setZeroHere()
print('Done')
return self.xy_zero
elif c == 'h' :
self.sendCommand('^DF;!MC0;H')
elif c == 'Z' :
(self.x,self.y) = self.xy_zero
self.z = 0.0
self.sendMoveCommand(True)
elif c == 'w' and n == 0 :
self.y += self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 's' and n == 0 :
self.y -= self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'd' and n == 0 :
self.x += self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'a' and n == 0 :
self.x -= self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'W' and n == 0 :
self.y += self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'S' and n == 0 :
self.y -= self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'D' and n == 0 :
self.x += self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'A' and n == 0 :
self.x -= self.XY_INCREMENTS
self.sendMoveCommand()
elif n == 72 : # up arrow
self.z += self.Z_INCREMENTS
self.sendMoveCommand()
elif n == 80 : # down arrow
self.z -= self.Z_INCREMENTS
self.sendMoveCommand()
elif n == 141 : # ctrl + up arrow
self.z += self.Z_INCREMENTS_MED
self.sendMoveCommand()
elif n == 145 : # ctrl + down arrow
self.z -= self.Z_INCREMENTS_MED
self.sendMoveCommand()
elif n == 152 : # alt + up arrow
self.z += self.Z_INCREMENTS_LARGE
self.sendMoveCommand()
elif n == 160 : # alt + down arrow
self.z -= self.Z_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'z' and n == 0 :
self.setZeroHere()
elif c == 'm' and n == 0 :
self.setLevelingPointHere()
elif n == 75 : # left arrow
#self.sendCommand('^DF;!MC0;') # disable spindle
self.spindleEnabled = False
self.sendMoveCommand()
elif n == 77 : # right arrow
#self.sendCommand('^DF;!MC1;') # enable spindle
self.spindleEnabled = True
self.sendMoveCommand()
elif c == '1' :
self.microscope_leveling_startpoint = (self.x,self.y,self.z)
print('Setting leveling point 1 ({:.3f},{:.3f},{:.3f})'.format(self.x,self.y,self.z))
elif c == '2' :
self.microscope_leveling_endpoint = (self.x,self.y,self.z)
print('Setting leveling point 2 ({:.3f},{:.3f},{:.3f})'.format(self.x,self.y,self.z))
elif ord(c) == 27 : # Esc
self.exitRequested = True
return self.xy_zero
elif ord(c) == 3 : # CTRL-C
self.exitRequested = True
return self.xy_zero
else :
print( 'you entered: ' + str(n if n != 0 else ord(c) ))
pass
return self.xy_zero
def setZeroHere(self) :
print('Setting zero')
self.z_offset = self.z_offset + self.z
self.z = 0.0
self.sendCommand('^DF;!ZO{:.3f};;'.format(self.z_offset)) # set z zero to current
self.xy_zero = (self.x,self.y)
self.hasZeroBeenSet = True
if self.manual_leveling_points != None :
print('Warning: previously set manual leveling points lost.')
self.manual_leveling_points = None
return self.xy_zero
def setLevelingPointHere(self):
if not self.hasZeroBeenSet :
print('Warning: zero must be set before setting the leveling point. Setting it here.')
self.setZeroHere()
else :
if self.manual_leveling_points == None:
self.manual_leveling_points = [ (self.xy_zero[0],self.xy_zero[1],0.0) ]
print('Adding leveling point {} ({:.3f},{:.3f},{:.3f})'.format(len(self.manual_leveling_points),self.x,self.y,self.z))
self.manual_leveling_points.append( (self.x,self.y,self.z) )
def getManualLevelingPoints(self):
return self.manual_leveling_points
def moveTo(self,x,y,z,wait=False):
self.x = x
self.y = y
self.z = z
self.sendMoveCommand(wait)
def getAutolevelingData(self, cam, steps=1, heightpoints=50) :
if self.microscope_leveling_startpoint != None and self.microscope_leveling_endpoint != None :
print(self.microscope_leveling_startpoint,self.microscope_leveling_endpoint)
(x1,y1,z1) = self.microscope_leveling_startpoint
(x2,y2,z2) = self.microscope_leveling_endpoint
startingHeight = z1 + heightpoints/2
self.moveTo(x1,y1,z1,wait=True) # Go to start
#print(p1,p2)
heights = [[(0,0,0) for i in range(steps+1)] for j in range(steps+1)]
for i in range(steps+1) :
for j in range(steps+1) :
#print(i,j)
fx = float(i) / (steps)
fy = float(j) / (steps)
px = x1 + (x2-x1) * fx
py = y1 + (y2-y1) * fy
#print(px,py)
#print(i,j,interpolatedPosition)
focusValues = []
self.moveTo(px,py,startingHeight+5,wait=True)
for k in range(heightpoints):
h = startingHeight - k * 1.0
self.moveTo(px,py,h,wait=False)
time.sleep(0.033) # Take some time for focus value to settle
focusval = cam.getFocusValue()
#print(focusval)
focusValues.append( focusval )
#print(focusValues)
maxrank = numpy.argmax(focusValues)
self.moveTo(px,py,startingHeight-maxrank*1.0,wait=True)
# # TODO: Find max focus height position using curve fit
# poly_rank = 7
# focusValues_indexes = range(len(focusValues))
# polynomial = numpy.poly1d(numpy.polyfit(focusValues_indexes,focusValues,poly_rank))
# numpts = 500
# maxrank_high = numpy.argmax(polynomial(numpy.linspace(0, steps, numpts)))
# maxrank = ( maxrank_high / (numpts-1) ) * steps
# print(px,py,maxrank_high,maxrank)
heights[i][j] = ( px,py, maxrank)
# Bias results relative to initial point, at origin
(x0,y0,home_rank) = heights[0][0]
for i in range(steps+1) :
for j in range(steps+1) :
(x,y,r) = heights[i][j]
x = x - x0
y = y - y0
r = r - home_rank
heights[i][j] = (x,y,r)
#print(heights)
for col in heights :
print(col)
return heights
return None
##################################################
class MicroscopeFeed:
loopthread = None
threadlock = None
endLoopRequest = False
focusValue = 0.0
vidcap = None
connected = False
def __init__(self,channel):
self.channel = channel
self.threadlock = threading.Lock()
self.loopthread = threading.Thread(target=self.loopThread)
self.vidcap = cv2.VideoCapture(self.channel)
if self.vidcap.isOpened() :
self.connected = True
else :
print('Microscope connection could not be established.')
def isConnected(self):
return self.connected
def startLoop(self):
self.loopthread.start()
def loopThread(self):
if not self.vidcap.isOpened() : return
smoothed_laplacian_variance = 0.0
while True :
chk,frame = self.vidcap.read()
height, width = frame.shape[:2]
sz = 0.20 * width
x0 = int(width/2 - sz/2)
x1 = int(width/2 + sz/2)
y0 = int(height/2 - sz/2)
y1 = int(height/2 + sz/2)
center_frame = frame[ y0:y1, x0:x1 ]
center_gray = cv2.cvtColor(center_frame, cv2.COLOR_BGR2GRAY)
#cv2.imshow('center',center_gray)
laplacian = cv2.Laplacian(center_gray,cv2.CV_64F)
#cv2.imshow('laplacian',laplacian)
v = laplacian.var()
#smoothed_v_factor = 0.25
smoothed_v_factor = 0.50
smoothed_laplacian_variance = v * smoothed_v_factor + smoothed_laplacian_variance * (1.0-smoothed_v_factor)
#print('{:.0f} - {:.0f}'.format(v,smoothed_laplacian_variance))
cv2.rectangle(frame, (x0, y0), (x1, y1),(0,255,0), 2)
#textpos = (x0, y0)
textpos = (10, 20)
cv2.putText(frame, 'v = {:.2f} {:.2f}'.format(v,smoothed_laplacian_variance),textpos,cv2.FONT_HERSHEY_DUPLEX,0.8,(225,0,0))
cv2.namedWindow('vidcap', cv2.WINDOW_NORMAL)
cv2.imshow('vidcap',frame)
cv2.waitKey(1) # Required for video to be displayed
with self.threadlock :
self.focusValue = smoothed_laplacian_variance
if self.endLoopRequest :
self.vidcap.release()
cv2.destroyAllWindows()
break
def endLoop(self):
with self.threadlock :
self.endLoopRequest = True
self.loopthread.join()
def getFocusValue(self):
f = 0.0
with self.threadlock :
f = self.focusValue
return f
##################################################
def main():
import optparse
parser = optparse.OptionParser('usage%prog -i <input file>')
parser.add_option('-i', '--infile', dest='infile', default='', help='The input gcode file, as exported by FlatCam.')
parser.add_option('-o', '--outfile', dest='outfile', default='', help='The output RML-1 file.')
parser.add_option("-z", '--zero', dest='zero', action="store_true", default=False, help='Zero the print head on the work surface.')
#parser.add_option('-s', '--serialport', dest='serialport', default='', help='The com port for the MDX-15. (Default: obtained from the printer driver)')
parser.add_option("-p", '--print', dest='print', action="store_true", default=False, help='Prints the RML-1 data.')
parser.add_option('-n', '--printerName', dest='printerName', default='Roland MODELA MDX-15', help='The windows printer name. (Default: Roland MODELA MDX-15)')
parser.add_option('-f', '--feedspeedfactor', dest='feedspeedfactor', default=1.0, help='Feed rate scaling factor (Default: 1.0)')
parser.add_option('--backlashX', dest='backlashX', default=0.0, help='Backlash compensation in X direction (in steps).')
parser.add_option('--backlashY', dest='backlashY', default=0.0, help='Backlash compensation in y direction (in steps).')
parser.add_option('--backlashZ', dest='backlashZ', default=0.0, help='Backlash compensation in z direction (in steps).')
parser.add_option('--levelingsegments', dest='levelingsegments', default=1, help='Number of segments to split the work area for microscope-based leveling. (Default: 1)')
parser.add_option('-m','--microscope', dest='microscope', default=False, help='Enable microscope on channel N')
(options,args) = parser.parse_args()
#print(options)
debugmode = False
# Find serial port number using the printer driver.
serialport = ''
if options.zero : # Printer driver is only required if we want to set the zero
|
# Start microscope feed if requested
mic = None
if options.microscope != False :
mic = MicroscopeFeed( int(options.microscope) )
mic.startLoop()
#msvcrt.getwch()
#print( mic.getFocusValue() )
try:
# Manually set zero and microscope set points
x_offset = 0.0
y_offset = 0.0
modelaZeroControl = None
manualLevelingPoints = None
if options.zero :
modelaZeroControl = ModelaZeroControl(serialport)
if modelaZeroControl.connected or debugmode :
print('Setting Zero')
(x_offset,y_offset) = modelaZeroControl.run()
manualLevelingPoints = modelaZeroControl.getManualLevelingPoints()
if modelaZeroControl.exitRequested :
print('Terminating program.')
sys.exit(1)
else :
print('Could not connect to the printer to set the zero.')
# Find bed level using microscope focus
levelingData = None
if mic != None and mic.isConnected() and modelaZeroControl != None :
try:
levelingData = modelaZeroControl.getAutolevelingData(mic, steps=int(options.levelingsegments) )
except KeyboardInterrupt :
print('Leveling cancelled, terminating program.')
sys.exit(1)
# gcode to rml conversion
if options.infile != '' :
if options.outfile == '' : options.outfile = options.infile + '.prn'
print('Converting {} to {}'.format(options.infile,options.outfile))
converter = GCode2RmlConverter(x_offset, y_offset, float(options.feedspeedfactor), float(options.backlashX), float(options.backlashY), float(options.backlashZ), levelingData, manualLevelingPoints )
converter.convertFile( options.infile, options.outfile )
# Send RML code to the printer driver.
if options.print :
if options.outfile != '' :
print('Are you ready to print (y/n)?')
c = msvcrt.getwch()
if c == 'y' or c == 'Y' :
print('Printing: '+options.outfile)
os.system('RawFileToPrinter.exe "{}" "{}"'.format(options.outfile,options.printerName))
print('Procedure to cancel printing:')
print('1) Press the VIEW button on the printer.')
print('2) Cancel the print job(s) in windows. (start->Devices and Printers->...)')
print('3) Remove the usb cable to the printer.')
print('4) Press both the UP and Down buttons on the printer.')
print('5) When VIEW light stops blinking, press the VIEW butotn.')
print('6) Plug the usb cable back in.')
if mic != None and mic.isConnected() :
# Don't exit now if the camera is connected, in case we want visual feedback
print('Press any key to exit.')
msvcrt.getwch()
else :
print('Error: No file to be printed.')
except Exception as e:
#print(e)
traceback.print_exc()
# Release video stream
if mic != None :
mic.endLoop()
if __name__ == "__main__":
if sys.version_info[0] < 3 :
print("This script requires Python version 3")
sys.exit(1)
main() | import subprocess
shelloutput = subprocess.check_output('powershell -Command "(Get-WmiObject Win32_Printer -Filter \\"Name=\'{}\'\\").PortName"'.format(options.printerName))
if len(shelloutput)>0 :
try :
serialport = shelloutput.decode('utf-8').split(':')[0]
print( 'Found {} printer driver ({})'.format(options.printerName,serialport) )
except:
print('Error parsing com port: ' + str(shelloutput) )
else :
print('Could not find the printer driver for: ' + options.printerName)
if not debugmode :
sys.exit(1) | conditional_block |
mdx15_print_gerber.py | #
# Print a gerber file to the MDX-15, optionally setting the home position
#
# Note: Uses RawFileToPrinter.exe as found at http://www.columbia.edu/~em36/windowsrawprint.html
# Note: Might work with other Roland Modela Models (MDX-20), but I don't have access to such machines, so I cannot test.
#
#
# MIT License
#
# Copyright (c) 2018 Charles Donohue
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import os
import time
import sys
import threading
import traceback
import math
import msvcrt
import serial
import cv2
import numpy
class | :
# stateful variables
inputConversionFactor = 1.0 # mm units
X = 0.0
Y = 0.0
Z = 0.0
speedmode = None
feedrate = 0.0
isFirstCommand = True
offset_x = 0.0
offset_y = 0.0
feedspeedfactor = 1.0
# Backlash compensation related
backlashX = 0
backlashY = 0
backlashZ = 0
last_x = 0
last_y = 0
last_z = 0
last_displacement_x = 0.0
last_displacement_y = 0.0
last_displacement_z = 0.0
backlash_compensation_x = 0.0
backlash_compensation_y = 0.0
backlash_compensation_z = 0.0
epsilon = 0.001
levelingData = None
manualLevelingPoints = None
def __init__(self,offset_x,offset_y,feedspeedfactor,backlashX,backlashY,backlashZ,levelingData,manualLevelingPoints):
self.moveCommandParseRegex = re.compile(r'G0([01])\s(X([-+]?\d*\.*\d+\s*))?(Y([-+]?\d*\.*\d+\s*))?(Z([-+]?\d*\.*\d+\s*))?')
self.offset_x = offset_x
self.offset_y = offset_y
self.feedspeedfactor = feedspeedfactor
self.backlashX = backlashX
self.backlashY = backlashY
self.backlashZ = backlashZ
self.levelingData = levelingData
self.manualLevelingPoints = manualLevelingPoints
def digestStream(self, lineIterator):
outputCommands = []
for line in lineIterator :
outputCommands.extend( self.digestLine(line) )
return outputCommands
def digestLine(self,line):
outputCommands = []
if self.isFirstCommand :
self.isFirstCommand = False
# Initialization commands
outputCommands.append('^DF') # set to defaults
#outputCommands.append('! 1;Z 0,0,813') # not sure what this does. Maybe starts the spindle? TODO: Try without.
line = line.rstrip() # strip line endings
#print('cmd: '+line)
if line == None or len(line) == 0 :
pass # empty line
elif line.startswith('(') :
pass # comment line
elif line == 'G20' : # units as inches
self.inputConversionFactor = 25.4
elif line == 'G21' : # units as mm
self.inputConversionFactor = 1.0
elif line == 'G90' : # absolute mode
pass # implied
elif line == 'G94' : # Feed rate units per minute mode
pass # implied
elif line == 'M03' : # spindle on
pass
elif line == 'M05' : # spindle off
outputCommands.append('^DF;!MC0;')
outputCommands.append('H')
elif line.startswith('G01 F'): # in flatcam 2018, the feed rate is set in a move command
self.feedrate = float(line[5:])
elif line.startswith('G00') or line.startswith('G01'): # move
outputCommands.extend( self.processMoveCommand(line) )
elif line.startswith('G4 P'): # dwell
dwelltime = int(line[4:])
outputCommands.append('W {}'.format( dwelltime ) )
elif line.startswith('F'): # feed rate
self.feedrate = float(line[1:])
# ...
else :
print('Unrecognized command: ' + line)
pass
return outputCommands
def getHeightFor3PointPlane( self, p1,p2,p3, x, y ):
x1, y1, z1 = p1
x2, y2, z2 = p2
x3, y3, z3 = p3
v1 = [x3 - x1, y3 - y1, z3 - z1]
v2 = [x2 - x1, y2 - y1, z2 - z1]
cp = [v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0]]
a, b, c = cp
d = a * x1 + b * y1 + c * z1
z = (d - a * x - b * y) / float(c)
return z
def processMoveCommand(self, line):
#print(line)
outputCommands = []
g = self.moveCommandParseRegex.match(line)
if self.speedmode != g.group(1) :
self.speedmode = g.group(1)
#print( 'speed changed: ' + self.speedmode )
f = self.feedrate * self.inputConversionFactor * self.feedspeedfactor / 60.0 # convert to mm per second
if self.speedmode == '0' : f = 16.0 # fast mode
outputCommands.append('V {0:.2f};F {0:.2f}'.format(f))
if g.group(3) != None : self.X = float(g.group(3)) * self.inputConversionFactor
if g.group(5) != None : self.Y = float(g.group(5)) * self.inputConversionFactor
if g.group(7) != None : self.Z = float(g.group(7)) * self.inputConversionFactor
#outputScale = 1 / 0.01
outputScale = 1 / 0.025
# Z height correction
z_correction = 0.0
if self.levelingData != None :
n = len( self.levelingData[0] )
px = self.X*outputScale #+self.offset_x
py = self.Y*outputScale #+self.offset_y
# Find quadrant in which point lies
i = 0
j = 0
while i < (n-2) :
if px >= (self.levelingData[i][j][0]-self.epsilon) and px < self.levelingData[i+1][j][0] : break
i = i+1
while j < (n-2) :
if py >= (self.levelingData[i][j][1]-self.epsilon) and py < self.levelingData[i][j+1][1] : break
j = j+1
# interpolate values
px0 = self.levelingData[i][j][0]
px1 = self.levelingData[i+1][j][0]
fx = (px - px0) / (px1 - px0)
h00 = self.levelingData[i][j][2]
h10 = self.levelingData[i+1][j][2]
h0 = h00 + (h10 - h00) * fx
h01 = self.levelingData[i][j+1][2]
h11 = self.levelingData[i+1][j+1][2]
h1 = h01 + (h11 - h01) * fx
py0 = self.levelingData[i][j][1]
py1 = self.levelingData[i][j+1][1]
fy = (py - py0) / (py1 - py0)
h = h0 + (h1 - h0) * fy
#print(px,py,i,j,fx,fy,self.Z,h,h/outputScale)
z_correction = -h
# Apply compensation to Z
#self.Z = self.Z - h/outputScale
# Manual leveling points
elif self.manualLevelingPoints != None :
if len(self.manualLevelingPoints) < 3 :
pass # At least 3 points required
else :
px = self.X*outputScale #+self.offset_x
py = self.Y*outputScale #+self.offset_y
h = self.getHeightFor3PointPlane( self.manualLevelingPoints[0], self.manualLevelingPoints[1], self.manualLevelingPoints[2], px, py )
z_correction = +h
pass
# Backlash handling in X
if abs(self.backlashX) > self.epsilon :
deltaX = self.X - self.last_x
if abs(deltaX) > self.epsilon : # non-zero move in that axis
if deltaX * self.last_displacement_x < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_x = 0.0 if deltaX > 0 else -self.backlashX
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
self.last_displacement_x = deltaX;
# Backlash handling in Y
if abs(self.backlashY) > self.epsilon :
deltaY = self.Y - self.last_y
if abs(deltaY) > self.epsilon : # non-zero move in that axis
if deltaY * self.last_displacement_y < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_y = 0.0 if deltaY > 0 else -self.backlashY
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
				self.last_displacement_y = deltaY
# Backlash handling in Z
if abs(self.backlashZ) > self.epsilon :
deltaZ = self.Z - self.last_z
if abs(deltaZ) > self.epsilon : # non-zero move in that axis
if deltaZ * self.last_displacement_z < 0 : # direction changed
# move to last position with offset in new move dir
self.backlash_compensation_z = 0.0 if deltaZ > 0 else -self.backlashZ
					outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.last_x*outputScale+self.offset_x+self.backlash_compensation_x,self.last_y*outputScale+self.offset_y+self.backlash_compensation_y,self.last_z*outputScale+self.backlash_compensation_z+z_correction))
				self.last_displacement_z = deltaZ
self.last_x = self.X
self.last_y = self.Y
self.last_z = self.Z
# Send move command
outputCommands.append('Z {:.0f},{:.0f},{:.0f}'.format(self.X*outputScale+self.offset_x+self.backlash_compensation_x, self.Y*outputScale+self.offset_y+self.backlash_compensation_y, self.Z*outputScale+self.backlash_compensation_z+z_correction))
return outputCommands
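	# The per-axis backlash rule used above, as a minimal standalone sketch
	# (this hypothetical helper is not used by the class): when the travel
	# direction on an axis reverses, the output is shifted by the full
	# backlash amount so the mechanics take up the slack before cutting.
	#
	#	def backlash_offset(delta, backlash):
	#		# offset applied after a direction change on this axis
	#		return 0.0 if delta > 0 else -backlash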
	def convertFile(self,infile,outfile):
		# TODO: Handle XY offsets
		with open(infile) as inputdata :
			outputCommands = self.digestStream(inputdata)
		with open(outfile,'w') as out :
			for cmd in outputCommands :
				out.write(cmd)
				out.write('\n')
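	# Example usage, as a sketch (paths are hypothetical; the constructor
	# arguments mirror the call made in main() below):
	#
	#	converter = GCode2RmlConverter(0.0, 0.0, 1.0, 0.0, 0.0, 0.0, None, None)
	#	converter.convertFile('board.gcode', 'board.prn')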
##################################################
class ModelaZeroControl:
# Constants
XY_INCREMENTS = 1
XY_INCREMENTS_LARGE= 100
Z_INCREMENTS = 1
Z_INCREMENTS_MED = 10
Z_INCREMENTS_LARGE = 100
Z_DEFAULT_OFFSET = -1300.0
FAST_TRAVEL_RATE = 600.0
Y_MAX = 4064.0
X_MAX = 6096.0
comport = None
ser = None
z_offset = 0.0
x = 0.0
y = 0.0
z = 0.0
last_x = 0.0
last_y = 0.0
last_z = 0.0
microscope_leveling_startpoint = None
microscope_leveling_endpoint = None
connected = False
hasZeroBeenSet = False
exitRequested = False
xy_zero = (0.0,0.0)
manual_leveling_points = None
def __init__(self,comport):
self.comport = comport
try :
self.ser = serial.Serial(self.comport,9600,rtscts=1)
self.ser.close()
self.ser = None
self.connected = True
except serial.serialutil.SerialException as e :
print('Could not open '+comport)
self.connected = False
#sys.exit(1)
def sendCommand(self,cmd):
#print(cmd)
try :
self.ser = serial.Serial(self.comport,9600,rtscts=1)
txt = cmd + '\n'
self.ser.write(txt.encode('ascii'))
self.ser.close()
self.ser = None
except serial.serialutil.SerialException as e :
#print(e)
print('Error writing to '+self.comport)
self.connected = False
#sys.exit(1)
def sendMoveCommand(self,wait=False):
if self.x < 0.0 : self.x = 0.0
if self.x > self.X_MAX : self.x = self.X_MAX
if self.y < 0.0 : self.y = 0.0
if self.y > self.Y_MAX : self.y = self.Y_MAX
#print('Moving to {:.0f},{:.0f},{:.0f}'.format(self.x,self.y,self.z))
spindle = '1' if self.spindleEnabled else '0'
# The esoteric syntax was borrowed from https://github.com/Craftweeks/MDX-LabPanel
self.sendCommand('^DF;!MC{0};!PZ0,0;V15.0;Z{1:.3f},{2:.3f},{3:.3f};!MC{0};;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;'.format(spindle,self.x,self.y,self.z))
# Optional wait for move complete
dx = self.x - self.last_x
self.last_x = self.x
dy = self.y - self.last_y
self.last_y = self.y
dz = self.z - self.last_z
self.last_z = self.z
traveldist = math.sqrt(dx*dx+dy*dy+dz*dz)
if wait :
travelTime = traveldist / self.FAST_TRAVEL_RATE
time.sleep(travelTime)
#print('move done')
def run(self):
print('If the green light next to the VIEW button is lit, please press the VIEW button.')
print('Usage:')
print('\th - send to home')
print('\tz - Set Z zero')
print('\tZ - send to zero')
print('\twasd - move on the XY plane (+shift for small increments)')
		print('\tup/down - move in the Z axis (+CTRL for medium increments, +ALT for large increments)')
		print('\tleft/right - disable/enable the spindle')
print('\t1 - Set Microscope-based levelling starting point (both points must be set for autolevelling to happen)')
print('\t2 - Set Microscope-based levelling ending point')
		print('\tm - Add a manual levelling point (wrt zero, which must be set)')
print('\tq - Quit and move to next step.')
print('\tCTRL-C / ESC - Exit program.')
self.sendCommand('^IN;!MC0;H') # clear errors, disable spindle, return home
self.z_offset = self.Z_DEFAULT_OFFSET
self.sendCommand('^DF;!ZO{:.3f};;'.format(self.z_offset)) # set z zero half way
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.spindleEnabled = False
self.sendMoveCommand(True)
self.xy_zero = (0.0,0.0)
while True : #self.connected :
c = msvcrt.getwche()
n = 0
#print(c)
if c == '\xe0' or c == '\x00' :
c = msvcrt.getwche()
n = ord(c)
#print(c,n)
if ( c == 'q' and n == 0 ) :
if not self.hasZeroBeenSet :
print('Would you like to set the current position as the Zero (y/n)?')
c = msvcrt.getwch()
if c == 'y' or c == 'Y' :
self.setZeroHere()
print('Done')
return self.xy_zero
elif c == 'h' :
self.sendCommand('^DF;!MC0;H')
elif c == 'Z' :
(self.x,self.y) = self.xy_zero
self.z = 0.0
self.sendMoveCommand(True)
elif c == 'w' and n == 0 :
self.y += self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 's' and n == 0 :
self.y -= self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'd' and n == 0 :
self.x += self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'a' and n == 0 :
self.x -= self.XY_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'W' and n == 0 :
self.y += self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'S' and n == 0 :
self.y -= self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'D' and n == 0 :
self.x += self.XY_INCREMENTS
self.sendMoveCommand()
elif c == 'A' and n == 0 :
self.x -= self.XY_INCREMENTS
self.sendMoveCommand()
elif n == 72 : # up arrow
self.z += self.Z_INCREMENTS
self.sendMoveCommand()
elif n == 80 : # down arrow
self.z -= self.Z_INCREMENTS
self.sendMoveCommand()
elif n == 141 : # ctrl + up arrow
self.z += self.Z_INCREMENTS_MED
self.sendMoveCommand()
elif n == 145 : # ctrl + down arrow
self.z -= self.Z_INCREMENTS_MED
self.sendMoveCommand()
elif n == 152 : # alt + up arrow
self.z += self.Z_INCREMENTS_LARGE
self.sendMoveCommand()
elif n == 160 : # alt + down arrow
self.z -= self.Z_INCREMENTS_LARGE
self.sendMoveCommand()
elif c == 'z' and n == 0 :
self.setZeroHere()
elif c == 'm' and n == 0 :
self.setLevelingPointHere()
elif n == 75 : # left arrow
#self.sendCommand('^DF;!MC0;') # disable spindle
self.spindleEnabled = False
self.sendMoveCommand()
elif n == 77 : # right arrow
#self.sendCommand('^DF;!MC1;') # enable spindle
self.spindleEnabled = True
self.sendMoveCommand()
elif c == '1' :
self.microscope_leveling_startpoint = (self.x,self.y,self.z)
print('Setting leveling point 1 ({:.3f},{:.3f},{:.3f})'.format(self.x,self.y,self.z))
elif c == '2' :
self.microscope_leveling_endpoint = (self.x,self.y,self.z)
print('Setting leveling point 2 ({:.3f},{:.3f},{:.3f})'.format(self.x,self.y,self.z))
elif ord(c) == 27 : # Esc
self.exitRequested = True
return self.xy_zero
elif ord(c) == 3 : # CTRL-C
self.exitRequested = True
return self.xy_zero
else :
				print( 'you entered: ' + str(n if n != 0 else ord(c) ))
return self.xy_zero
def setZeroHere(self) :
print('Setting zero')
self.z_offset = self.z_offset + self.z
self.z = 0.0
self.sendCommand('^DF;!ZO{:.3f};;'.format(self.z_offset)) # set z zero to current
self.xy_zero = (self.x,self.y)
self.hasZeroBeenSet = True
		if self.manual_leveling_points is not None :
print('Warning: previously set manual leveling points lost.')
self.manual_leveling_points = None
return self.xy_zero
def setLevelingPointHere(self):
if not self.hasZeroBeenSet :
print('Warning: zero must be set before setting the leveling point. Setting it here.')
self.setZeroHere()
else :
			if self.manual_leveling_points is None:
self.manual_leveling_points = [ (self.xy_zero[0],self.xy_zero[1],0.0) ]
print('Adding leveling point {} ({:.3f},{:.3f},{:.3f})'.format(len(self.manual_leveling_points),self.x,self.y,self.z))
self.manual_leveling_points.append( (self.x,self.y,self.z) )
def getManualLevelingPoints(self):
return self.manual_leveling_points
def moveTo(self,x,y,z,wait=False):
self.x = x
self.y = y
self.z = z
self.sendMoveCommand(wait)
def getAutolevelingData(self, cam, steps=1, heightpoints=50) :
		if self.microscope_leveling_startpoint is not None and self.microscope_leveling_endpoint is not None :
print(self.microscope_leveling_startpoint,self.microscope_leveling_endpoint)
(x1,y1,z1) = self.microscope_leveling_startpoint
(x2,y2,z2) = self.microscope_leveling_endpoint
startingHeight = z1 + heightpoints/2
self.moveTo(x1,y1,z1,wait=True) # Go to start
#print(p1,p2)
heights = [[(0,0,0) for i in range(steps+1)] for j in range(steps+1)]
for i in range(steps+1) :
for j in range(steps+1) :
#print(i,j)
fx = float(i) / (steps)
fy = float(j) / (steps)
px = x1 + (x2-x1) * fx
py = y1 + (y2-y1) * fy
#print(px,py)
#print(i,j,interpolatedPosition)
focusValues = []
self.moveTo(px,py,startingHeight+5,wait=True)
for k in range(heightpoints):
h = startingHeight - k * 1.0
self.moveTo(px,py,h,wait=False)
time.sleep(0.033) # Take some time for focus value to settle
focusval = cam.getFocusValue()
#print(focusval)
focusValues.append( focusval )
#print(focusValues)
maxrank = numpy.argmax(focusValues)
self.moveTo(px,py,startingHeight-maxrank*1.0,wait=True)
# # TODO: Find max focus height position using curve fit
# poly_rank = 7
# focusValues_indexes = range(len(focusValues))
# polynomial = numpy.poly1d(numpy.polyfit(focusValues_indexes,focusValues,poly_rank))
# numpts = 500
# maxrank_high = numpy.argmax(polynomial(numpy.linspace(0, steps, numpts)))
# maxrank = ( maxrank_high / (numpts-1) ) * steps
# print(px,py,maxrank_high,maxrank)
heights[i][j] = ( px,py, maxrank)
# Bias results relative to initial point, at origin
(x0,y0,home_rank) = heights[0][0]
for i in range(steps+1) :
for j in range(steps+1) :
(x,y,r) = heights[i][j]
x = x - x0
y = y - y0
r = r - home_rank
heights[i][j] = (x,y,r)
#print(heights)
for col in heights :
print(col)
return heights
return None
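	# Minimal sketch of the focus-maximisation step above: sample the focus
	# metric at fixed 1.0-step height decrements and keep the sharpest one
	# (the sample values here are hypothetical):
	#
	#	focusValues = [3.1, 8.4, 15.2, 9.0, 2.7]
	#	maxrank = numpy.argmax(focusValues)           # -> 2
	#	best_height = startingHeight - maxrank * 1.0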
##################################################
class MicroscopeFeed:
loopthread = None
threadlock = None
endLoopRequest = False
focusValue = 0.0
vidcap = None
connected = False
def __init__(self,channel):
self.channel = channel
self.threadlock = threading.Lock()
self.loopthread = threading.Thread(target=self.loopThread)
self.vidcap = cv2.VideoCapture(self.channel)
if self.vidcap.isOpened() :
self.connected = True
else :
print('Microscope connection could not be established.')
def isConnected(self):
return self.connected
def startLoop(self):
self.loopthread.start()
def loopThread(self):
if not self.vidcap.isOpened() : return
smoothed_laplacian_variance = 0.0
while True :
chk,frame = self.vidcap.read()
height, width = frame.shape[:2]
sz = 0.20 * width
x0 = int(width/2 - sz/2)
x1 = int(width/2 + sz/2)
y0 = int(height/2 - sz/2)
y1 = int(height/2 + sz/2)
center_frame = frame[ y0:y1, x0:x1 ]
center_gray = cv2.cvtColor(center_frame, cv2.COLOR_BGR2GRAY)
#cv2.imshow('center',center_gray)
laplacian = cv2.Laplacian(center_gray,cv2.CV_64F)
#cv2.imshow('laplacian',laplacian)
v = laplacian.var()
#smoothed_v_factor = 0.25
smoothed_v_factor = 0.50
smoothed_laplacian_variance = v * smoothed_v_factor + smoothed_laplacian_variance * (1.0-smoothed_v_factor)
#print('{:.0f} - {:.0f}'.format(v,smoothed_laplacian_variance))
cv2.rectangle(frame, (x0, y0), (x1, y1),(0,255,0), 2)
#textpos = (x0, y0)
textpos = (10, 20)
cv2.putText(frame, 'v = {:.2f} {:.2f}'.format(v,smoothed_laplacian_variance),textpos,cv2.FONT_HERSHEY_DUPLEX,0.8,(225,0,0))
cv2.namedWindow('vidcap', cv2.WINDOW_NORMAL)
cv2.imshow('vidcap',frame)
cv2.waitKey(1) # Required for video to be displayed
with self.threadlock :
self.focusValue = smoothed_laplacian_variance
if self.endLoopRequest :
self.vidcap.release()
cv2.destroyAllWindows()
break
def endLoop(self):
with self.threadlock :
self.endLoopRequest = True
self.loopthread.join()
def getFocusValue(self):
f = 0.0
with self.threadlock :
f = self.focusValue
return f
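	# The focus metric maintained by loopThread is the variance of the
	# Laplacian of a grayscale centre crop; as a standalone sketch (assumes
	# an OpenCV BGR image named frame):
	#
	#	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	#	focus = cv2.Laplacian(gray, cv2.CV_64F).var()  # higher means sharper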
##################################################
def main():
import optparse
	parser = optparse.OptionParser('usage: %prog -i <input file>')
parser.add_option('-i', '--infile', dest='infile', default='', help='The input gcode file, as exported by FlatCam.')
parser.add_option('-o', '--outfile', dest='outfile', default='', help='The output RML-1 file.')
parser.add_option("-z", '--zero', dest='zero', action="store_true", default=False, help='Zero the print head on the work surface.')
#parser.add_option('-s', '--serialport', dest='serialport', default='', help='The com port for the MDX-15. (Default: obtained from the printer driver)')
parser.add_option("-p", '--print', dest='print', action="store_true", default=False, help='Prints the RML-1 data.')
parser.add_option('-n', '--printerName', dest='printerName', default='Roland MODELA MDX-15', help='The windows printer name. (Default: Roland MODELA MDX-15)')
parser.add_option('-f', '--feedspeedfactor', dest='feedspeedfactor', default=1.0, help='Feed rate scaling factor (Default: 1.0)')
parser.add_option('--backlashX', dest='backlashX', default=0.0, help='Backlash compensation in X direction (in steps).')
	parser.add_option('--backlashY', dest='backlashY', default=0.0, help='Backlash compensation in Y direction (in steps).')
	parser.add_option('--backlashZ', dest='backlashZ', default=0.0, help='Backlash compensation in Z direction (in steps).')
parser.add_option('--levelingsegments', dest='levelingsegments', default=1, help='Number of segments to split the work area for microscope-based leveling. (Default: 1)')
parser.add_option('-m','--microscope', dest='microscope', default=False, help='Enable microscope on channel N')
(options,args) = parser.parse_args()
#print(options)
debugmode = False
# Find serial port number using the printer driver.
serialport = ''
if options.zero : # Printer driver is only required if we want to set the zero
import subprocess
shelloutput = subprocess.check_output('powershell -Command "(Get-WmiObject Win32_Printer -Filter \\"Name=\'{}\'\\").PortName"'.format(options.printerName))
if len(shelloutput)>0 :
try :
serialport = shelloutput.decode('utf-8').split(':')[0]
print( 'Found {} printer driver ({})'.format(options.printerName,serialport) )
except:
print('Error parsing com port: ' + str(shelloutput) )
else :
print('Could not find the printer driver for: ' + options.printerName)
if not debugmode :
sys.exit(1)
# Start microscope feed if requested
mic = None
	if options.microscope is not False :
mic = MicroscopeFeed( int(options.microscope) )
mic.startLoop()
#msvcrt.getwch()
#print( mic.getFocusValue() )
try:
# Manually set zero and microscope set points
x_offset = 0.0
y_offset = 0.0
modelaZeroControl = None
manualLevelingPoints = None
if options.zero :
modelaZeroControl = ModelaZeroControl(serialport)
if modelaZeroControl.connected or debugmode :
print('Setting Zero')
(x_offset,y_offset) = modelaZeroControl.run()
manualLevelingPoints = modelaZeroControl.getManualLevelingPoints()
if modelaZeroControl.exitRequested :
print('Terminating program.')
sys.exit(1)
else :
print('Could not connect to the printer to set the zero.')
# Find bed level using microscope focus
levelingData = None
		if mic is not None and mic.isConnected() and modelaZeroControl is not None :
try:
levelingData = modelaZeroControl.getAutolevelingData(mic, steps=int(options.levelingsegments) )
except KeyboardInterrupt :
print('Leveling cancelled, terminating program.')
sys.exit(1)
# gcode to rml conversion
if options.infile != '' :
if options.outfile == '' : options.outfile = options.infile + '.prn'
print('Converting {} to {}'.format(options.infile,options.outfile))
converter = GCode2RmlConverter(x_offset, y_offset, float(options.feedspeedfactor), float(options.backlashX), float(options.backlashY), float(options.backlashZ), levelingData, manualLevelingPoints )
converter.convertFile( options.infile, options.outfile )
# Send RML code to the printer driver.
if options.print :
if options.outfile != '' :
print('Are you ready to print (y/n)?')
c = msvcrt.getwch()
if c == 'y' or c == 'Y' :
print('Printing: '+options.outfile)
os.system('RawFileToPrinter.exe "{}" "{}"'.format(options.outfile,options.printerName))
print('Procedure to cancel printing:')
print('1) Press the VIEW button on the printer.')
print('2) Cancel the print job(s) in windows. (start->Devices and Printers->...)')
print('3) Remove the usb cable to the printer.')
print('4) Press both the UP and Down buttons on the printer.')
					print('5) When the VIEW light stops blinking, press the VIEW button.')
print('6) Plug the usb cable back in.')
				if mic is not None and mic.isConnected() :
# Don't exit now if the camera is connected, in case we want visual feedback
print('Press any key to exit.')
msvcrt.getwch()
else :
print('Error: No file to be printed.')
except Exception as e:
#print(e)
traceback.print_exc()
# Release video stream
	if mic is not None :
mic.endLoop()
if __name__ == "__main__":
if sys.version_info[0] < 3 :
print("This script requires Python version 3")
sys.exit(1)
	main()
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Author: [email protected] (Joel Sing)
/*
Package engine implements the Seesaw v2 engine component, which is
responsible for maintaining configuration information, handling state
transitions and providing communication between Seesaw v2 components.
*/
package engine
import (
"errors"
"fmt"
"net"
"net/rpc"
"os"
"sync"
"time"
"github.com/google/seesaw/common/seesaw"
"github.com/google/seesaw/common/server"
"github.com/google/seesaw/engine/config"
ncclient "github.com/google/seesaw/ncc/client"
ncctypes "github.com/google/seesaw/ncc/types"
spb "github.com/google/seesaw/pb/seesaw"
log "github.com/golang/glog"
)
const (
fwmAllocBase = 1 << 8
fwmAllocSize = 8000
)
// Engine contains the data necessary to run the Seesaw v2 Engine.
type Engine struct {
config *config.EngineConfig
notifier *config.Notifier
fwmAlloc *markAllocator
bgpManager *bgpManager
haManager *haManager
hcManager *healthcheckManager
ncc ncclient.NCC
lbInterface ncclient.LBInterface
cluster *config.Cluster
clusterLock sync.RWMutex
shutdown chan bool
shutdownARP chan bool
shutdownIPC chan bool
shutdownRPC chan bool
syncClient *syncClient
syncServer *syncServer
overrides map[string]seesaw.Override
overrideChan chan seesaw.Override
vlans map[uint16]*seesaw.VLAN
vlanLock sync.RWMutex
vservers map[string]*vserver
vserverAccess *vserverAccess
vserverSnapshots map[string]*seesaw.Vserver
vserverLock sync.RWMutex
vserverChan chan *seesaw.Vserver
startTime time.Time
arpMap map[string][]net.IP // iface name -> IP list
arpLock sync.Mutex
}
func newEngineWithNCC(cfg *config.EngineConfig, ncc ncclient.NCC) *Engine {
if cfg == nil {
defaultCfg := config.DefaultEngineConfig()
cfg = &defaultCfg
}
// TODO(jsing): Validate node, peer and cluster IP configuration.
engine := &Engine{
config: cfg,
fwmAlloc: newMarkAllocator(fwmAllocBase, fwmAllocSize),
ncc: ncc,
overrides: make(map[string]seesaw.Override),
overrideChan: make(chan seesaw.Override),
vlans: make(map[uint16]*seesaw.VLAN),
vservers: make(map[string]*vserver),
shutdown: make(chan bool),
shutdownARP: make(chan bool),
shutdownIPC: make(chan bool),
shutdownRPC: make(chan bool),
vserverAccess: newVserverAccess(),
vserverSnapshots: make(map[string]*seesaw.Vserver),
vserverChan: make(chan *seesaw.Vserver, 1000),
}
engine.bgpManager = newBGPManager(engine, cfg.BGPUpdateInterval)
engine.haManager = newHAManager(engine, cfg.HAStateTimeout)
engine.hcManager = newHealthcheckManager(engine)
engine.syncClient = newSyncClient(engine)
engine.syncServer = newSyncServer(engine)
return engine
}
// NewEngine returns an initialised Engine struct.
func NewEngine(cfg *config.EngineConfig) *Engine {
ncc, err := ncclient.NewNCC(cfg.NCCSocket)
if err != nil {
log.Fatalf("Failed to create ncc client: %v", err)
}
return newEngineWithNCC(cfg, ncc)
}
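// A minimal engine lifecycle, as a sketch (assumes a valid EngineConfig and
// a shutdown trigger such as a signal handler; the stop channel below is
// hypothetical):
//
//	cfg := config.DefaultEngineConfig()
//	engine := NewEngine(&cfg)
//	go func() {
//		<-stop
//		engine.Shutdown()
//	}()
//	engine.Run() // blocks in manager() until shutdown completes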
// Run starts the Engine.
func (e *Engine) Run() {
log.Infof("Seesaw Engine starting for %s", e.config.ClusterName)
e.initNetwork()
n, err := config.NewNotifier(e.config)
if err != nil {
log.Fatalf("config.NewNotifier() failed: %v", err)
}
e.notifier = n
if e.config.AnycastEnabled {
go e.bgpManager.run()
}
go e.hcManager.run()
go e.syncClient.run()
go e.syncServer.run()
go e.syncRPC()
go e.engineIPC()
go e.gratuitousARP()
e.manager()
}
// Shutdown attempts to perform a graceful shutdown of the engine.
func (e *Engine) Shutdown() {
e.shutdown <- true
}
// haStatus returns the current HA status from the engine.
func (e *Engine) haStatus() seesaw.HAStatus {
e.haManager.statusLock.RLock()
defer e.haManager.statusLock.RUnlock()
return e.haManager.status
}
// queueOverride queues an Override for processing.
func (e *Engine) queueOverride(o seesaw.Override) {
e.overrideChan <- o
}
// setHAState tells the engine what its current HAState should be.
func (e *Engine) setHAState(state spb.HaState) error {
select {
case e.haManager.stateChan <- state:
default:
		return fmt.Errorf("state channel is full")
}
return nil
}
// setHAStatus tells the engine what the current HA status is.
func (e *Engine) setHAStatus(status seesaw.HAStatus) error {
select {
case e.haManager.statusChan <- status:
default:
		return fmt.Errorf("status channel is full")
}
return nil
}
// haConfig returns the HAConfig for an engine.
func (e *Engine) haConfig() (*seesaw.HAConfig, error) {
n, err := e.thisNode()
if err != nil {
return nil, err
}
// TODO(jsing): This does not allow for IPv6-only operation.
return &seesaw.HAConfig{
Enabled: n.State != spb.HaState_DISABLED,
LocalAddr: e.config.Node.IPv4Addr,
RemoteAddr: e.config.VRRPDestIP,
Priority: n.Priority,
VRID: e.config.VRID,
}, nil
}
// thisNode returns the Node for the machine on which this engine is running.
func (e *Engine) thisNode() (*seesaw.Node, error) {
e.clusterLock.RLock()
c := e.cluster
e.clusterLock.RUnlock()
if c == nil {
return nil, fmt.Errorf("cluster configuration not loaded")
}
// TODO(jsing): This does not allow for IPv6-only operation.
ip := e.config.Node.IPv4Addr
for _, n := range c.Nodes {
if ip.Equal(n.IPv4Addr) {
return n, nil
}
}
return nil, fmt.Errorf("node %v not configured", ip)
}
// engineIPC starts an RPC server to handle IPC via a Unix Domain socket.
func (e *Engine) engineIPC() {
if err := server.RemoveUnixSocket(e.config.SocketPath); err != nil {
log.Fatalf("Failed to remove socket: %v", err)
}
ln, err := net.Listen("unix", e.config.SocketPath)
if err != nil {
log.Fatalf("Listen failed: %v", err)
}
defer os.Remove(e.config.SocketPath)
seesawIPC := rpc.NewServer()
seesawIPC.Register(&SeesawEngine{e})
go server.RPCAccept(ln, seesawIPC)
<-e.shutdownIPC
ln.Close()
e.shutdownIPC <- true
}
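// Clients reach this server with net/rpc over the same socket; a sketch (the
// method name and argument types are assumptions about what the SeesawEngine
// receiver registered above exports):
//
//	client, err := rpc.Dial("unix", socketPath) // same path as e.config.SocketPath
//	if err != nil {
//		log.Fatalf("Dial failed: %v", err)
//	}
//	defer client.Close()
//	if err := client.Call("SeesawEngine.SomeMethod", &args, &reply); err != nil {
//		log.Errorf("RPC failed: %v", err)
//	}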
// syncRPC starts a server to handle synchronisation RPCs via a TCP socket.
func (e *Engine) syncRPC() {
// TODO(jsing): Make this default to IPv6, if configured.
addr := &net.TCPAddr{
IP: e.config.Node.IPv4Addr,
Port: e.config.SyncPort,
}
ln, err := net.ListenTCP("tcp", addr)
if err != nil {
log.Fatalf("Listen failed: %v", err)
}
go e.syncServer.serve(ln)
<-e.shutdownRPC
ln.Close()
e.shutdownRPC <- true
}
// initNetwork initialises the network configuration for load balancing.
func (e *Engine) initNetwork() {
if e.config.AnycastEnabled {
if err := e.ncc.BGPWithdrawAll(); err != nil {
log.Fatalf("Failed to withdraw all BGP advertisements: %v", err)
}
}
if err := e.ncc.IPVSFlush(); err != nil {
log.Fatalf("Failed to flush IPVS table: %v", err)
}
lbCfg := &ncctypes.LBConfig{
ClusterVIP: e.config.ClusterVIP,
DummyInterface: e.config.DummyInterface,
NodeInterface: e.config.NodeInterface,
Node: e.config.Node,
RoutingTableID: e.config.RoutingTableID,
VRID: e.config.VRID,
UseVMAC: e.config.UseVMAC,
}
e.lbInterface = e.ncc.NewLBInterface(e.config.LBInterface, lbCfg)
if err := e.lbInterface.Init(); err != nil {
log.Fatalf("Failed to initialise LB interface: %v", err)
}
if e.config.AnycastEnabled {
e.initAnycast()
}
}
// initAnycast initialises the anycast configuration.
func (e *Engine) initAnycast() {
vips := make([]*seesaw.VIP, 0)
if e.config.ClusterVIP.IPv4Addr != nil {
for _, ip := range e.config.ServiceAnycastIPv4 {
vips = append(vips, seesaw.NewVIP(ip, nil))
}
}
if e.config.ClusterVIP.IPv6Addr != nil {
for _, ip := range e.config.ServiceAnycastIPv6 {
vips = append(vips, seesaw.NewVIP(ip, nil))
}
}
for _, vip := range vips {
if err := e.lbInterface.AddVIP(vip); err != nil {
log.Fatalf("Failed to add VIP %v: %v", vip, err)
}
log.Infof("Advertising BGP route for %v", vip)
if err := e.ncc.BGPAdvertiseVIP(vip.IP.IP()); err != nil {
log.Fatalf("Failed to advertise VIP %v: %v", vip, err)
}
}
}
// gratuitousARP sends gratuitous ARP messages at regular intervals, if this
// node is the HA master.
func (e *Engine) gratuitousARP() {
arpTicker := time.NewTicker(e.config.GratuitousARPInterval)
var announced bool
for {
select {
case <-arpTicker.C:
if e.haManager.state() != spb.HaState_LEADER {
if announced {
log.Info("Stopping gratuitous ARPs")
announced = false
}
continue
}
if !announced {
log.Infof("Starting gratuitous ARPs every %s", e.config.GratuitousARPInterval)
announced = true
}
e.arpLock.Lock()
arpMap := e.arpMap
e.arpLock.Unlock()
if err := e.ncc.ARPSendGratuitous(arpMap); err != nil {
log.Fatalf("Failed to send gratuitous ARP: %v", err)
}
case <-e.shutdownARP:
e.shutdownARP <- true
return
}
}
}
// manager is responsible for managing and co-ordinating various parts of the
// seesaw engine.
func (e *Engine) manager() {
for {
		// Process HA state updates before handling any other events.
select {
case state := <-e.haManager.stateChan:
log.Infof("Received HA state notification %v", state)
e.haManager.setState(state)
continue
case status := <-e.haManager.statusChan:
log.V(1).Infof("Received HA status notification (%v)", status.State)
e.haManager.setStatus(status)
continue
default:
}
select {
case state := <-e.haManager.stateChan:
log.Infof("Received HA state notification %v", state)
e.haManager.setState(state)
case status := <-e.haManager.statusChan:
log.V(1).Infof("Received HA status notification (%v)", status.State)
e.haManager.setStatus(status)
case n := <-e.notifier.C:
log.Infof("Received cluster config notification; %v", &n)
e.syncServer.notify(&SyncNote{Type: SNTConfigUpdate, Time: time.Now()})
vua, err := newVserverUserAccess(n.Cluster)
if err != nil {
log.Errorf("Ignoring notification due to invalid vserver access configuration: %v", err)
				continue
}
e.clusterLock.Lock()
e.cluster = n.Cluster
e.clusterLock.Unlock()
e.vserverAccess.update(vua)
if n.MetadataOnly {
log.Infof("Only metadata changes found, processing complete.")
continue
}
if ha, err := e.haConfig(); err != nil {
log.Errorf("Manager failed to determine haConfig: %v", err)
} else if ha.Enabled {
e.haManager.enable()
} else {
e.haManager.disable()
}
node, err := e.thisNode()
if err != nil {
log.Errorf("Manager failed to identify local node: %v", err)
continue
}
if !node.VserversEnabled {
e.shutdownVservers()
e.deleteVLANs()
continue
}
// Process new cluster configuration.
e.updateVLANs()
// TODO(jsing): Ensure this does not block.
e.updateVservers()
e.updateARPMap()
case <-e.haManager.timer():
log.Infof("Timed out waiting for HAState")
e.haManager.setState(spb.HaState_UNKNOWN)
case svs := <-e.vserverChan:
if _, ok := e.vservers[svs.Name]; !ok {
log.Infof("Received vserver snapshot for unconfigured vserver %s, ignoring", svs.Name)
break
}
log.V(1).Infof("Updating vserver snapshot for %s", svs.Name)
e.vserverLock.Lock()
e.vserverSnapshots[svs.Name] = svs
e.vserverLock.Unlock()
case override := <-e.overrideChan:
sn := &SyncNote{Type: SNTOverride, Time: time.Now()}
switch o := override.(type) {
case *seesaw.BackendOverride:
sn.BackendOverride = o
case *seesaw.DestinationOverride:
sn.DestinationOverride = o
case *seesaw.VserverOverride:
sn.VserverOverride = o
}
e.syncServer.notify(sn)
e.handleOverride(override)
case <-e.shutdown:
log.Info("Shutting down engine...")
// Tell other components to shutdown and then wait for
// them to do so.
e.shutdownIPC <- true
e.shutdownRPC <- true
<-e.shutdownIPC
<-e.shutdownRPC
e.syncClient.disable()
e.shutdownVservers()
e.hcManager.shutdown()
e.deleteVLANs()
e.ncc.Close()
log.Info("Shutdown complete")
return
}
}
}
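// The pair of selects above is the usual Go idiom for channel priority: the
// first select is non-blocking (it has a default case) and drains any pending
// HA updates, and only when none are queued does the second select wait
// across all event sources. The pattern in isolation:
//
//	for {
//		select {
//		case m := <-priority:
//			handle(m)
//			continue
//		default:
//		}
//		select {
//		case m := <-priority:
//			handle(m)
//		case m := <-normal:
//			handle(m)
//		}
//	}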
// updateVservers processes the list of vserver configurations: it stops
// deleted vservers, spawns new vservers and updates the existing ones.
func (e *Engine) updateVservers() {
e.clusterLock.RLock()
cluster := e.cluster
e.clusterLock.RUnlock()
// Delete vservers that no longer exist in the new configuration.
for name, vserver := range e.vservers {
if cluster.Vservers[name] == nil {
log.Infof("Stopping unconfigured vserver %s", name)
vserver.stop()
<-vserver.stopped
delete(e.vservers, name)
e.vserverLock.Lock()
delete(e.vserverSnapshots, name)
e.vserverLock.Unlock()
}
}
// Spawn new vservers and provide current configurations.
for _, config := range cluster.Vservers {
if e.vservers[config.Name] == nil {
vserver := newVserver(e)
go vserver.run()
e.vservers[config.Name] = vserver
}
}
for _, override := range e.overrides {
e.distributeOverride(override)
}
for _, config := range cluster.Vservers {
e.vservers[config.Name].updateConfig(config)
}
}
// updateARPMap walks the new configuration and updates the internal ARP map
// so that the gratuitous ARP loop adapts to configuration changes.
func (e *Engine) updateARPMap() {
arpMap := make(map[string][]net.IP)
defer func() {
e.arpLock.Lock()
defer e.arpLock.Unlock()
e.arpMap = arpMap
}()
arpMap[e.config.LBInterface] = []net.IP{e.config.ClusterVIP.IPv4Addr}
if e.config.UseVMAC {
		// When using VMAC, announcing only the ClusterVIP is enough.
return
}
e.clusterLock.RLock()
cluster := e.cluster
e.clusterLock.RUnlock()
e.vlanLock.RLock()
defer e.vlanLock.RUnlock()
for _, vserver := range cluster.Vservers {
for _, vip := range vserver.VIPs {
if vip.Type == seesaw.AnycastVIP {
continue
}
ip := vip.IP.IP()
if ip.To4() == nil {
// IPv6 address is not yet supported.
continue
}
found := false
for _, vlan := range e.vlans {
ipNet := vlan.IPv4Net()
if ipNet == nil {
continue
}
if ipNet.Contains(ip) {
ifName := fmt.Sprintf("%s.%d", e.config.LBInterface, vlan.ID)
arpMap[ifName] = append(arpMap[ifName], ip)
found = true
break
}
}
if !found {
				// Fall back to the LB interface if no VLAN matches.
arpMap[e.config.LBInterface] = append(arpMap[e.config.LBInterface], ip)
}
}
}
}
// shutdownVservers shuts down all running vservers.
func (e *Engine) shutdownVservers() {
for _, v := range e.vservers {
v.stop()
}
for name, v := range e.vservers {
<-v.stopped
delete(e.vservers, name)
}
e.vserverLock.Lock()
e.vserverSnapshots = make(map[string]*seesaw.Vserver)
e.vserverLock.Unlock()
}
// updateVLANs creates and destroys VLAN interfaces for the load balancer per
// the cluster configuration.
func (e *Engine) updateVLANs() {
e.clusterLock.RLock()
cluster := e.cluster
e.clusterLock.RUnlock()
add := make([]*seesaw.VLAN, 0)
remove := make([]*seesaw.VLAN, 0)
e.vlanLock.Lock()
defer e.vlanLock.Unlock()
for key, vlan := range e.vlans {
if cluster.VLANs[key] == nil {
remove = append(remove, vlan)
} else if !vlan.Equal(cluster.VLANs[key]) {
// TODO(angusc): This will break any VIPs that are currently configured
// on the VLAN interface. Fix!
remove = append(remove, vlan)
add = append(add, cluster.VLANs[key])
}
}
for key, vlan := range cluster.VLANs {
if e.vlans[key] == nil {
add = append(add, vlan)
}
}
for _, vlan := range remove {
log.Infof("Removing VLAN interface %v", vlan)
if err := e.lbInterface.DeleteVLAN(vlan); err != nil {
log.Fatalf("Failed to remove VLAN interface %v: %v", vlan, err)
}
}
for _, vlan := range add {
log.Infof("Adding VLAN interface %v", vlan)
if err := e.lbInterface.AddVLAN(vlan); err != nil {
log.Fatalf("Failed to add VLAN interface %v: %v", vlan, err)
}
}
e.vlans = cluster.VLANs
}
// deleteVLANs removes all the VLAN interfaces that have been created by this
// engine.
func (e *Engine) deleteVLANs() {
e.vlanLock.Lock()
defer e.vlanLock.Unlock()
for k, v := range e.vlans {
if err := e.lbInterface.DeleteVLAN(v); err != nil {
log.Fatalf("Failed to remove VLAN interface %v: %v", v, err)
}
delete(e.vlans, k)
}
}
// handleOverride handles an incoming Override.
func (e *Engine) handleOverride(o seesaw.Override) {
e.overrides[o.Target()] = o
e.distributeOverride(o)
if o.State() == seesaw.OverrideDefault {
delete(e.overrides, o.Target())
}
}
// distributeOverride distributes an Override to the appropriate vservers.
func (e *Engine) distributeOverride(o seesaw.Override) {
// Send VserverOverrides and DestinationOverrides to the appropriate vserver.
// Send BackendOverrides to all vservers.
switch override := o.(type) {
case *seesaw.VserverOverride:
if vserver, ok := e.vservers[override.VserverName]; ok {
vserver.queueOverride(o)
}
case *seesaw.DestinationOverride:
if vserver, ok := e.vservers[override.VserverName]; ok {
vserver.queueOverride(o)
}
case *seesaw.BackendOverride:
for _, vserver := range e.vservers {
vserver.queueOverride(o)
}
}
}
// becomeMaster performs the necessary actions for the Seesaw Engine to
// become the master node.
func (e *Engine) becomeMaster() {
e.syncClient.disable()
e.notifier.SetSource(config.SourceServer)
if err := e.lbInterface.Up(); err != nil {
log.Fatalf("Failed to bring LB interface up: %v", err)
}
}
// becomeBackup performs the necessary actions for the Seesaw Engine to
// stop being the master node and become the backup node.
func (e *Engine) becomeBackup() {
e.syncClient.enable()
e.notifier.SetSource(config.SourceServer)
if err := e.lbInterface.Down(); err != nil {
log.Fatalf("Failed to bring LB interface down: %v", err)
}
}
// markAllocator handles the allocation of marks.
type markAllocator struct {
lock sync.RWMutex
marks []uint32
}
// newMarkAllocator returns a mark allocator initialised with the specified
// base and size.
func newMarkAllocator(base, size int) *markAllocator {
ma := &markAllocator{
marks: make([]uint32, 0, size),
}
for i := 0; i < size; i++ {
ma.put(uint32(base + i))
}
return ma
}
// get returns the next available mark from the mark allocator.
func (ma *markAllocator) get() (uint32, error) {
ma.lock.Lock()
defer ma.lock.Unlock()
if len(ma.marks) == 0 {
return 0, errors.New("allocator exhausted")
}
mark := ma.marks[0]
ma.marks = ma.marks[1:]
return mark, nil
}
// put returns the specified mark to the mark allocator.
func (ma *markAllocator) put(mark uint32) {
ma.lock.Lock()
defer ma.lock.Unlock()
ma.marks = append(ma.marks, mark)
}
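// Typical markAllocator usage within the engine, as a sketch: take a
// firewall mark when a fwmark-based service is configured and return it on
// teardown.
//
//	mark, err := e.fwmAlloc.get()
//	if err != nil {
//		return err // allocator exhausted
//	}
//	defer e.fwmAlloc.put(mark)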
core.go | // Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Author: [email protected] (Joel Sing)
/*
Package engine implements the Seesaw v2 engine component, which is
responsible for maintaining configuration information, handling state
transitions and providing communication between Seesaw v2 components.
*/
package engine
import (
"errors"
"fmt"
"net"
"net/rpc"
"os"
"sync"
"time"
"github.com/google/seesaw/common/seesaw"
"github.com/google/seesaw/common/server"
"github.com/google/seesaw/engine/config"
ncclient "github.com/google/seesaw/ncc/client"
ncctypes "github.com/google/seesaw/ncc/types"
spb "github.com/google/seesaw/pb/seesaw"
log "github.com/golang/glog"
)
const (
fwmAllocBase = 1 << 8
fwmAllocSize = 8000
)
// Engine contains the data necessary to run the Seesaw v2 Engine.
type Engine struct {
config *config.EngineConfig
notifier *config.Notifier
fwmAlloc *markAllocator
bgpManager *bgpManager
haManager *haManager
hcManager *healthcheckManager
ncc ncclient.NCC
lbInterface ncclient.LBInterface
cluster *config.Cluster
clusterLock sync.RWMutex
shutdown chan bool
shutdownARP chan bool
shutdownIPC chan bool
shutdownRPC chan bool
syncClient *syncClient
syncServer *syncServer
overrides map[string]seesaw.Override
overrideChan chan seesaw.Override
vlans map[uint16]*seesaw.VLAN
vlanLock sync.RWMutex
vservers map[string]*vserver
vserverAccess *vserverAccess
vserverSnapshots map[string]*seesaw.Vserver
vserverLock sync.RWMutex
vserverChan chan *seesaw.Vserver
startTime time.Time
arpMap map[string][]net.IP // iface name -> IP list
arpLock sync.Mutex
}
func newEngineWithNCC(cfg *config.EngineConfig, ncc ncclient.NCC) *Engine {
if cfg == nil {
defaultCfg := config.DefaultEngineConfig()
cfg = &defaultCfg
}
// TODO(jsing): Validate node, peer and cluster IP configuration.
engine := &Engine{
config: cfg,
fwmAlloc: newMarkAllocator(fwmAllocBase, fwmAllocSize),
ncc: ncc,
overrides: make(map[string]seesaw.Override),
overrideChan: make(chan seesaw.Override),
vlans: make(map[uint16]*seesaw.VLAN),
vservers: make(map[string]*vserver),
shutdown: make(chan bool),
shutdownARP: make(chan bool),
shutdownIPC: make(chan bool),
shutdownRPC: make(chan bool),
vserverAccess: newVserverAccess(),
vserverSnapshots: make(map[string]*seesaw.Vserver),
vserverChan: make(chan *seesaw.Vserver, 1000),
}
engine.bgpManager = newBGPManager(engine, cfg.BGPUpdateInterval)
engine.haManager = newHAManager(engine, cfg.HAStateTimeout)
engine.hcManager = newHealthcheckManager(engine)
engine.syncClient = newSyncClient(engine)
engine.syncServer = newSyncServer(engine)
return engine
}
// NewEngine returns an initialised Engine struct.
func NewEngine(cfg *config.EngineConfig) *Engine {
ncc, err := ncclient.NewNCC(cfg.NCCSocket)
if err != nil {
log.Fatalf("Failed to create ncc client: %v", err)
}
return newEngineWithNCC(cfg, ncc)
}
// Run starts the Engine.
func (e *Engine) Run() {
log.Infof("Seesaw Engine starting for %s", e.config.ClusterName)
e.initNetwork()
n, err := config.NewNotifier(e.config)
if err != nil {
log.Fatalf("config.NewNotifier() failed: %v", err)
}
e.notifier = n
if e.config.AnycastEnabled {
go e.bgpManager.run()
}
go e.hcManager.run()
go e.syncClient.run()
go e.syncServer.run()
go e.syncRPC()
go e.engineIPC()
go e.gratuitousARP()
e.manager()
}
// Shutdown attempts to perform a graceful shutdown of the engine.
func (e *Engine) Shutdown() {
e.shutdown <- true
}
// haStatus returns the current HA status from the engine.
func (e *Engine) haStatus() seesaw.HAStatus {
e.haManager.statusLock.RLock()
defer e.haManager.statusLock.RUnlock()
return e.haManager.status
}
// queueOverride queues an Override for processing.
func (e *Engine) queueOverride(o seesaw.Override) {
e.overrideChan <- o
}
// setHAState tells the engine what its current HAState should be.
func (e *Engine) setHAState(state spb.HaState) error {
select {
case e.haManager.stateChan <- state:
default:
return fmt.Errorf("state channel if full")
}
return nil
}
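// Both setHAState and setHAStatus use Go's non-blocking send idiom: a
// select with a default arm reports an error instead of blocking the
// caller when the manager loop has not yet drained the channel. The
// bare pattern, for reference:
//
//	select {
//	case ch <- v:
//	default:
//		return fmt.Errorf("channel is full")
//	}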
// setHAStatus tells the engine what the current HA status is.
func (e *Engine) setHAStatus(status seesaw.HAStatus) error {
select {
case e.haManager.statusChan <- status:
default:
return fmt.Errorf("status channel is full")
}
return nil
}
// haConfig returns the HAConfig for an engine.
func (e *Engine) haConfig() (*seesaw.HAConfig, error) {
n, err := e.thisNode()
if err != nil {
return nil, err
}
// TODO(jsing): This does not allow for IPv6-only operation.
return &seesaw.HAConfig{
Enabled: n.State != spb.HaState_DISABLED,
LocalAddr: e.config.Node.IPv4Addr,
RemoteAddr: e.config.VRRPDestIP,
Priority: n.Priority,
VRID: e.config.VRID,
}, nil
}
// thisNode returns the Node for the machine on which this engine is running.
func (e *Engine) thisNode() (*seesaw.Node, error) {
e.clusterLock.RLock()
c := e.cluster
e.clusterLock.RUnlock()
if c == nil {
return nil, fmt.Errorf("cluster configuration not loaded")
}
// TODO(jsing): This does not allow for IPv6-only operation.
ip := e.config.Node.IPv4Addr
for _, n := range c.Nodes {
if ip.Equal(n.IPv4Addr) {
return n, nil
}
}
return nil, fmt.Errorf("node %v not configured", ip)
}
// engineIPC starts an RPC server to handle IPC via a Unix Domain socket.
func (e *Engine) engineIPC() {
if err := server.RemoveUnixSocket(e.config.SocketPath); err != nil {
log.Fatalf("Failed to remove socket: %v", err)
}
ln, err := net.Listen("unix", e.config.SocketPath)
if err != nil {
log.Fatalf("Listen failed: %v", err)
}
defer os.Remove(e.config.SocketPath)
seesawIPC := rpc.NewServer()
seesawIPC.Register(&SeesawEngine{e})
go server.RPCAccept(ln, seesawIPC)
<-e.shutdownIPC
ln.Close()
e.shutdownIPC <- true
}
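// A component could reach this endpoint with the standard net/rpc
// package; a minimal sketch (the RPC method name below is illustrative,
// not taken from this file):
//
//	conn, _ := net.Dial("unix", cfg.SocketPath)
//	client := rpc.NewClient(conn)
//	var status seesaw.HAStatus
//	err := client.Call("SeesawEngine.HAStatus", ctx, &status)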
// syncRPC starts a server to handle synchronisation RPCs via a TCP socket.
func (e *Engine) syncRPC() {
// TODO(jsing): Make this default to IPv6, if configured.
addr := &net.TCPAddr{
IP: e.config.Node.IPv4Addr,
Port: e.config.SyncPort,
}
ln, err := net.ListenTCP("tcp", addr)
if err != nil {
log.Fatalf("Listen failed: %v", err)
}
go e.syncServer.serve(ln)
<-e.shutdownRPC
ln.Close()
e.shutdownRPC <- true
}
// initNetwork initialises the network configuration for load balancing.
func (e *Engine) initNetwork() {
if e.config.AnycastEnabled {
if err := e.ncc.BGPWithdrawAll(); err != nil {
log.Fatalf("Failed to withdraw all BGP advertisements: %v", err)
}
}
if err := e.ncc.IPVSFlush(); err != nil {
log.Fatalf("Failed to flush IPVS table: %v", err)
}
lbCfg := &ncctypes.LBConfig{
ClusterVIP: e.config.ClusterVIP,
DummyInterface: e.config.DummyInterface,
NodeInterface: e.config.NodeInterface,
Node: e.config.Node,
RoutingTableID: e.config.RoutingTableID,
VRID: e.config.VRID,
UseVMAC: e.config.UseVMAC,
}
e.lbInterface = e.ncc.NewLBInterface(e.config.LBInterface, lbCfg)
if err := e.lbInterface.Init(); err != nil {
log.Fatalf("Failed to initialise LB interface: %v", err)
}
if e.config.AnycastEnabled {
e.initAnycast()
}
}
// initAnycast initialises the anycast configuration.
func (e *Engine) initAnycast() {
vips := make([]*seesaw.VIP, 0)
if e.config.ClusterVIP.IPv4Addr != nil {
for _, ip := range e.config.ServiceAnycastIPv4 {
vips = append(vips, seesaw.NewVIP(ip, nil))
}
}
if e.config.ClusterVIP.IPv6Addr != nil {
for _, ip := range e.config.ServiceAnycastIPv6 {
vips = append(vips, seesaw.NewVIP(ip, nil))
}
}
for _, vip := range vips {
if err := e.lbInterface.AddVIP(vip); err != nil {
log.Fatalf("Failed to add VIP %v: %v", vip, err)
}
log.Infof("Advertising BGP route for %v", vip)
if err := e.ncc.BGPAdvertiseVIP(vip.IP.IP()); err != nil {
log.Fatalf("Failed to advertise VIP %v: %v", vip, err)
}
}
}
// gratuitousARP sends gratuitous ARP messages at regular intervals, if this
// node is the HA master.
func (e *Engine) gratuitousARP() {
arpTicker := time.NewTicker(e.config.GratuitousARPInterval)
var announced bool
for {
select {
case <-arpTicker.C:
if e.haManager.state() != spb.HaState_LEADER {
if announced {
log.Info("Stopping gratuitous ARPs")
announced = false
}
continue
}
if !announced {
log.Infof("Starting gratuitous ARPs every %s", e.config.GratuitousARPInterval)
announced = true
}
e.arpLock.Lock()
arpMap := e.arpMap
e.arpLock.Unlock()
if err := e.ncc.ARPSendGratuitous(arpMap); err != nil {
log.Fatalf("Failed to send gratuitous ARP: %v", err)
}
case <-e.shutdownARP:
e.shutdownARP <- true
return
}
}
}
// manager is responsible for managing and co-ordinating various parts of the
// seesaw engine.
func (e *Engine) manager() {
for {
// Drain any pending HA state/status updates first so they take priority over other events.
select {
case state := <-e.haManager.stateChan:
log.Infof("Received HA state notification %v", state)
e.haManager.setState(state)
continue
case status := <-e.haManager.statusChan:
log.V(1).Infof("Received HA status notification (%v)", status.State)
e.haManager.setStatus(status)
continue
default:
}
select {
case state := <-e.haManager.stateChan:
log.Infof("Received HA state notification %v", state)
e.haManager.setState(state)
case status := <-e.haManager.statusChan:
log.V(1).Infof("Received HA status notification (%v)", status.State)
e.haManager.setStatus(status)
case n := <-e.notifier.C:
log.Infof("Received cluster config notification; %v", &n)
e.syncServer.notify(&SyncNote{Type: SNTConfigUpdate, Time: time.Now()})
vua, err := newVserverUserAccess(n.Cluster)
if err != nil {
log.Errorf("Ignoring notification due to invalid vserver access configuration: %v", err)
continue
}
e.clusterLock.Lock()
e.cluster = n.Cluster
e.clusterLock.Unlock()
e.vserverAccess.update(vua)
if n.MetadataOnly {
log.Infof("Only metadata changes found, processing complete.")
continue
}
if ha, err := e.haConfig(); err != nil {
log.Errorf("Manager failed to determine haConfig: %v", err)
} else if ha.Enabled {
e.haManager.enable()
} else {
e.haManager.disable()
}
node, err := e.thisNode()
if err != nil {
log.Errorf("Manager failed to identify local node: %v", err)
continue
}
if !node.VserversEnabled {
e.shutdownVservers()
e.deleteVLANs()
continue
}
// Process new cluster configuration.
e.updateVLANs()
// TODO(jsing): Ensure this does not block.
e.updateVservers()
e.updateARPMap()
case <-e.haManager.timer():
log.Infof("Timed out waiting for HAState")
e.haManager.setState(spb.HaState_UNKNOWN)
case svs := <-e.vserverChan:
if _, ok := e.vservers[svs.Name]; !ok {
log.Infof("Received vserver snapshot for unconfigured vserver %s, ignoring", svs.Name)
break
}
log.V(1).Infof("Updating vserver snapshot for %s", svs.Name)
e.vserverLock.Lock()
e.vserverSnapshots[svs.Name] = svs
e.vserverLock.Unlock()
case override := <-e.overrideChan:
sn := &SyncNote{Type: SNTOverride, Time: time.Now()}
switch o := override.(type) {
case *seesaw.BackendOverride:
sn.BackendOverride = o
case *seesaw.DestinationOverride:
sn.DestinationOverride = o
case *seesaw.VserverOverride:
sn.VserverOverride = o
}
e.syncServer.notify(sn)
e.handleOverride(override)
case <-e.shutdown:
log.Info("Shutting down engine...")
// Tell other components to shutdown and then wait for
// them to do so.
e.shutdownIPC <- true
e.shutdownRPC <- true
<-e.shutdownIPC
<-e.shutdownRPC
e.syncClient.disable()
e.shutdownVservers()
e.hcManager.shutdown()
e.deleteVLANs()
e.ncc.Close()
log.Info("Shutdown complete")
return
}
}
}
// updateVservers processes the new vserver configuration: it stops vservers
// that were removed, spawns newly added vservers and updates the existing ones.
func (e *Engine) updateVservers() {
e.clusterLock.RLock()
cluster := e.cluster
e.clusterLock.RUnlock()
// Delete vservers that no longer exist in the new configuration.
for name, vserver := range e.vservers {
if cluster.Vservers[name] == nil {
log.Infof("Stopping unconfigured vserver %s", name)
vserver.stop()
<-vserver.stopped
delete(e.vservers, name)
e.vserverLock.Lock()
delete(e.vserverSnapshots, name)
e.vserverLock.Unlock()
}
}
// Spawn new vservers and provide current configurations.
for _, config := range cluster.Vservers {
if e.vservers[config.Name] == nil {
vserver := newVserver(e)
go vserver.run()
e.vservers[config.Name] = vserver
}
}
for _, override := range e.overrides {
e.distributeOverride(override)
}
for _, config := range cluster.Vservers {
e.vservers[config.Name].updateConfig(config)
}
}
// updateARPMap goes through the new config and updates the internal ARP map so that
// the gratuitous ARP loop adapts to new changes.
func (e *Engine) updateARPMap() {
arpMap := make(map[string][]net.IP)
defer func() {
e.arpLock.Lock()
defer e.arpLock.Unlock()
e.arpMap = arpMap
}()
arpMap[e.config.LBInterface] = []net.IP{e.config.ClusterVIP.IPv4Addr}
if e.config.UseVMAC {
// When using VMAC, announcing only the ClusterVIP is enough.
return
}
e.clusterLock.RLock()
cluster := e.cluster
e.clusterLock.RUnlock()
e.vlanLock.RLock()
defer e.vlanLock.RUnlock()
for _, vserver := range cluster.Vservers {
for _, vip := range vserver.VIPs {
if vip.Type == seesaw.AnycastVIP {
continue
}
ip := vip.IP.IP()
if ip.To4() == nil {
// IPv6 address is not yet supported.
continue
}
found := false
for _, vlan := range e.vlans {
ipNet := vlan.IPv4Net()
if ipNet == nil {
continue
}
if ipNet.Contains(ip) {
ifName := fmt.Sprintf("%s.%d", e.config.LBInterface, vlan.ID)
arpMap[ifName] = append(arpMap[ifName], ip)
found = true
break
}
}
if !found {
// Fall back to the LB interface if no VLAN subnet matches.
arpMap[e.config.LBInterface] = append(arpMap[e.config.LBInterface], ip)
}
}
}
}
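// For example, with LB interface "eth1", a VLAN 100 whose subnet
// contains VIP 10.0.1.5, and a non-VLAN VIP 192.0.2.9, the resulting
// map would be (addresses illustrative):
//
//	"eth1"     -> [ClusterVIP IPv4, 192.0.2.9]
//	"eth1.100" -> [10.0.1.5]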
// shutdownVservers shuts down all running vservers.
func (e *Engine) shutdownVservers() {
for _, v := range e.vservers {
v.stop()
}
for name, v := range e.vservers {
<-v.stopped
delete(e.vservers, name)
}
e.vserverLock.Lock()
e.vserverSnapshots = make(map[string]*seesaw.Vserver)
e.vserverLock.Unlock()
}
// updateVLANs creates and destroys VLAN interfaces for the load balancer per
// the cluster configuration.
func (e *Engine) updateVLANs() {
e.clusterLock.RLock()
cluster := e.cluster
e.clusterLock.RUnlock()
add := make([]*seesaw.VLAN, 0)
remove := make([]*seesaw.VLAN, 0)
e.vlanLock.Lock()
defer e.vlanLock.Unlock()
for key, vlan := range e.vlans {
if cluster.VLANs[key] == nil {
remove = append(remove, vlan)
} else if !vlan.Equal(cluster.VLANs[key]) {
// TODO(angusc): This will break any VIPs that are currently configured
// on the VLAN interface. Fix!
remove = append(remove, vlan)
add = append(add, cluster.VLANs[key])
}
}
for key, vlan := range cluster.VLANs {
if e.vlans[key] == nil {
add = append(add, vlan)
}
}
for _, vlan := range remove {
log.Infof("Removing VLAN interface %v", vlan)
if err := e.lbInterface.DeleteVLAN(vlan); err != nil {
log.Fatalf("Failed to remove VLAN interface %v: %v", vlan, err)
}
}
for _, vlan := range add {
log.Infof("Adding VLAN interface %v", vlan)
if err := e.lbInterface.AddVLAN(vlan); err != nil {
log.Fatalf("Failed to add VLAN interface %v: %v", vlan, err)
}
}
e.vlans = cluster.VLANs
}
// deleteVLANs removes all the VLAN interfaces that have been created by this
// engine.
func (e *Engine) deleteVLANs() {
e.vlanLock.Lock()
defer e.vlanLock.Unlock()
for k, v := range e.vlans {
if err := e.lbInterface.DeleteVLAN(v); err != nil {
log.Fatalf("Failed to remove VLAN interface %v: %v", v, err)
}
delete(e.vlans, k)
}
}
// handleOverride handles an incoming Override.
func (e *Engine) handleOverride(o seesaw.Override) {
e.overrides[o.Target()] = o
e.distributeOverride(o)
if o.State() == seesaw.OverrideDefault {
delete(e.overrides, o.Target())
}
}
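// Note that an override whose state is OverrideDefault is still
// distributed once (so vservers revert to their configured behaviour)
// but is not retained, which effectively clears it.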
// distributeOverride distributes an Override to the appropriate vservers.
func (e *Engine) distributeOverride(o seesaw.Override) {
// Send VserverOverrides and DestinationOverrides to the appropriate vserver.
// Send BackendOverrides to all vservers.
switch override := o.(type) {
case *seesaw.VserverOverride:
if vserver, ok := e.vservers[override.VserverName]; ok {
vserver.queueOverride(o)
}
case *seesaw.DestinationOverride:
if vserver, ok := e.vservers[override.VserverName]; ok {
vserver.queueOverride(o)
}
case *seesaw.BackendOverride:
for _, vserver := range e.vservers {
vserver.queueOverride(o)
}
}
}
// becomeMaster performs the necessary actions for the Seesaw Engine to
// become the master node.
func (e *Engine) becomeMaster() {
e.syncClient.disable()
e.notifier.SetSource(config.SourceServer)
if err := e.lbInterface.Up(); err != nil {
log.Fatalf("Failed to bring LB interface up: %v", err)
}
}
// becomeBackup performs the necessary actions for the Seesaw Engine to
// stop being the master node and become the backup node.
func (e *Engine) becomeBackup() {
e.syncClient.enable()
e.notifier.SetSource(config.SourceServer)
if err := e.lbInterface.Down(); err != nil {
log.Fatalf("Failed to bring LB interface down: %v", err)
}
}
// markAllocator handles the allocation of marks.
type markAllocator struct {
lock sync.RWMutex
marks []uint32
}
// newMarkAllocator returns a mark allocator initialised with the specified
// base and size.
func newMarkAllocator(base, size int) *markAllocator {
ma := &markAllocator{
marks: make([]uint32, 0, size),
}
for i := 0; i < size; i++ {
ma.put(uint32(base + i))
}
return ma
}
// get returns the next available mark from the mark allocator.
func (ma *markAllocator) get() (uint32, error) {
ma.lock.Lock()
defer ma.lock.Unlock()
if len(ma.marks) == 0 {
return 0, errors.New("allocator exhausted")
}
mark := ma.marks[0]
ma.marks = ma.marks[1:]
return mark, nil
}
// put returns the specified mark to the mark allocator.
func (ma *markAllocator) put(mark uint32) {
ma.lock.Lock()
defer ma.lock.Unlock()
ma.marks = append(ma.marks, mark)
}
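// Example usage of the allocator (the first value follows from the
// constants above, assuming no prior allocations):
//
//	ma := newMarkAllocator(fwmAllocBase, fwmAllocSize)
//	mark, err := ma.get() // mark == 256
//	if err != nil {
//		// pool exhausted
//	}
//	defer ma.put(mark) // recycle the mark when done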
lib.rs
#![deny(warnings)]
pub mod corgi;
pub mod dict;
#[cfg(test)]
pub mod tests;
use crate::corgi::{decode, encode, Corgi, CorgiDTO, CorgiId, CorgiKey, Rarity};
use crate::dict::Dict;
use near_env::near_envlog;
use near_sdk::{
borsh::{self, BorshDeserialize, BorshSerialize},
collections::UnorderedMap,
env,
json_types::U64,
near_bindgen,
wee_alloc::WeeAlloc,
AccountId, Balance, Promise,
};
use std::{convert::TryInto, mem::size_of, usize};
#[global_allocator]
static ALLOC: WeeAlloc = WeeAlloc::INIT;
/// Fee to pay (in yocto Ⓝ) to allow the user to store Corgis on-chain.
/// This value can be set by modifying the `mint_fee` field in `config.json`.
const MINT_FEE: u128 = include!(concat!(env!("OUT_DIR"), "/mint_fee.val"));
/// Indicates how many `Corgi`s are returned at most by the `get_global_corgis` method.
/// This value can be set by modifying the `page_limit` field in `config.json`.
const PAGE_LIMIT: u32 = include!(concat!(env!("OUT_DIR"), "/page_limit.val"));
/// Keys used to identify our structures within the NEAR blockchain.
const CORGIS: &[u8] = b"a";
const CORGIS_BY_OWNER: &[u8] = b"b";
const CORGIS_BY_OWNER_PREFIX: &str = "B";
const AUCTIONS: &[u8] = b"d";
const AUCTIONS_PREFIX: &str = "D";
/// Holds our data model.
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Model {
/// A mapping from `CorgiKey` to `Corgi` to have quick access to corgis.
/// `Dict` is used to keep corgis sorted by creation timestamp.
corgis: Dict<CorgiKey, Corgi>,
/// Represents which account holds which `Corgi`.
/// Each account can own several corgis.
/// The inner `Dict` acts as a set, since it is mapped to `()`.
corgis_by_owner: UnorderedMap<AccountId, Dict<CorgiKey, ()>>,
/// Internal structure to store auctions for a given corgi.
/// It is a mapping from `CorgiKey` to a tuple.
/// The first component of the tuple is a `Dict`, which represents the bids for that corgi.
/// Each entry in this `Dict` maps the bidder (`AccountId`) to the bid price and bidding timestamp.
/// The second component of the tuple represents the expiration of the auction,
/// as a timestamp in nanoseconds.
auctions: UnorderedMap<CorgiKey, (Dict<AccountId, (Balance, u64)>, u64)>,
}
impl Default for Model {
fn default() -> Self {
env::log(format!("init v{}", env!("CARGO_PKG_VERSION")).as_bytes());
Self {
corgis: Dict::new(CORGIS.to_vec()),
corgis_by_owner: UnorderedMap::new(CORGIS_BY_OWNER.to_vec()),
auctions: UnorderedMap::new(AUCTIONS.to_vec()),
}
}
}
#[near_bindgen]
#[near_envlog(skip_args, only_pub)]
impl Model {
/// Creates a `Corgi` under the `predecessor_account_id`.
/// Returns the newly generated `Corgi`
/// The corgi `id` is encoded using base58.
/// This method is `payable` because the caller needs to cover the cost to mint the corgi.
/// The corresponding `attached_deposit` must be `MINT_FEE`.
#[payable]
pub fn create_corgi(
&mut self,
name: String,
quote: String,
color: String,
background_color: String,
) -> CorgiDTO {
let owner = env::predecessor_account_id();
let deposit = env::attached_deposit();
if deposit != MINT_FEE {
panic!("Deposit must be MINT_FEE but was {}", deposit)
}
macro_rules! check {
($value:ident, $max:expr, $message:expr) => {{
if $value.len() > $max {
env::panic($message.as_bytes());
}
}};
}
check!(name, 32, "Name too large");
check!(quote, 256, "Quote too large");
check!(color, 64, "Color too large");
check!(background_color, 64, "Backcolor too large");
let now = env::block_timestamp();
let key = env::random_seed()[..size_of::<CorgiKey>()]
.try_into()
.unwrap();
let corgi = Corgi {
id: encode(key),
name,
quote,
color,
background_color,
rate: Rarity::from_seed(env::random_seed()),
owner,
created: now,
modified: now,
sender: "".to_string(),
};
CorgiDTO::new(self.push_corgi(key, corgi))
}
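// A sketch of invoking this method with near-cli (account ids are
// illustrative and flag names vary between near-cli versions; the
// attached deposit must equal `MINT_FEE`):
//
// near call corgis.example.near create_corgi \
// '{"name":"Rex","quote":"woof","color":"brown","background_color":"white"}' \
// --accountId alice.example.near --depositYocto <MINT_FEE>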
/// Gets `Corgi` by the given `id`.
/// Panics if `id` is not found.
pub fn get_corgi_by_id(&self, id: CorgiId) -> CorgiDTO {
let (key, corgi) = self.get_corgi(&id);
self.get_for_sale(key, corgi)
}
/// Gets all `Corgi`s owned by the `owner` account id.
/// Empty `vec` if `owner` does not hold any `Corgi`.
pub fn get_corgis_by_owner(&self, owner: AccountId) -> Vec<CorgiDTO> {
match self.corgis_by_owner.get(&owner) {
None => Vec::new(),
Some(list) => list
.into_iter()
.map(|(key, _)| {
let maybe_corgi = self.corgis.get(&key);
assert!(maybe_corgi.is_some());
let corgi = maybe_corgi.unwrap();
assert!(corgi.id == encode(key));
assert!(corgi.owner == owner);
self.get_for_sale(key, corgi)
})
.collect(),
}
}
/// Delete the `Corgi` by its `id`.
/// Only the `owner` of the `Corgi` can delete it.
pub fn delete_corgi(&mut self, id: CorgiId) {
let owner = env::predecessor_account_id();
self.delete_corgi_from(id, owner);
}
/// Internal method to delete the corgi with `id` owned by `owner`.
/// Panics if `owner` does not own the corgi with `id`.
fn delete_corgi_from(&mut self, id: CorgiId, owner: AccountId) {
match self.corgis_by_owner.get(&owner) {
None => env::panic("You do not have corgis to delete from".as_bytes()),
Some(mut list) => {
let key = decode(&id);
self.panic_if_corgi_is_locked(key);
if list.remove(&key).is_none() {
env::panic("Corgi id does not belong to account".as_bytes());
}
self.corgis_by_owner.insert(&owner, &list);
let was_removed = self.corgis.remove(&key);
assert!(was_removed.is_some());
}
}
}
/// Returns a list of all `Corgi`s that have been created.
/// Number of `Corgi`s returned is limited by `PAGE_LIMIT`.
pub fn get_global_corgis(&self) -> Vec<CorgiDTO> {
let mut result = Vec::new();
for (key, corgi) in &self.corgis {
if result.len() >= PAGE_LIMIT as usize {
break;
}
result.push(self.get_for_sale(key, corgi));
}
result
}
/// Transfer the Corgi with the given `id` to `receiver`.
/// Only the `owner` of the corgi can make such a transfer.
pub fn transfer_corgi(&mut self, receiver: AccountId, id: CorgiId) {
if !env::is_valid_account_id(receiver.as_bytes()) {
env::panic("Invalid receiver account id".as_bytes());
}
let sender = env::predecessor_account_id();
if sender == receiver {
env::panic("Self transfers are not allowed".as_bytes());
}
let (key, corgi) = self.get_corgi(&id);
assert_eq!(corgi.id, id);
if sender != corgi.owner {
env::panic("Sender must own Corgi".as_bytes());
}
self.panic_if_corgi_is_locked(key);
self.move_corgi(key, id, sender, receiver, corgi)
}
/// Returns all `Corgi`s currently for sale.
/// That is, all `Corgi`s which are in auction.
pub fn get_items_for_sale(&self) -> Vec<CorgiDTO> {
let mut result = Vec::new();
for (key, item) in self.auctions.iter() {
let corgi = self.corgis.get(&key);
assert!(corgi.is_some());
let corgi = corgi.unwrap();
result.push(CorgiDTO::for_sale(corgi, item));
}
result
}
/// Puts the given `Corgi` for sale.
/// The `duration` indicates for how long the auction should last, in seconds.
pub fn add_item_for_sale(&mut self, token_id: CorgiId, duration: u32) -> U64 {
let (key, corgi) = self.get_corgi(&token_id);
if corgi.owner != env::predecessor_account_id() {
env::panic("Only token owner can add item for sale".as_bytes())
}
if self.auctions.get(&key).is_none() {
let bids = Dict::new(get_collection_key(AUCTIONS_PREFIX, token_id));
let expires = env::block_timestamp() + duration as u64 * 1_000_000_000;
self.auctions.insert(&key, &(bids, expires));
U64(expires)
} else {
env::panic("Corgi already for sale".as_bytes());
}
}
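// For example, `duration = 3600` keeps the auction open for one hour:
// the stored expiration is `block_timestamp() + 3600 * 1_000_000_000`,
// since NEAR block timestamps are expressed in nanoseconds.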
/// Makes a bid for a `Corgi` already in auction.
/// This is a `payable` method, meaning the contract will escrow the `attached_deposit`
/// until the auction ends.
#[payable]
pub fn bid_for_item(&mut self, token_id: CorgiId) {
let (key, mut bids, auction_ends) = self.get_auction(&token_id);
let bidder = env::predecessor_account_id();
if bidder == self.corgis.get(&key).expect("Corgi not found").owner {
env::panic("You cannot bid for your own Corgi".as_bytes())
}
if env::block_timestamp() > auction_ends {
env::panic("Auction for corgi has expired".as_bytes())
}
let price = env::attached_deposit() + bids.get(&bidder).map(|(p, _)| p).unwrap_or_default();
let top_price = bids.into_iter().next().map(|(_, (p, _))| p).unwrap_or(0);
if price <= top_price {
panic!("Bid {} does not cover top bid {}", price, top_price)
}
bids.remove(&bidder);
bids.push_front(&bidder, (price, env::block_timestamp()));
self.auctions.insert(&key, &(bids, auction_ends));
}
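// Because every accepted bid must exceed the current top bid and is
// pushed to the front, the first entry of `bids` is always the highest
// bid. Re-bidding escrows only the difference: the new attached deposit
// is added on top of the bidder's previously escrowed amount.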
/// Makes a clearance for the given `Corgi`.
/// Only the corgi `owner` or the highest bidder can end an auction after it expires.
/// All other bidders can get their money back when calling this method.
pub fn clearance_for_item(&mut self, token_id: CorgiId) {
let (key, mut bids, auction_ends) = self.get_auction(&token_id);
let corgi = {
let corgi = self.corgis.get(&key);
assert!(corgi.is_some());
corgi.unwrap()
};
let owner = corgi.owner.clone();
let end_auction = |it, bidder, price| {
if env::block_timestamp() <= auction_ends {
env::panic("Token still in auction".as_bytes())
}
self.auctions.remove(&key);
self.move_corgi(key, token_id, owner.clone(), bidder, corgi);
Promise::new(owner.clone()).transfer(price);
for (bidder, (price, _timestamp)) in it {
Promise::new(bidder).transfer(price);
}
};
let mut it = bids.into_iter();
let signer = env::predecessor_account_id();
if signer == owner.clone() {
if let Some((bidder, (price, _timestamp))) = it.next() {
end_auction(it, bidder, price);
} else {
self.auctions.remove(&key);
}
} else {
if let Some((bidder, (price, _timestamp))) = it.next() {
if bidder == signer {
end_auction(it, bidder, price);
return;
}
}
match bids.remove(&signer) {
None => env::panic("Cannot clear an item if not bidding for it".as_bytes()),
Some((price, _)) => Promise::new(signer).transfer(price),
};
}
}
/// Internal method to transfer a corgi.
fn move_corgi(
&mut self,
key: CorgiKey,
id: CorgiId,
old_owner: AccountId,
new_owner: AccountId,
mut corgi: Corgi,
) {
self.delete_corgi_from(id, old_owner.clone());
corgi.owner = new_owner;
corgi.sender = old_owner;
corgi.modified = env::block_timestamp();
self.push_corgi(key, corgi);
}
/// Gets the `Corgi` with `id`.
fn get_corgi(&self, id: &CorgiId) -> (CorgiKey, Corgi) {
let key = decode(id);
match self.corgis.get(&key) {
None => env::panic("Given corgi id was not found".as_bytes()),
Some(corgi) => {
assert!(corgi.id == *id);
(key, corgi)
}
}
}
/// Gets auction information for the `Corgi` with `token_id` or panics.
fn get_auction(&self, token_id: &CorgiId) -> (CorgiKey, Dict<AccountId, (u128, u64)>, u64) {
let key = decode(&token_id);
match self.auctions.get(&key) {
None => env::panic("Corgi is not available for sale".as_bytes()),
Some((bids, expires)) => (key, bids, expires),
}
}
/// Gets sale information for a given `Corgi`.
fn get_for_sale(&self, key: CorgiKey, corgi: Corgi) -> CorgiDTO {
match self.auctions.get(&key) {
None => CorgiDTO::new(corgi),
Some(item) => CorgiDTO::for_sale(corgi, item),
}
}
/// Inserts a `Corgi` at the top of the dictionary.
fn push_corgi(&mut self, key: CorgiKey, corgi: Corgi) -> Corgi {
env::log("push_corgi".as_bytes());
let corgi = self.corgis.push_front(&key, corgi);
let mut ids = self.corgis_by_owner.get(&corgi.owner).unwrap_or_else(|| {
Dict::new(get_collection_key(
CORGIS_BY_OWNER_PREFIX,
corgi.owner.clone(),
))
});
ids.push_front(&key, ());
self.corgis_by_owner.insert(&corgi.owner, &ids);
corgi
}
/// Ensures the given `Corgi` with `key` is not for sale.
fn panic_if_corgi_is_locked(&self, key: CorgiKey) {
if self.auctions.get(&key).is_some() {
env::panic("Corgi is currently locked".as_bytes());
}
}
}
fn get_collection_key(prefix: &str, mut key: String) -> Vec<u8> {
key.insert_str(0, prefix);
key.as_bytes().to_vec()
}
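// For example (account id illustrative),
// `get_collection_key(CORGIS_BY_OWNER_PREFIX, "alice.near".to_string())`
// returns `b"Balice.near".to_vec()`, giving each owner's `Dict` a unique
// storage prefix on-chain.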
lib.rs | #![deny(warnings)]
pub mod corgi;
pub mod dict;
#[cfg(test)]
pub mod tests;
use crate::corgi::{decode, encode, Corgi, CorgiDTO, CorgiId, CorgiKey, Rarity};
use crate::dict::Dict;
use near_env::near_envlog;
use near_sdk::{
borsh::{self, BorshDeserialize, BorshSerialize},
collections::UnorderedMap,
env,
json_types::U64,
near_bindgen,
wee_alloc::WeeAlloc,
AccountId, Balance, Promise,
};
use std::{convert::TryInto, mem::size_of, usize};
#[global_allocator]
static ALLOC: WeeAlloc = WeeAlloc::INIT;
/// Fee to pay (in yocto Ⓝ) to allow the user to store Corgis on-chain.
/// This value can be set by modifiying the `mint_fee` field in `config.json`.
const MINT_FEE: u128 = include!(concat!(env!("OUT_DIR"), "/mint_fee.val"));
/// Indicates how many Corgi are returned at most in the `get_global_corgis` method.
/// This value can be set by modifiying the `page_limit` field in `config.json`.
const PAGE_LIMIT: u32 = include!(concat!(env!("OUT_DIR"), "/page_limit.val"));
/// Keys used to identify our structures within the NEAR blockchain.
const CORGIS: &[u8] = b"a";
const CORGIS_BY_OWNER: &[u8] = b"b";
const CORGIS_BY_OWNER_PREFIX: &str = "B";
const AUCTIONS: &[u8] = b"d";
const AUCTIONS_PREFIX: &str = "D";
/// Holds our data model.
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Model {
/// A mapping from `CorgiKey` to `Corgi` to have quick access to corgis.
/// `Dict` is used to keep corgis sorted by creation timestamp.
corgis: Dict<CorgiKey, Corgi>,
/// Represents which account holds which `Corgi`.
/// Each account can own several corgis.
/// The inner `Dict` acts as a set, since it is mapped to `()`.
corgis_by_owner: UnorderedMap<AccountId, Dict<CorgiKey, ()>>,
/// Internal structure to store auctions for a given corgi.
/// It is a mapping from `CorgiKey` to a tuple.
/// The first component of the tuple is a `Dict`, which represents the bids for that corgi.
/// Each entry in this `Dict` maps the bidder (`AccountId`) to the bid price and bidding timestamp.
/// The seconds component of the tuple represents the expiration of the auction,
/// as a timestamp in nanoseconds.
auctions: UnorderedMap<CorgiKey, (Dict<AccountId, (Balance, u64)>, u64)>,
}
impl Default for Model {
fn default() -> Self {
env::log(format!("init v{}", env!("CARGO_PKG_VERSION")).as_bytes());
Self {
corgis: Dict::new(CORGIS.to_vec()),
corgis_by_owner: UnorderedMap::new(CORGIS_BY_OWNER.to_vec()),
auctions: UnorderedMap::new(AUCTIONS.to_vec()),
}
}
}
#[near_bindgen]
#[near_envlog(skip_args, only_pub)]
impl Model {
/// Creates a `Corgi` under the `predecessor_account_id`.
/// Returns the newly generated `Corgi`
/// The corgi `id` is encoded using base58.
/// This method is `payable` because the caller needs to cover the cost to mint the corgi.
/// The corresponding `attached_deposit` must be `MINT_FEE`.
#[payable]
pub fn create_corgi(
&mut self,
name: String,
quote: String,
color: String,
background_color: String,
) -> CorgiDTO {
let owner = env::predecessor_account_id();
let deposit = env::attached_deposit();
if deposit != MINT_FEE {
panic!("Deposit must be MINT_FEE but was {}", deposit)
}
macro_rules! check {
($value:ident, $max:expr, $message:expr) => {{
if $value.len() > $max {
env::panic($message.as_bytes());
}
}};
}
check!(name, 32, "Name too large");
check!(quote, 256, "Quote too large");
check!(color, 64, "Color too large");
check!(background_color, 64, "Backcolor too large");
let now = env::block_timestamp();
let key = env::random_seed()[..size_of::<CorgiKey>()]
.try_into()
.unwrap();
let corgi = Corgi {
id: encode(key),
name,
quote,
color,
background_color,
rate: Rarity::from_seed(env::random_seed()),
owner,
created: now,
modified: now,
sender: "".to_string(),
};
CorgiDTO::new(self.push_corgi(key, corgi))
}
/// Gets `Corgi` by the given `id`.
/// Panics if `id` is not found.
pub fn get_corgi_by_id(&self, id: CorgiId) -> CorgiDTO {
let (key, corgi) = self.get_corgi(&id);
self.get_for_sale(key, corgi)
}
/// Gets all `Corgi`s owned by the `owner` account id.
/// Empty `vec` if `owner` does not hold any `Corgi`.
pub fn get_corgis_by_owner(&self, owner: AccountId) -> Vec<CorgiDTO> {
match self.corgis_by_owner.get(&owner) {
None => Vec::new(),
Some(list) => list
.into_iter()
.map(|(key, _)| {
let maybe_corgi = self.corgis.get(&key);
assert!(maybe_corgi.is_some());
let corgi = maybe_corgi.unwrap();
assert!(corgi.id == encode(key));
assert!(corgi.owner == owner);
self.get_for_sale(key, corgi)
})
.collect(),
}
}
/// Delete the `Corgi` by its `id`.
/// Only the `owner` of the `Corgi` can delete it.
pub fn delete_corgi(&mut self, id: CorgiId) {
let owner = env::predecessor_account_id();
self.delete_corgi_from(id, owner);
}
/// Internal method to delete the corgi with `id` owned by `owner`.
/// Panics if `owner` does not own the corgi with `id`.
fn delete_corgi_from(&mut self, id: CorgiId, owner: AccountId) {
match self.corgis_by_owner.get(&owner) {
None => env::panic("You do not have corgis to delete from".as_bytes()),
Some(mut list) => {
let key = decode(&id);
self.panic_if_corgi_is_locked(key);
if list.remove(&key).is_none() {
env::panic("Corgi id does not belong to account".as_bytes());
}
self.corgis_by_owner.insert(&owner, &list);
let was_removed = self.corgis.remove(&key);
assert!(was_removed.is_some());
}
}
}
/// Returns a list of all `Corgi`s that have been created.
/// Number of `Corgi`s returned is limited by `PAGE_LIMIT`.
pub fn get_global_corgis(&self) -> Vec<CorgiDTO> {
let mut result = Vec::new();
for (key, corgi) in &self.corgis {
if result.len() >= PAGE_LIMIT as usize {
break;
}
result.push(self.get_for_sale(key, corgi));
}
result
}
/// Transfer the Corgi with the given `id` to `receiver`.
/// Only the `owner` of the corgi can make such a transfer.
pub fn transfer_corgi(&mut self, receiver: AccountId, id: CorgiId) {
if !env::is_valid_account_id(receiver.as_bytes()) {
env::panic("Invalid receiver account id".as_bytes());
}
let sender = env::predecessor_account_id();
if sender == receiver {
env::panic("Self transfers are not allowed".as_bytes());
}
let (key, corgi) = self.get_corgi(&id);
assert_eq!(corgi.id, id);
if sender != corgi.owner {
env::panic("Sender must own Corgi".as_bytes());
}
self.panic_if_corgi_is_locked(key);
self.move_corgi(key, id, sender, receiver, corgi)
}
/// Returns all `Corgi`s currently for sale.
/// That is, all `Corgi`s which are in auction.
pub fn get_items_for_sale(&self) -> Vec<CorgiDTO> {
let mut result = Vec::new();
for (key, item) in self.auctions.iter() {
let corgi = self.corgis.get(&key);
assert!(corgi.is_some());
let corgi = corgi.unwrap();
result.push(CorgiDTO::for_sale(corgi, item));
}
result
}
/// Puts the given `Corgi` for sale.
/// The `duration` indicates for how long the auction should last, in seconds.
pub fn add_item_for_sale(&mut self, token_id: CorgiId, duration: u32) -> U64 {
let (key, corgi) = self.get_corgi(&token_id);
if corgi.owner != env::predecessor_account_id() {
env::panic("Only token owner can add item for sale".as_bytes())
}
if let None = self.auctions.get(&key) {
let bids = Dict::new(get_collection_key(AUCTIONS_PREFIX, token_id));
let expires = env::block_timestamp() + duration as u64 * 1_000_000_000;
self.auctions.insert(&key, &(bids, expires));
U64(expires)
} else {
env::panic("Corgi already for sale".as_bytes());
}
}
/// Makes a bid for a `Corgi` already in auction.
/// This is a `payable` method, meaning the contract will escrow the `attached_deposit`
/// until the auction ends.
#[payable]
pub fn bid_for_item(&mut self, token_id: CorgiId) {
let (key, mut bids, auction_ends) = self.get_auction(&token_id);
let bidder = env::predecessor_account_id();
if bidder == self.corgis.get(&key).expect("Corgi not found").owner {
env::panic("You cannot bid for your own Corgi".as_bytes())
}
if env::block_timestamp() > auction_ends {
env::panic("Auction for corgi has expired".as_bytes())
}
let price = env::attached_deposit() + bids.get(&bidder).map(|(p, _)| p).unwrap_or_default();
let top_price = bids.into_iter().next().map(|(_, (p, _))| p).unwrap_or(0);
if price <= top_price {
panic!("Bid {} does not cover top bid {}", price, top_price)
}
bids.remove(&bidder);
bids.push_front(&bidder, (price, env::block_timestamp()));
self.auctions.insert(&key, &(bids, auction_ends));
}
/// Makes a clearance for the given `Corgi`.
/// Only the corgi `owner` or the highest bidder can end an auction after it expires.
/// All other bidders can get their money back when calling this method.
pub fn clearance_for_item(&mut self, token_id: CorgiId) {
let (key, mut bids, auction_ends) = self.get_auction(&token_id);
let corgi = {
let corgi = self.corgis.get(&key);
assert!(corgi.is_some());
corgi.unwrap()
};
let owner = corgi.owner.clone();
let end_auction = |it, bidder, price| {
if env::block_timestamp() <= auction_ends {
env::panic("Token still in auction".as_bytes())
}
self.auctions.remove(&key);
self.move_corgi(key, token_id, owner.clone(), bidder, corgi);
Promise::new(owner.clone()).transfer(price);
for (bidder, (price, _timestamp)) in it {
Promise::new(bidder).transfer(price);
}
};
let mut it = bids.into_iter();
let signer = env::predecessor_account_id();
if signer == owner.clone() {
if let Some((bidder, (price, _timestamp))) = it.next() {
end_auction(it, bidder, price);
} else {
self.auctions.remove(&key);
}
} else {
if let Some((bidder, (price, _timestamp))) = it.next() {
if bidder == signer {
end_auction(it, bidder, price);
return;
}
}
match bids.remove(&signer) {
None => env::panic("Cannot clear an item if not bidding for it".as_bytes()),
Some((price, _)) => Promise::new(signer).transfer(price),
};
}
}
/// Internal method to transfer a corgi.
fn move_corgi(
&mut self,
key: CorgiKey,
id: CorgiId,
old_owner: AccountId,
new_owner: AccountId,
mut corgi: Corgi,
) {
self.delete_corgi_from(id, old_owner.clone());
corgi.owner = new_owner;
corgi.sender = old_owner;
corgi.modified = env::block_timestamp();
self.push_corgi(key, corgi);
}
/// Gets the `Corgi` with `id`.
fn get_corgi(&self, id: &CorgiId) -> (CorgiKey, Corgi) {
let key = decode(id);
match self.corgis.get(&key) {
None => env::panic("Given corgi id was not found".as_bytes()),
Some(corgi) => {
assert!(corgi.id == *id);
(key, corgi)
}
}
}
/// Gets auction information for the `Corgi` with `token_id` or panics.
fn get_auction(&self, token_id: &CorgiId) -> (CorgiKey, Dict<AccountId, (u128, u64)>, u64) {
let key = decode(&token_id);
match self.auctions.get(&key) {
None => env::panic("Corgi is not available for sale".as_bytes()),
Some((bids, expires)) => (key, bids, expires),
}
}
/// Gets sale information for a given `Corgi`.
fn get_for_sale(&self, key: CorgiKey, corgi: Corgi) -> CorgiDTO {
match self.auctions.get(&key) {
None => CorgiDTO::new(corgi),
Some(item) => CorgiDTO::for_sale(corgi, item),
}
}
/// Inserts a `Corgi` into the top the dictionary.
fn push_corgi(&mut self, key: CorgiKey, corgi: Corgi) -> Corgi {
env::log("push_corgi".as_bytes());
let corgi = self.corgis.push_front(&key, corgi);
let mut ids = self.corgis_by_owner.get(&corgi.owner).unwrap_or_else(|| {
Dict::new(get_collection_key(
CORGIS_BY_OWNER_PREFIX,
corgi.owner.clone(),
))
});
ids.push_front(&key, ());
self.corgis_by_owner.insert(&corgi.owner, &ids);
corgi
}
/// Ensures the given `Corgi` with `key` is not for sale.
fn panic_if_corgi_is_locked(&self, key: CorgiKey) {
if self.auctions.get(&key).is_some() {
env::panic("Corgi is currently locked".as_bytes());
}
} | }
fn get_collection_key(prefix: &str, mut key: String) -> Vec<u8> {
key.insert_str(0, prefix);
key.as_bytes().to_vec()
} | random_line_split |
|
lib.rs | #![deny(warnings)]
pub mod corgi;
pub mod dict;
#[cfg(test)]
pub mod tests;
use crate::corgi::{decode, encode, Corgi, CorgiDTO, CorgiId, CorgiKey, Rarity};
use crate::dict::Dict;
use near_env::near_envlog;
use near_sdk::{
borsh::{self, BorshDeserialize, BorshSerialize},
collections::UnorderedMap,
env,
json_types::U64,
near_bindgen,
wee_alloc::WeeAlloc,
AccountId, Balance, Promise,
};
use std::{convert::TryInto, mem::size_of, usize};
#[global_allocator]
static ALLOC: WeeAlloc = WeeAlloc::INIT;
/// Fee to pay (in yocto Ⓝ) to allow the user to store Corgis on-chain.
/// This value can be set by modifiying the `mint_fee` field in `config.json`.
const MINT_FEE: u128 = include!(concat!(env!("OUT_DIR"), "/mint_fee.val"));
/// Indicates how many Corgi are returned at most in the `get_global_corgis` method.
/// This value can be set by modifiying the `page_limit` field in `config.json`.
const PAGE_LIMIT: u32 = include!(concat!(env!("OUT_DIR"), "/page_limit.val"));
/// Keys used to identify our structures within the NEAR blockchain.
const CORGIS: &[u8] = b"a";
const CORGIS_BY_OWNER: &[u8] = b"b";
const CORGIS_BY_OWNER_PREFIX: &str = "B";
const AUCTIONS: &[u8] = b"d";
const AUCTIONS_PREFIX: &str = "D";
/// Holds our data model.
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Model {
/// A mapping from `CorgiKey` to `Corgi` to have quick access to corgis.
/// `Dict` is used to keep corgis sorted by creation timestamp.
corgis: Dict<CorgiKey, Corgi>,
/// Represents which account holds which `Corgi`.
/// Each account can own several corgis.
/// The inner `Dict` acts as a set, since it is mapped to `()`.
corgis_by_owner: UnorderedMap<AccountId, Dict<CorgiKey, ()>>,
/// Internal structure to store auctions for a given corgi.
/// It is a mapping from `CorgiKey` to a tuple.
/// The first component of the tuple is a `Dict`, which represents the bids for that corgi.
/// Each entry in this `Dict` maps the bidder (`AccountId`) to the bid price and bidding timestamp.
/// The seconds component of the tuple represents the expiration of the auction,
/// as a timestamp in nanoseconds.
auctions: UnorderedMap<CorgiKey, (Dict<AccountId, (Balance, u64)>, u64)>,
}
impl Default for Model {
fn de | -> Self {
env::log(format!("init v{}", env!("CARGO_PKG_VERSION")).as_bytes());
Self {
corgis: Dict::new(CORGIS.to_vec()),
corgis_by_owner: UnorderedMap::new(CORGIS_BY_OWNER.to_vec()),
auctions: UnorderedMap::new(AUCTIONS.to_vec()),
}
}
}
#[near_bindgen]
#[near_envlog(skip_args, only_pub)]
impl Model {
/// Creates a `Corgi` under the `predecessor_account_id`.
    /// Returns the newly generated `Corgi`.
/// The corgi `id` is encoded using base58.
/// This method is `payable` because the caller needs to cover the cost to mint the corgi.
/// The corresponding `attached_deposit` must be `MINT_FEE`.
#[payable]
pub fn create_corgi(
&mut self,
name: String,
quote: String,
color: String,
background_color: String,
) -> CorgiDTO {
let owner = env::predecessor_account_id();
let deposit = env::attached_deposit();
if deposit != MINT_FEE {
panic!("Deposit must be MINT_FEE but was {}", deposit)
}
macro_rules! check {
($value:ident, $max:expr, $message:expr) => {{
if $value.len() > $max {
env::panic($message.as_bytes());
}
}};
}
check!(name, 32, "Name too large");
check!(quote, 256, "Quote too large");
check!(color, 64, "Color too large");
check!(background_color, 64, "Backcolor too large");
let now = env::block_timestamp();
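        // Derive the key from the leading bytes of the block's random seed;
        // assuming `CorgiKey` is a fixed-size byte array, the slice length
        // matches exactly and `try_into` cannot fail.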
let key = env::random_seed()[..size_of::<CorgiKey>()]
.try_into()
.unwrap();
let corgi = Corgi {
id: encode(key),
name,
quote,
color,
background_color,
rate: Rarity::from_seed(env::random_seed()),
owner,
created: now,
modified: now,
sender: "".to_string(),
};
CorgiDTO::new(self.push_corgi(key, corgi))
}
/// Gets `Corgi` by the given `id`.
/// Panics if `id` is not found.
pub fn get_corgi_by_id(&self, id: CorgiId) -> CorgiDTO {
let (key, corgi) = self.get_corgi(&id);
self.get_for_sale(key, corgi)
}
/// Gets all `Corgi`s owned by the `owner` account id.
/// Empty `vec` if `owner` does not hold any `Corgi`.
pub fn get_corgis_by_owner(&self, owner: AccountId) -> Vec<CorgiDTO> {
match self.corgis_by_owner.get(&owner) {
None => Vec::new(),
Some(list) => list
.into_iter()
.map(|(key, _)| {
let maybe_corgi = self.corgis.get(&key);
assert!(maybe_corgi.is_some());
let corgi = maybe_corgi.unwrap();
assert!(corgi.id == encode(key));
assert!(corgi.owner == owner);
self.get_for_sale(key, corgi)
})
.collect(),
}
}
/// Delete the `Corgi` by its `id`.
/// Only the `owner` of the `Corgi` can delete it.
pub fn delete_corgi(&mut self, id: CorgiId) {
let owner = env::predecessor_account_id();
self.delete_corgi_from(id, owner);
}
/// Internal method to delete the corgi with `id` owned by `owner`.
/// Panics if `owner` does not own the corgi with `id`.
fn delete_corgi_from(&mut self, id: CorgiId, owner: AccountId) {
match self.corgis_by_owner.get(&owner) {
None => env::panic("You do not have corgis to delete from".as_bytes()),
Some(mut list) => {
let key = decode(&id);
self.panic_if_corgi_is_locked(key);
if list.remove(&key).is_none() {
env::panic("Corgi id does not belong to account".as_bytes());
}
self.corgis_by_owner.insert(&owner, &list);
let was_removed = self.corgis.remove(&key);
assert!(was_removed.is_some());
}
}
}
/// Returns a list of all `Corgi`s that have been created.
/// Number of `Corgi`s returned is limited by `PAGE_LIMIT`.
pub fn get_global_corgis(&self) -> Vec<CorgiDTO> {
let mut result = Vec::new();
for (key, corgi) in &self.corgis {
if result.len() >= PAGE_LIMIT as usize {
break;
}
result.push(self.get_for_sale(key, corgi));
}
result
}
/// Transfer the Corgi with the given `id` to `receiver`.
/// Only the `owner` of the corgi can make such a transfer.
pub fn transfer_corgi(&mut self, receiver: AccountId, id: CorgiId) {
if !env::is_valid_account_id(receiver.as_bytes()) {
env::panic("Invalid receiver account id".as_bytes());
}
let sender = env::predecessor_account_id();
if sender == receiver {
env::panic("Self transfers are not allowed".as_bytes());
}
let (key, corgi) = self.get_corgi(&id);
assert_eq!(corgi.id, id);
if sender != corgi.owner {
env::panic("Sender must own Corgi".as_bytes());
}
self.panic_if_corgi_is_locked(key);
self.move_corgi(key, id, sender, receiver, corgi)
}
/// Returns all `Corgi`s currently for sale.
/// That is, all `Corgi`s which are in auction.
pub fn get_items_for_sale(&self) -> Vec<CorgiDTO> {
let mut result = Vec::new();
for (key, item) in self.auctions.iter() {
let corgi = self.corgis.get(&key);
assert!(corgi.is_some());
let corgi = corgi.unwrap();
result.push(CorgiDTO::for_sale(corgi, item));
}
result
}
/// Puts the given `Corgi` for sale.
/// The `duration` indicates for how long the auction should last, in seconds.
pub fn add_item_for_sale(&mut self, token_id: CorgiId, duration: u32) -> U64 {
let (key, corgi) = self.get_corgi(&token_id);
if corgi.owner != env::predecessor_account_id() {
env::panic("Only token owner can add item for sale".as_bytes())
}
        if self.auctions.get(&key).is_none() {
let bids = Dict::new(get_collection_key(AUCTIONS_PREFIX, token_id));
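            // `duration` is given in seconds; block timestamps are in nanoseconds.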
let expires = env::block_timestamp() + duration as u64 * 1_000_000_000;
self.auctions.insert(&key, &(bids, expires));
U64(expires)
} else {
env::panic("Corgi already for sale".as_bytes());
}
}
/// Makes a bid for a `Corgi` already in auction.
/// This is a `payable` method, meaning the contract will escrow the `attached_deposit`
/// until the auction ends.
#[payable]
pub fn bid_for_item(&mut self, token_id: CorgiId) {
let (key, mut bids, auction_ends) = self.get_auction(&token_id);
let bidder = env::predecessor_account_id();
if bidder == self.corgis.get(&key).expect("Corgi not found").owner {
env::panic("You cannot bid for your own Corgi".as_bytes())
}
if env::block_timestamp() > auction_ends {
env::panic("Auction for corgi has expired".as_bytes())
}
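        // A bidder's total is the new deposit plus anything it already escrowed.
        // The front entry holds the current top bid, since every accepted bid
        // must beat the previous top and is pushed to the front.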
let price = env::attached_deposit() + bids.get(&bidder).map(|(p, _)| p).unwrap_or_default();
let top_price = bids.into_iter().next().map(|(_, (p, _))| p).unwrap_or(0);
if price <= top_price {
panic!("Bid {} does not cover top bid {}", price, top_price)
}
bids.remove(&bidder);
bids.push_front(&bidder, (price, env::block_timestamp()));
self.auctions.insert(&key, &(bids, auction_ends));
}
/// Makes a clearance for the given `Corgi`.
/// Only the corgi `owner` or the highest bidder can end an auction after it expires.
/// All other bidders can get their money back when calling this method.
pub fn clearance_for_item(&mut self, token_id: CorgiId) {
let (key, mut bids, auction_ends) = self.get_auction(&token_id);
let corgi = {
let corgi = self.corgis.get(&key);
assert!(corgi.is_some());
corgi.unwrap()
};
let owner = corgi.owner.clone();
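        // Finalizes the auction: hands the corgi to the winner, pays the owner
        // the winning price, and refunds every remaining bidder.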
let end_auction = |it, bidder, price| {
if env::block_timestamp() <= auction_ends {
env::panic("Token still in auction".as_bytes())
}
self.auctions.remove(&key);
self.move_corgi(key, token_id, owner.clone(), bidder, corgi);
Promise::new(owner.clone()).transfer(price);
for (bidder, (price, _timestamp)) in it {
Promise::new(bidder).transfer(price);
}
};
let mut it = bids.into_iter();
let signer = env::predecessor_account_id();
        if signer == owner {
if let Some((bidder, (price, _timestamp))) = it.next() {
end_auction(it, bidder, price);
} else {
self.auctions.remove(&key);
}
} else {
if let Some((bidder, (price, _timestamp))) = it.next() {
if bidder == signer {
end_auction(it, bidder, price);
return;
}
}
match bids.remove(&signer) {
None => env::panic("Cannot clear an item if not bidding for it".as_bytes()),
Some((price, _)) => Promise::new(signer).transfer(price),
};
}
}
/// Internal method to transfer a corgi.
fn move_corgi(
&mut self,
key: CorgiKey,
id: CorgiId,
old_owner: AccountId,
new_owner: AccountId,
mut corgi: Corgi,
) {
self.delete_corgi_from(id, old_owner.clone());
corgi.owner = new_owner;
corgi.sender = old_owner;
corgi.modified = env::block_timestamp();
self.push_corgi(key, corgi);
}
/// Gets the `Corgi` with `id`.
fn get_corgi(&self, id: &CorgiId) -> (CorgiKey, Corgi) {
let key = decode(id);
match self.corgis.get(&key) {
None => env::panic("Given corgi id was not found".as_bytes()),
Some(corgi) => {
assert!(corgi.id == *id);
(key, corgi)
}
}
}
/// Gets auction information for the `Corgi` with `token_id` or panics.
fn get_auction(&self, token_id: &CorgiId) -> (CorgiKey, Dict<AccountId, (u128, u64)>, u64) {
        let key = decode(token_id);
match self.auctions.get(&key) {
None => env::panic("Corgi is not available for sale".as_bytes()),
Some((bids, expires)) => (key, bids, expires),
}
}
/// Gets sale information for a given `Corgi`.
fn get_for_sale(&self, key: CorgiKey, corgi: Corgi) -> CorgiDTO {
match self.auctions.get(&key) {
None => CorgiDTO::new(corgi),
Some(item) => CorgiDTO::for_sale(corgi, item),
}
}
    /// Inserts a `Corgi` at the top of the dictionary.
fn push_corgi(&mut self, key: CorgiKey, corgi: Corgi) -> Corgi {
env::log("push_corgi".as_bytes());
let corgi = self.corgis.push_front(&key, corgi);
let mut ids = self.corgis_by_owner.get(&corgi.owner).unwrap_or_else(|| {
Dict::new(get_collection_key(
CORGIS_BY_OWNER_PREFIX,
corgi.owner.clone(),
))
});
ids.push_front(&key, ());
self.corgis_by_owner.insert(&corgi.owner, &ids);
corgi
}
/// Ensures the given `Corgi` with `key` is not for sale.
fn panic_if_corgi_is_locked(&self, key: CorgiKey) {
if self.auctions.get(&key).is_some() {
env::panic("Corgi is currently locked".as_bytes());
}
}
}
fn get_collection_key(prefix: &str, mut key: String) -> Vec<u8> {
key.insert_str(0, prefix);
key.as_bytes().to_vec()
}
| fault() | identifier_name |
index.js | 'use strict';
import React, { Component } from 'react';
import { Image, View, Switch, TouchableOpacity, Platform ,TextInput} from 'react-native';
import { connect } from 'react-redux';
import {Actions} from 'react-native-router-flux';
import { Container, Header, Content, Text, Button, Icon, Thumbnail, InputGroup, Input } from 'native-base';
import DatePicker from 'react-native-datepicker';
var Define = require('../../../Define');
var Debug = require('../../../Util/Debug');
var Themes = require('../../../Themes');
var Util = require('../../../Util/Util');
var Include = require('../../../Include');
var {popupActions} = require('../../popups/PopupManager');
import DefaultPopup from '../../popups/DefaultPopup'
var {globalVariableManager}= require('../../modules/GlobalVariableManager');
import isEmail from 'validator/lib/isEmail';
import FadeDownDefaultPopup from '../../popups/FadeDownDefaultPopup'
import UserActions_MiddleWare from '../../../actions/UserActions_MiddleWare'
import styles from './styles';
var primary = Themes.current.factor.brandPrimary;
let _ = require('lodash');
import PropTypes from 'prop-types'
React.PropTypes = PropTypes;
class Settings extends Component {
constructor(props) {
super(props);
let {user} = this.props;
this.state = {};
this.handleUpdateProfile = this.handleUpdateProfile.bind(this);
this.userInfo={
Username: _.get(user, 'memberInfo.member.name', ''),
email: _.get(user, 'memberInfo.member.email', ''),
phone: _.get(user, 'memberInfo.member.phone', '')
}
this.constructor.childContextTypes = {
theme: React.PropTypes.object,
}
this.infoFromToken = null;
this.handleLoginFacebook = this.handleLoginFacebook.bind(this);
this.updateProfile = this.updateProfile.bind(this);
this.forceUpdate = (!user.memberInfo.member.facebook.name);
}
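  // Persists the profile update, refreshes the cached user info, and forces
  // the mode-switch screen for accounts that had no profile data yet.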
updateProfile(objUpdate) {
var {dispatch,user} = this.props;
dispatch(UserActions_MiddleWare.updateProfile(objUpdate))
.then(()=>{
globalVariableManager.rootView.showToast('Cập nhật thông tin thành công');
dispatch(UserActions_MiddleWare.get())
.then(() => {
if(this.forceUpdate) {
return (
Actions.SwitchModeScreen({
type: 'reset'
})
)
}
})
})
.catch(err => {
popupActions.setRenderContentAndShow(FadeDownDefaultPopup,
{
description:'Cập nhật thông tin thất bại'
})
})
}
handleUpdateProfile() {
let {dispatch, user} = this.props;
let message = '';
let username = this.userInfo.Username.trim();
let email = this.userInfo.email.trim();
const objUpdate = {
name:this.userInfo.Username,
email:this.userInfo.email,
};
if(username === '') { | if(email === '') {
if(!message) {
message = 'Email không được để trống'
} else {
message += '\nEmail không được để trống';
}
}
if (user.memberInfo.member.facebook.name
&& objUpdate.name.trim() === _.get(user, 'memberInfo.member.name', '')
&& objUpdate.email.trim() === _.get(user, 'memberInfo.member.email', '')) {
message = 'Bạn chưa thay đổi thông tin cập nhật'
}
if(message) {
globalVariableManager.rootView.showToast(message)
} else {
this.updateProfile(objUpdate);
}
}
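  // Runs the Facebook login flow, fetches the profile behind the access
  // token, and saves it through updateProfile.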
handleLoginFacebook(accessToken) {
const self = this;
Debug.log2(`handleLogin`, accessToken);
let {dispatch, appSetting} = this.props;
let token = '';
let objUpdate = {};
dispatch(UserActions_MiddleWare.loginFacebook())
.then(access_token => {
token = access_token;
return dispatch(UserActions_MiddleWare.getInfoFromAccessToken({access_token}));
})
.then((result) => {
this.infoFromToken = result.res.data;
this.infoFromToken.access_token = token;
objUpdate = {
name: this.infoFromToken.name,
email: this.infoFromToken.email,
avatar: this.infoFromToken.picture,
id: this.infoFromToken.id,
access_token: this.infoFromToken.access_token
}
this.updateProfile(objUpdate);
})
.catch(err => {
globalVariableManager.rootView.showToast('Đã có lỗi xảy ra với quá trình đăng nhập Facebook');
});
}
render() {
var {user,dispatch,appSetting} = this.props;
let isAuthen = false;
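    // The verification badge comes from ship or shop info, depending on the active mode.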
if(appSetting.mode === 'shipper') {
isAuthen = _.get(user, 'memberInfo.member.ship.isAuthen', 0);
} else {
isAuthen = _.get(user, 'memberInfo.member.shop.isAuthen', 0);
}
return (
<View style={styles.container}>
<View style={{justifyContent: 'center'}}>
<View style={styles.bg}>
<View style={{marginTop: 20,flexDirection:'row'}}>
{appSetting.mode === 'shipper'?
<Image style={{position: 'absolute', top: 10, right: 10, left: 10, width: 100, height: 100, backgroundColor: 'transparent'}} source={isAuthen ? Define.assets.Images.daxacthucship : Define.assets.Images.chuaxacthuc} /> : null}
<View style={{flex:1}}>
<TouchableOpacity style={{alignSelf: 'center'}}>
{user.memberInfo.member.facebook.picture?
<View>
<Thumbnail source={{uri:user.memberInfo.member.facebook.picture}} style={styles.profilePic} />
</View>:
<View style={{width:60,height:60}}></View>
}
</TouchableOpacity>
</View>
</View>
{user.memberInfo.member.facebook.name ? null :
<TouchableOpacity
onPress={() => {
this.handleLoginFacebook();
}}>
<View style={{backgroundColor: '#3b5998', borderRadius: 25, height: 40, marginTop:15, alignItems: 'center', justifyContent: 'center', paddingHorizontal: 20, marginHorizontal:20}}>
<Include.Text style={{color: '#fff', fontWeight: 'bold'}}>
Cập nhật thông tin qua Facebook
</Include.Text>
</View>
</TouchableOpacity>
}
<View style={styles.signupContainer}>
<View style={styles.inputGrp}>
<Icon name='ios-person-outline' style={styles.icon} />
<TextInput
underlineColorAndroid ='transparent'
placeholder='Họ và tên'
defaultValue={_.get(user, 'memberInfo.member.name', '')}
blurOnSubmit={false}
placeholderTextColor='#bdc3c7' style={styles.input}
onChangeText ={(text)=>{this.userInfo.Username=text}}
onSubmitEditing={() => {
if (this.refs.EmailInput) {
this.refs.EmailInput.focus();
}
}}/>
</View>
<View style={styles.inputGrp}>
<Icon name='ios-mail-open-outline' style={styles.icon}/>
<TextInput ref='EmailInput' underlineColorAndroid ='transparent' placeholder='Email' defaultValue={user.memberInfo.member.email?user.memberInfo.member.email:''} placeholderTextColor='#bdc3c7' style={styles.input}
onChangeText ={(text)=>{this.userInfo.email=text }}
onSubmitEditing={() => {
if (this.refs.AddrInput) {
this.refs.AddrInput.focus();
}
}}
/>
</View>
<TouchableOpacity
onPress={() => {}}>
<View style={[styles.inputGrp,{paddingVertical:8}]}>
<Icon name='ios-call-outline' style={{color: '#fff'}}/>
<Include.Text style={{color:'#ffffff',fontSize:15, paddingLeft: 15}}>{this.userInfo.phone} </Include.Text>
{/*<TextInput underlineColorAndroid ='transparent' placeholder='Số điện thoại' defaultValue={this.userInfo.phone} placeholderTextColor='#bdc3c7' style={styles.input}
editable={false} />*/}
</View>
</TouchableOpacity>
</View>
<Button
style={{alignSelf: 'center'}}
onPress={this.handleUpdateProfile}>
<Text>Cập nhật</Text>
</Button>
{appSetting.mode === 'shop' ?
<TouchableOpacity
onPress={() => {
Actions.AuthenticationScreen({
type: 'reset'
})
}}>
<Include.Text style={{paddingRight: 20, paddingTop: 5, color: '#3498db', textAlign: 'right', fontSize: 16}}>Thông tin xác thực</Include.Text>
</TouchableOpacity>
: null}
<Include.Text style={{alignSelf: 'center', backgroundColor: 'transparent', color: '#ecf0f1', fontStyle: 'italic', paddingTop: 5}}>{`Mã giới thiệu: ${_.get(user, 'memberInfo.member.phone')}`}</Include.Text>
</View>
</View>
</View>
)
}
}
// function bindAction(dispatch) {
// return {
// }
// }
//
// export default connect(null, bindAction)(Settings);
export default Settings; | message += 'Họ và tên không được để trống';
} | random_line_split |
index.js | 'use strict';
import React, { Component } from 'react';
import { Image, View, Switch, TouchableOpacity, Platform ,TextInput} from 'react-native';
import { connect } from 'react-redux';
import {Actions} from 'react-native-router-flux';
import { Container, Header, Content, Text, Button, Icon, Thumbnail, InputGroup, Input } from 'native-base';
import DatePicker from 'react-native-datepicker';
var Define = require('../../../Define');
var Debug = require('../../../Util/Debug');
var Themes = require('../../../Themes');
var Util = require('../../../Util/Util');
var Include = require('../../../Include');
var {popupActions} = require('../../popups/PopupManager');
import DefaultPopup from '../../popups/DefaultPopup'
var {globalVariableManager}= require('../../modules/GlobalVariableManager');
import isEmail from 'validator/lib/isEmail';
import FadeDownDefaultPopup from '../../popups/FadeDownDefaultPopup'
import UserActions_MiddleWare from '../../../actions/UserActions_MiddleWare'
import styles from './styles';
var primary = Themes.current.factor.brandPrimary;
let _ = require('lodash');
import PropTypes from 'prop-types'
React.PropTypes = PropTypes;
class Settings extends Component {
constructor(props) |
updateProfile(objUpdate) {
var {dispatch,user} = this.props;
dispatch(UserActions_MiddleWare.updateProfile(objUpdate))
.then(()=>{
globalVariableManager.rootView.showToast('Cập nhật thông tin thành công');
dispatch(UserActions_MiddleWare.get())
.then(() => {
if(this.forceUpdate) {
return (
Actions.SwitchModeScreen({
type: 'reset'
})
)
}
})
})
.catch(err => {
popupActions.setRenderContentAndShow(FadeDownDefaultPopup,
{
description:'Cập nhật thông tin thất bại'
})
})
}
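  // Validates name and email and submits only when something actually changed.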
handleUpdateProfile() {
let {dispatch, user} = this.props;
let message = '';
let username = this.userInfo.Username.trim();
let email = this.userInfo.email.trim();
const objUpdate = {
name:this.userInfo.Username,
email:this.userInfo.email,
};
if(username === '') {
message += 'Họ và tên không được để trống';
}
if(email === '') {
if(!message) {
message = 'Email không được để trống'
} else {
message += '\nEmail không được để trống';
}
}
if (user.memberInfo.member.facebook.name
&& objUpdate.name.trim() === _.get(user, 'memberInfo.member.name', '')
&& objUpdate.email.trim() === _.get(user, 'memberInfo.member.email', '')) {
message = 'Bạn chưa thay đổi thông tin cập nhật'
}
if(message) {
globalVariableManager.rootView.showToast(message)
} else {
this.updateProfile(objUpdate);
}
}
handleLoginFacebook(accessToken) {
const self = this;
Debug.log2(`handleLogin`, accessToken);
let {dispatch, appSetting} = this.props;
let token = '';
let objUpdate = {};
dispatch(UserActions_MiddleWare.loginFacebook())
.then(access_token => {
token = access_token;
return dispatch(UserActions_MiddleWare.getInfoFromAccessToken({access_token}));
})
.then((result) => {
this.infoFromToken = result.res.data;
this.infoFromToken.access_token = token;
objUpdate = {
name: this.infoFromToken.name,
email: this.infoFromToken.email,
avatar: this.infoFromToken.picture,
id: this.infoFromToken.id,
access_token: this.infoFromToken.access_token
}
this.updateProfile(objUpdate);
})
.catch(err => {
globalVariableManager.rootView.showToast('Đã có lỗi xảy ra với quá trình đăng nhập Facebook');
});
}
render() {
var {user,dispatch,appSetting} = this.props;
let isAuthen = false;
if(appSetting.mode === 'shipper') {
isAuthen = _.get(user, 'memberInfo.member.ship.isAuthen', 0);
} else {
isAuthen = _.get(user, 'memberInfo.member.shop.isAuthen', 0);
}
return (
<View style={styles.container}>
<View style={{justifyContent: 'center'}}>
<View style={styles.bg}>
<View style={{marginTop: 20,flexDirection:'row'}}>
{appSetting.mode === 'shipper'?
<Image style={{position: 'absolute', top: 10, right: 10, left: 10, width: 100, height: 100, backgroundColor: 'transparent'}} source={isAuthen ? Define.assets.Images.daxacthucship : Define.assets.Images.chuaxacthuc} /> : null}
<View style={{flex:1}}>
<TouchableOpacity style={{alignSelf: 'center'}}>
{user.memberInfo.member.facebook.picture?
<View>
<Thumbnail source={{uri:user.memberInfo.member.facebook.picture}} style={styles.profilePic} />
</View>:
<View style={{width:60,height:60}}></View>
}
</TouchableOpacity>
</View>
</View>
{user.memberInfo.member.facebook.name ? null :
<TouchableOpacity
onPress={() => {
this.handleLoginFacebook();
}}>
<View style={{backgroundColor: '#3b5998', borderRadius: 25, height: 40, marginTop:15, alignItems: 'center', justifyContent: 'center', paddingHorizontal: 20, marginHorizontal:20}}>
<Include.Text style={{color: '#fff', fontWeight: 'bold'}}>
Cập nhật thông tin qua Facebook
</Include.Text>
</View>
</TouchableOpacity>
}
<View style={styles.signupContainer}>
<View style={styles.inputGrp}>
<Icon name='ios-person-outline' style={styles.icon} />
<TextInput
underlineColorAndroid ='transparent'
placeholder='Họ và tên'
defaultValue={_.get(user, 'memberInfo.member.name', '')}
blurOnSubmit={false}
placeholderTextColor='#bdc3c7' style={styles.input}
onChangeText ={(text)=>{this.userInfo.Username=text}}
onSubmitEditing={() => {
if (this.refs.EmailInput) {
this.refs.EmailInput.focus();
}
}}/>
</View>
<View style={styles.inputGrp}>
<Icon name='ios-mail-open-outline' style={styles.icon}/>
<TextInput ref='EmailInput' underlineColorAndroid ='transparent' placeholder='Email' defaultValue={user.memberInfo.member.email?user.memberInfo.member.email:''} placeholderTextColor='#bdc3c7' style={styles.input}
onChangeText ={(text)=>{this.userInfo.email=text }}
onSubmitEditing={() => {
if (this.refs.AddrInput) {
this.refs.AddrInput.focus();
}
}}
/>
</View>
<TouchableOpacity
onPress={() => {}}>
<View style={[styles.inputGrp,{paddingVertical:8}]}>
<Icon name='ios-call-outline' style={{color: '#fff'}}/>
<Include.Text style={{color:'#ffffff',fontSize:15, paddingLeft: 15}}>{this.userInfo.phone} </Include.Text>
{/*<TextInput underlineColorAndroid ='transparent' placeholder='Số điện thoại' defaultValue={this.userInfo.phone} placeholderTextColor='#bdc3c7' style={styles.input}
editable={false} />*/}
</View>
</TouchableOpacity>
</View>
<Button
style={{alignSelf: 'center'}}
onPress={this.handleUpdateProfile}>
<Text>Cập nhật</Text>
</Button>
{appSetting.mode === 'shop' ?
<TouchableOpacity
onPress={() => {
Actions.AuthenticationScreen({
type: 'reset'
})
}}>
<Include.Text style={{paddingRight: 20, paddingTop: 5, color: '#3498db', textAlign: 'right', fontSize: 16}}>Thông tin xác thực</Include.Text>
</TouchableOpacity>
: null}
<Include.Text style={{alignSelf: 'center', backgroundColor: 'transparent', color: '#ecf0f1', fontStyle: 'italic', paddingTop: 5}}>{`Mã giới thiệu: ${_.get(user, 'memberInfo.member.phone')}`}</Include.Text>
</View>
</View>
</View>
)
}
}
// function bindAction(dispatch) {
// return {
// }
// }
//
// export default connect(null, bindAction)(Settings);
export default Settings;
| {
super(props);
let {user} = this.props;
this.state = {};
this.handleUpdateProfile = this.handleUpdateProfile.bind(this);
this.userInfo={
Username: _.get(user, 'memberInfo.member.name', ''),
email: _.get(user, 'memberInfo.member.email', ''),
phone: _.get(user, 'memberInfo.member.phone', '')
}
this.constructor.childContextTypes = {
theme: React.PropTypes.object,
}
this.infoFromToken = null;
this.handleLoginFacebook = this.handleLoginFacebook.bind(this);
this.updateProfile = this.updateProfile.bind(this);
this.forceUpdate = (!user.memberInfo.member.facebook.name);
} | identifier_body |
index.js | 'use strict';
import React, { Component } from 'react';
import { Image, View, Switch, TouchableOpacity, Platform ,TextInput} from 'react-native';
import { connect } from 'react-redux';
import {Actions} from 'react-native-router-flux';
import { Container, Header, Content, Text, Button, Icon, Thumbnail, InputGroup, Input } from 'native-base';
import DatePicker from 'react-native-datepicker';
var Define = require('../../../Define');
var Debug = require('../../../Util/Debug');
var Themes = require('../../../Themes');
var Util = require('../../../Util/Util');
var Include = require('../../../Include');
var {popupActions} = require('../../popups/PopupManager');
import DefaultPopup from '../../popups/DefaultPopup'
var {globalVariableManager}= require('../../modules/GlobalVariableManager');
import isEmail from 'validator/lib/isEmail';
import FadeDownDefaultPopup from '../../popups/FadeDownDefaultPopup'
import UserActions_MiddleWare from '../../../actions/UserActions_MiddleWare'
import styles from './styles';
var primary = Themes.current.factor.brandPrimary;
let _ = require('lodash');
import PropTypes from 'prop-types'
React.PropTypes = PropTypes;
class Settings extends Component {
constructor(props) {
super(props);
let {user} = this.props;
this.state = {};
this.handleUpdateProfile = this.handleUpdateProfile.bind(this);
this.userInfo={
Username: _.get(user, 'memberInfo.member.name', ''),
email: _.get(user, 'memberInfo.member.email', ''),
phone: _.get(user, 'memberInfo.member.phone', '')
}
this.constructor.childContextTypes = {
theme: React.PropTypes.object,
}
this.infoFromToken = null;
this.handleLoginFacebook = this.handleLoginFacebook.bind(this);
this.updateProfile = this.updateProfile.bind(this);
this.forceUpdate = (!user.memberInfo.member.facebook.name);
}
| (objUpdate) {
var {dispatch,user} = this.props;
dispatch(UserActions_MiddleWare.updateProfile(objUpdate))
.then(()=>{
globalVariableManager.rootView.showToast('Cập nhật thông tin thành công');
dispatch(UserActions_MiddleWare.get())
.then(() => {
if(this.forceUpdate) {
return (
Actions.SwitchModeScreen({
type: 'reset'
})
)
}
})
})
.catch(err => {
popupActions.setRenderContentAndShow(FadeDownDefaultPopup,
{
description:'Cập nhật thông tin thất bại'
})
})
}
handleUpdateProfile() {
let {dispatch, user} = this.props;
let message = '';
let username = this.userInfo.Username.trim();
let email = this.userInfo.email.trim();
const objUpdate = {
name:this.userInfo.Username,
email:this.userInfo.email,
};
if(username === '') {
message += 'Họ và tên không được để trống';
}
if(email === '') {
if(!message) {
message = 'Email không được để trống'
} else {
message += '\nEmail không được để trống';
}
}
if (user.memberInfo.member.facebook.name
&& objUpdate.name.trim() === _.get(user, 'memberInfo.member.name', '')
&& objUpdate.email.trim() === _.get(user, 'memberInfo.member.email', '')) {
message = 'Bạn chưa thay đổi thông tin cập nhật'
}
if(message) {
globalVariableManager.rootView.showToast(message)
} else {
this.updateProfile(objUpdate);
}
}
handleLoginFacebook(accessToken) {
const self = this;
Debug.log2(`handleLogin`, accessToken);
let {dispatch, appSetting} = this.props;
let token = '';
let objUpdate = {};
dispatch(UserActions_MiddleWare.loginFacebook())
.then(access_token => {
token = access_token;
return dispatch(UserActions_MiddleWare.getInfoFromAccessToken({access_token}));
})
.then((result) => {
this.infoFromToken = result.res.data;
this.infoFromToken.access_token = token;
objUpdate = {
name: this.infoFromToken.name,
email: this.infoFromToken.email,
avatar: this.infoFromToken.picture,
id: this.infoFromToken.id,
access_token: this.infoFromToken.access_token
}
this.updateProfile(objUpdate);
})
.catch(err => {
globalVariableManager.rootView.showToast('Đã có lỗi xảy ra với quá trình đăng nhập Facebook');
});
}
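  // Renders the avatar, the editable profile fields, and the mode-specific verification UI.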
render() {
var {user,dispatch,appSetting} = this.props;
let isAuthen = false;
if(appSetting.mode === 'shipper') {
isAuthen = _.get(user, 'memberInfo.member.ship.isAuthen', 0);
} else {
isAuthen = _.get(user, 'memberInfo.member.shop.isAuthen', 0);
}
return (
<View style={styles.container}>
<View style={{justifyContent: 'center'}}>
<View style={styles.bg}>
<View style={{marginTop: 20,flexDirection:'row'}}>
{appSetting.mode === 'shipper'?
<Image style={{position: 'absolute', top: 10, right: 10, left: 10, width: 100, height: 100, backgroundColor: 'transparent'}} source={isAuthen ? Define.assets.Images.daxacthucship : Define.assets.Images.chuaxacthuc} /> : null}
<View style={{flex:1}}>
<TouchableOpacity style={{alignSelf: 'center'}}>
{user.memberInfo.member.facebook.picture?
<View>
<Thumbnail source={{uri:user.memberInfo.member.facebook.picture}} style={styles.profilePic} />
</View>:
<View style={{width:60,height:60}}></View>
}
</TouchableOpacity>
</View>
</View>
{user.memberInfo.member.facebook.name ? null :
<TouchableOpacity
onPress={() => {
this.handleLoginFacebook();
}}>
<View style={{backgroundColor: '#3b5998', borderRadius: 25, height: 40, marginTop:15, alignItems: 'center', justifyContent: 'center', paddingHorizontal: 20, marginHorizontal:20}}>
<Include.Text style={{color: '#fff', fontWeight: 'bold'}}>
Cập nhật thông tin qua Facebook
</Include.Text>
</View>
</TouchableOpacity>
}
<View style={styles.signupContainer}>
<View style={styles.inputGrp}>
<Icon name='ios-person-outline' style={styles.icon} />
<TextInput
underlineColorAndroid ='transparent'
placeholder='Họ và tên'
defaultValue={_.get(user, 'memberInfo.member.name', '')}
blurOnSubmit={false}
placeholderTextColor='#bdc3c7' style={styles.input}
onChangeText ={(text)=>{this.userInfo.Username=text}}
onSubmitEditing={() => {
if (this.refs.EmailInput) {
this.refs.EmailInput.focus();
}
}}/>
</View>
<View style={styles.inputGrp}>
<Icon name='ios-mail-open-outline' style={styles.icon}/>
<TextInput ref='EmailInput' underlineColorAndroid ='transparent' placeholder='Email' defaultValue={user.memberInfo.member.email?user.memberInfo.member.email:''} placeholderTextColor='#bdc3c7' style={styles.input}
onChangeText ={(text)=>{this.userInfo.email=text }}
onSubmitEditing={() => {
if (this.refs.AddrInput) {
this.refs.AddrInput.focus();
}
}}
/>
</View>
<TouchableOpacity
onPress={() => {}}>
<View style={[styles.inputGrp,{paddingVertical:8}]}>
<Icon name='ios-call-outline' style={{color: '#fff'}}/>
<Include.Text style={{color:'#ffffff',fontSize:15, paddingLeft: 15}}>{this.userInfo.phone} </Include.Text>
{/*<TextInput underlineColorAndroid ='transparent' placeholder='Số điện thoại' defaultValue={this.userInfo.phone} placeholderTextColor='#bdc3c7' style={styles.input}
editable={false} />*/}
</View>
</TouchableOpacity>
</View>
<Button
style={{alignSelf: 'center'}}
onPress={this.handleUpdateProfile}>
<Text>Cập nhật</Text>
</Button>
{appSetting.mode === 'shop' ?
<TouchableOpacity
onPress={() => {
Actions.AuthenticationScreen({
type: 'reset'
})
}}>
<Include.Text style={{paddingRight: 20, paddingTop: 5, color: '#3498db', textAlign: 'right', fontSize: 16}}>Thông tin xác thực</Include.Text>
</TouchableOpacity>
: null}
<Include.Text style={{alignSelf: 'center', backgroundColor: 'transparent', color: '#ecf0f1', fontStyle: 'italic', paddingTop: 5}}>{`Mã giới thiệu: ${_.get(user, 'memberInfo.member.phone')}`}</Include.Text>
</View>
</View>
</View>
)
}
}
// function bindAction(dispatch) {
// return {
// }
// }
//
// export default connect(null, bindAction)(Settings);
export default Settings;
| updateProfile | identifier_name |
index.js | 'use strict';
import React, { Component } from 'react';
import { Image, View, Switch, TouchableOpacity, Platform ,TextInput} from 'react-native';
import { connect } from 'react-redux';
import {Actions} from 'react-native-router-flux';
import { Container, Header, Content, Text, Button, Icon, Thumbnail, InputGroup, Input } from 'native-base';
import DatePicker from 'react-native-datepicker';
var Define = require('../../../Define');
var Debug = require('../../../Util/Debug');
var Themes = require('../../../Themes');
var Util = require('../../../Util/Util');
var Include = require('../../../Include');
var {popupActions} = require('../../popups/PopupManager');
import DefaultPopup from '../../popups/DefaultPopup'
var {globalVariableManager}= require('../../modules/GlobalVariableManager');
import isEmail from 'validator/lib/isEmail';
import FadeDownDefaultPopup from '../../popups/FadeDownDefaultPopup'
import UserActions_MiddleWare from '../../../actions/UserActions_MiddleWare'
import styles from './styles';
var primary = Themes.current.factor.brandPrimary;
let _ = require('lodash');
import PropTypes from 'prop-types'
React.PropTypes = PropTypes;
class Settings extends Component {
constructor(props) {
super(props);
let {user} = this.props;
this.state = {};
this.handleUpdateProfile = this.handleUpdateProfile.bind(this);
this.userInfo={
Username: _.get(user, 'memberInfo.member.name', ''),
email: _.get(user, 'memberInfo.member.email', ''),
phone: _.get(user, 'memberInfo.member.phone', '')
}
this.constructor.childContextTypes = {
theme: React.PropTypes.object,
}
this.infoFromToken = null;
this.handleLoginFacebook = this.handleLoginFacebook.bind(this);
this.updateProfile = this.updateProfile.bind(this);
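    // Note: this shadows React's Component#forceUpdate; here it marks accounts
    // that have no linked Facebook profile yet.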
this.forceUpdate = (!user.memberInfo.member.facebook.name);
}
updateProfile(objUpdate) {
var {dispatch,user} = this.props;
dispatch(UserActions_MiddleWare.updateProfile(objUpdate))
.then(()=>{
globalVariableManager.rootView.showToast('Cập nhật thông tin thành công');
dispatch(UserActions_MiddleWare.get())
.then(() => {
if(this.forceUpdate) {
return (
Actions.SwitchModeScreen({
type: 'reset'
})
)
}
})
})
.catch(err => {
popupActions.setRenderContentAndShow(FadeDownDefaultPopup,
{
description:'Cập nhật thông tin thất bại'
})
})
}
handleUpdateProfile() {
let {dispatch, user} = this.props;
let message = '';
let username = this.userInfo.Username.trim();
let email = this.userInfo.email.trim();
const objUpdate = {
name:this.userInfo.Username,
email:this.userInfo.email,
};
if(username === '') {
messag | if(!message) {
message = 'Email không được để trống'
} else {
message += '\nEmail không được để trống';
}
}
if (user.memberInfo.member.facebook.name
&& objUpdate.name.trim() === _.get(user, 'memberInfo.member.name', '')
&& objUpdate.email.trim() === _.get(user, 'memberInfo.member.email', '')) {
message = 'Bạn chưa thay đổi thông tin cập nhật'
}
if(message) {
globalVariableManager.rootView.showToast(message)
} else {
this.updateProfile(objUpdate);
}
}
handleLoginFacebook(accessToken) {
const self = this;
Debug.log2(`handleLogin`, accessToken);
let {dispatch, appSetting} = this.props;
let token = '';
let objUpdate = {};
dispatch(UserActions_MiddleWare.loginFacebook())
.then(access_token => {
token = access_token;
return dispatch(UserActions_MiddleWare.getInfoFromAccessToken({access_token}));
})
.then((result) => {
this.infoFromToken = result.res.data;
this.infoFromToken.access_token = token;
objUpdate = {
name: this.infoFromToken.name,
email: this.infoFromToken.email,
avatar: this.infoFromToken.picture,
id: this.infoFromToken.id,
access_token: this.infoFromToken.access_token
}
this.updateProfile(objUpdate);
})
.catch(err => {
globalVariableManager.rootView.showToast('Đã có lỗi xảy ra với quá trình đăng nhập Facebook');
});
}
render() {
var {user,dispatch,appSetting} = this.props;
let isAuthen = false;
if(appSetting.mode === 'shipper') {
isAuthen = _.get(user, 'memberInfo.member.ship.isAuthen', 0);
} else {
isAuthen = _.get(user, 'memberInfo.member.shop.isAuthen', 0);
}
return (
<View style={styles.container}>
<View style={{justifyContent: 'center'}}>
<View style={styles.bg}>
<View style={{marginTop: 20,flexDirection:'row'}}>
{appSetting.mode === 'shipper'?
<Image style={{position: 'absolute', top: 10, right: 10, left: 10, width: 100, height: 100, backgroundColor: 'transparent'}} source={isAuthen ? Define.assets.Images.daxacthucship : Define.assets.Images.chuaxacthuc} /> : null}
<View style={{flex:1}}>
<TouchableOpacity style={{alignSelf: 'center'}}>
{user.memberInfo.member.facebook.picture?
<View>
<Thumbnail source={{uri:user.memberInfo.member.facebook.picture}} style={styles.profilePic} />
</View>:
<View style={{width:60,height:60}}></View>
}
</TouchableOpacity>
</View>
</View>
{user.memberInfo.member.facebook.name ? null :
<TouchableOpacity
onPress={() => {
this.handleLoginFacebook();
}}>
<View style={{backgroundColor: '#3b5998', borderRadius: 25, height: 40, marginTop:15, alignItems: 'center', justifyContent: 'center', paddingHorizontal: 20, marginHorizontal:20}}>
<Include.Text style={{color: '#fff', fontWeight: 'bold'}}>
Cập nhật thông tin qua Facebook
</Include.Text>
</View>
</TouchableOpacity>
}
<View style={styles.signupContainer}>
<View style={styles.inputGrp}>
<Icon name='ios-person-outline' style={styles.icon} />
<TextInput
underlineColorAndroid ='transparent'
placeholder='Họ và tên'
defaultValue={_.get(user, 'memberInfo.member.name', '')}
blurOnSubmit={false}
placeholderTextColor='#bdc3c7' style={styles.input}
onChangeText ={(text)=>{this.userInfo.Username=text}}
onSubmitEditing={() => {
if (this.refs.EmailInput) {
this.refs.EmailInput.focus();
}
}}/>
</View>
<View style={styles.inputGrp}>
<Icon name='ios-mail-open-outline' style={styles.icon}/>
<TextInput ref='EmailInput' underlineColorAndroid ='transparent' placeholder='Email' defaultValue={user.memberInfo.member.email?user.memberInfo.member.email:''} placeholderTextColor='#bdc3c7' style={styles.input}
onChangeText ={(text)=>{this.userInfo.email=text }}
onSubmitEditing={() => {
if (this.refs.AddrInput) {
this.refs.AddrInput.focus();
}
}}
/>
</View>
<TouchableOpacity
onPress={() => {}}>
<View style={[styles.inputGrp,{paddingVertical:8}]}>
<Icon name='ios-call-outline' style={{color: '#fff'}}/>
<Include.Text style={{color:'#ffffff',fontSize:15, paddingLeft: 15}}>{this.userInfo.phone} </Include.Text>
{/*<TextInput underlineColorAndroid ='transparent' placeholder='Số điện thoại' defaultValue={this.userInfo.phone} placeholderTextColor='#bdc3c7' style={styles.input}
editable={false} />*/}
</View>
</TouchableOpacity>
</View>
<Button
style={{alignSelf: 'center'}}
onPress={this.handleUpdateProfile}>
<Text>Cập nhật</Text>
</Button>
{appSetting.mode === 'shop' ?
<TouchableOpacity
onPress={() => {
Actions.AuthenticationScreen({
type: 'reset'
})
}}>
<Include.Text style={{paddingRight: 20, paddingTop: 5, color: '#3498db', textAlign: 'right', fontSize: 16}}>Thông tin xác thực</Include.Text>
</TouchableOpacity>
: null}
<Include.Text style={{alignSelf: 'center', backgroundColor: 'transparent', color: '#ecf0f1', fontStyle: 'italic', paddingTop: 5}}>{`Mã giới thiệu: ${_.get(user, 'memberInfo.member.phone')}`}</Include.Text>
</View>
</View>
</View>
)
}
}
// function bindAction(dispatch) {
// return {
// }
// }
//
// export default connect(null, bindAction)(Settings);
export default Settings;
| e += 'Họ và tên không được để trống';
}
if(email === '') {
| conditional_block |
main.rs | use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::collections::HashMap;
use std::fmt::Display;
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use clap::{App, AppSettings, Arg, ArgMatches};
use conllx::io::{Reader, ReadSentence};
use conllx::token::Token;
use failure::{Error};
use itertools::Itertools;
use stdinout::OrExit;
pub fn main() -> Result<(), Error> {
let matches = parse_args();
let val_path = matches
.value_of(VALIDATION)
.or_exit("Missing input path", 1);
let val_file = File::open(val_path).or_exit("Can't open validation file.", 1);
let mut val_reader = Reader::new(BufReader::new(val_file));
let pred_path = matches
.value_of(PREDICTION)
.or_exit("Missing input path", 1);
let pred_file = File::open(pred_path)?;
let mut pred_reader = Reader::new(BufReader::new(pred_file));
let mut deprel_confusion = Confusion::<String>::new("Deprels");
let mut distance_confusion = Confusion::<usize>::new("Dists");
let skip_punct = matches.is_present(SKIP_PUNCTUATION);
let mut correct_head = 0;
let mut correct_head_label = 0;
let mut total = 0;
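    // Walk the gold and predicted files in lockstep; sentences must align token-for-token.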
while let (Ok(Some(val_sentence)), Ok(Some(pred_sentence))) = (val_reader.read_sentence(), pred_reader.read_sentence()) {
assert_eq!(val_sentence.len(), pred_sentence.len());
for (idx, (val_token, pred_token)) in val_sentence
.iter()
.filter_map(|t| t.token())
.zip(pred_sentence.iter().filter_map(|t| t.token()))
.enumerate() {
assert_eq!(val_token.form(), pred_token.form());
if skip_punct {
if val_token.pos().expect("Validation token missing POS").starts_with("PUNCT") {
continue
}
}
            let idx = idx + 1;
let val_triple = val_sentence.dep_graph().head(idx).unwrap();
let val_head = val_triple.head();
let val_dist = i64::abs(val_head as i64 - idx as i64) as usize;
let val_rel = val_triple.relation().unwrap();
            let pred_triple = pred_sentence.dep_graph().head(idx).unwrap();
let pred_head = pred_triple.head();
let pred_dist = i64::abs(pred_head as i64 - idx as i64) as usize;
let pred_rel = pred_triple.relation().unwrap();
distance_confusion.insert(val_dist, pred_dist);
deprel_confusion.insert(val_rel, pred_rel);
correct_head += (pred_head == val_head) as usize;
correct_head_label += (pred_triple == val_triple) as usize;
total += 1;
}
}
if let Ok(Some(_)) = val_reader.read_sentence() {
eprintln!("Val reader not exhausted.");
std::process::exit(1)
}
if let Ok(Some(_)) = pred_reader.read_sentence() {
eprintln!("Pred reader not exhausted.");
std::process::exit(1)
}
println!("UAS: {:.4}", correct_head as f32 / total as f32);
println!("LAS: {:.4}", correct_head_label as f32 / total as f32);
if let Some(file_name) = matches.value_of(DEPREL_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DEPREL_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
deprel_confusion.write_accuracies(&mut writer).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", distance_confusion).unwrap();
// write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
distance_confusion.write_accuracies(&mut writer).unwrap();
}
Ok(())
}
static DEFAULT_CLAP_SETTINGS: &[AppSettings] = &[
AppSettings::DontCollapseArgsInUsage,
AppSettings::UnifiedHelpMessage,
];
// Argument constants
static VALIDATION: &str = "VALIDATION";
static PREDICTION: &str = "PREDICTION";
static DEPREL_CONFUSION: &str = "deprel_confusion";
static DEPREL_ACCURACIES: &str = "deprel_accuracies";
static DISTANCE_ACCURACIES: &str = "distance_accuracies";
static DISTANCE_CONFUSION: &str = "distance_confusion";
static SKIP_PUNCTUATION: &str = "skip_punctuation";
fn parse_args() -> ArgMatches<'static> {
App::new("reduce-ptb")
.settings(DEFAULT_CLAP_SETTINGS)
.arg(
Arg::with_name(VALIDATION)
.help("VALIDATION file")
.index(1)
.required(true),
)
.arg(
Arg::with_name(PREDICTION)
.index(2)
.help("PREDICTION")
.required(true),
)
.arg(
Arg::with_name(DEPREL_CONFUSION)
.takes_value(true)
.long(DEPREL_CONFUSION)
.help("print deprel confusion matrix to file")
)
.arg(
Arg::with_name(DISTANCE_CONFUSION)
.takes_value(true)
.long(DISTANCE_CONFUSION)
.help("print DISTANCE_CONFUSION matrix to file")
)
.arg(
Arg::with_name(DISTANCE_ACCURACIES)
.takes_value(true)
.long(DISTANCE_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(DEPREL_ACCURACIES)
.takes_value(true)
.long(DEPREL_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(SKIP_PUNCTUATION)
.long(SKIP_PUNCTUATION)
.help("Ignore punctuation.")
)
.get_matches()
}
pub trait GetFeature {
fn get_feature(&self, name: &str) -> Option<&str>;
}
impl GetFeature for Token {
fn get_feature(&self, name: &str) -> Option<&str> {
if let Some(features) = self.features() {
if let Some(feature) = features.as_map().get(name) {
return feature.as_ref().map(|f| f.as_str())
}
}
None
}
}
pub struct Confusion<V> {
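    // Rows index gold labels, columns index predictions; ids are assigned by `numberer`.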
confusion: Vec<Vec<usize>>,
numberer: Numberer<V>,
name: String,
}
impl<V> Confusion<V> where V: Clone + Hash + Eq {
pub fn new(name: impl Into<String>) -> Self {
Confusion {
confusion: Vec::new(),
numberer: Numberer::new(),
name: name.into(),
}
}
pub fn insert<S>(&mut self, target: S, prediction: S) where S: Into<V> {
let target_idx = self.numberer.number(target);
let pred_idx = self.numberer.number(prediction);
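        // Grow the square matrix until both indices fit.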
while target_idx >= self.confusion.len() || pred_idx >= self.confusion.len() {
self.confusion.push(vec![0; self.confusion.len()]);
self.confusion
.iter_mut()
.for_each(|row| row.push(0));
}
self.confusion[target_idx][pred_idx] += 1;
}
}
impl<V> Confusion<V> {
pub fn numberer(&self) -> &Numberer<V> {
&self.numberer
} |
impl<V> Confusion<V> where V: ToString {
fn write_accuracies(&self, mut w: impl Write) -> Result<(), Error> {
for (idx, item) in self.numberer.idx2val.iter().map(V::to_string).enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
let total = row.iter().sum::<usize>();
let acc = correct as f32 / total as f32;
writeln!(w, "{}\t{}\t{:.04}", item, total, acc)?;
}
Ok(())
}
pub fn write_to_file(&self, mut w: impl Write, sep: &str) -> Result<(), Error> {
writeln!(w, "{}", self.numberer.idx2val.iter().map(ToString::to_string).join(sep))?;
for i in 0..self.confusion.len() {
writeln!(w, "{}", self.confusion[i].iter().map(|n| n.to_string()).join(sep))?;
}
Ok(())
}
}
impl<V> Display for Confusion<V> where V: ToString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}\t{}", self.name, self.numberer.idx2val.iter().map(ToString::to_string).join("\t"))?;
let mut total_correct = 0;
let mut full_total = 0;
for (idx, val) in self.numberer.idx2val.iter().enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
total_correct += correct;
let total = row.iter().sum::<usize>();
full_total += total;
let acc = correct as f32 / total as f32;
writeln!(f, "{}\t{}\t{:.4}", val.to_string(), self.confusion[idx].iter().map(|n| n.to_string()).join("\t"), acc)?;
}
let mut delim = String::new();
let mut precs = String::new();
for i in 0..self.confusion.len() {
let mut false_pos = 0;
for j in 0..self.confusion.len() {
if j == i {
continue
}
false_pos += self.confusion[j][i]
}
let prec = self.confusion[i][i] as f32 / (self.confusion[i][i] + false_pos) as f32;
precs.push_str(&format!("\t{:.4}", prec));
delim.push_str("\t____");
}
writeln!(f, "{}", delim)?;
writeln!(f, "{}", precs)?;
let acc = total_correct as f32 / full_total as f32;
writeln!(f, "acc: {:.4}", acc)?;
Ok(())
}
}
pub struct Numberer<V>{
val2idx: HashMap<V, usize>,
idx2val: Vec<V>,
}
impl<V> Numberer<V> where V: Clone + Hash + Eq {
pub fn new() -> Self {
Numberer {
val2idx: HashMap::new(),
idx2val: Vec::new(),
}
}
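    // Returns the existing id for `val`, or assigns the next free one.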
fn number<S>(&mut self, val: S) -> usize where S: Into<V> {
let val = val.into();
if let Some(idx) = self.val2idx.get(&val) {
*idx
} else {
let n_vals = self.val2idx.len();
self.val2idx.insert(val.clone(), n_vals);
self.idx2val.push(val);
n_vals
}
}
pub fn get_number(&self, val: &V) -> Option<usize> {
self.val2idx.get(val).map(|idx| *idx)
}
}
impl<V> Numberer<V> {
pub fn len(&self) -> usize {
self.idx2val.len()
}
pub fn is_empty(&self) -> bool {
self.idx2val.is_empty()
}
pub fn get_val(&self, idx: usize) -> Option<&V> {
self.idx2val.get(idx)
}
} | } | random_line_split |
main.rs | use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::collections::HashMap;
use std::fmt::Display;
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use clap::{App, AppSettings, Arg, ArgMatches};
use conllx::io::{Reader, ReadSentence};
use conllx::token::Token;
use failure::{Error};
use itertools::Itertools;
use stdinout::OrExit;
pub fn main() -> Result<(), Error> {
let matches = parse_args();
let val_path = matches
.value_of(VALIDATION)
.or_exit("Missing input path", 1);
let val_file = File::open(val_path).or_exit("Can't open validation file.", 1);
let mut val_reader = Reader::new(BufReader::new(val_file));
let pred_path = matches
.value_of(PREDICTION)
.or_exit("Missing input path", 1);
let pred_file = File::open(pred_path)?;
let mut pred_reader = Reader::new(BufReader::new(pred_file));
let mut deprel_confusion = Confusion::<String>::new("Deprels");
let mut distance_confusion = Confusion::<usize>::new("Dists");
let skip_punct = matches.is_present(SKIP_PUNCTUATION);
let mut correct_head = 0;
let mut correct_head_label = 0;
let mut total = 0;
while let (Ok(Some(val_sentence)), Ok(Some(pred_sentence))) = (val_reader.read_sentence(), pred_reader.read_sentence()) {
assert_eq!(val_sentence.len(), pred_sentence.len());
for (idx, (val_token, pred_token)) in val_sentence
.iter()
.filter_map(|t| t.token())
.zip(pred_sentence.iter().filter_map(|t| t.token()))
.enumerate() {
assert_eq!(val_token.form(), pred_token.form());
if skip_punct {
if val_token.pos().expect("Validation token missing POS").starts_with("PUNCT") {
continue
}
}
            let idx = idx + 1;
let val_triple = val_sentence.dep_graph().head(idx).unwrap();
let val_head = val_triple.head();
let val_dist = i64::abs(val_head as i64 - idx as i64) as usize;
let val_rel = val_triple.relation().unwrap();
            let pred_triple = pred_sentence.dep_graph().head(idx).unwrap();
let pred_head = pred_triple.head();
let pred_dist = i64::abs(pred_head as i64 - idx as i64) as usize;
let pred_rel = pred_triple.relation().unwrap();
distance_confusion.insert(val_dist, pred_dist);
deprel_confusion.insert(val_rel, pred_rel);
correct_head += (pred_head == val_head) as usize;
correct_head_label += (pred_triple == val_triple) as usize;
total += 1;
}
}
if let Ok(Some(_)) = val_reader.read_sentence() {
eprintln!("Val reader not exhausted.");
std::process::exit(1)
}
if let Ok(Some(_)) = pred_reader.read_sentence() {
eprintln!("Pred reader not exhausted.");
std::process::exit(1)
}
println!("UAS: {:.4}", correct_head as f32 / total as f32);
println!("LAS: {:.4}", correct_head_label as f32 / total as f32);
if let Some(file_name) = matches.value_of(DEPREL_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DEPREL_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
deprel_confusion.write_accuracies(&mut writer).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", distance_confusion).unwrap();
// write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_ACCURACIES) |
Ok(())
}
static DEFAULT_CLAP_SETTINGS: &[AppSettings] = &[
AppSettings::DontCollapseArgsInUsage,
AppSettings::UnifiedHelpMessage,
];
// Argument constants
static VALIDATION: &str = "VALIDATION";
static PREDICTION: &str = "PREDICTION";
static DEPREL_CONFUSION: &str = "deprel_confusion";
static DEPREL_ACCURACIES: &str = "deprel_accuracies";
static DISTANCE_ACCURACIES: &str = "distance_accuracies";
static DISTANCE_CONFUSION: &str = "distance_confusion";
static SKIP_PUNCTUATION: &str = "skip_punctuation";
fn parse_args() -> ArgMatches<'static> {
App::new("reduce-ptb")
.settings(DEFAULT_CLAP_SETTINGS)
.arg(
Arg::with_name(VALIDATION)
.help("VALIDATION file")
.index(1)
.required(true),
)
.arg(
Arg::with_name(PREDICTION)
.index(2)
.help("PREDICTION")
.required(true),
)
.arg(
Arg::with_name(DEPREL_CONFUSION)
.takes_value(true)
.long(DEPREL_CONFUSION)
.help("print deprel confusion matrix to file")
)
.arg(
Arg::with_name(DISTANCE_CONFUSION)
.takes_value(true)
.long(DISTANCE_CONFUSION)
.help("print DISTANCE_CONFUSION matrix to file")
)
.arg(
Arg::with_name(DISTANCE_ACCURACIES)
.takes_value(true)
.long(DISTANCE_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(DEPREL_ACCURACIES)
.takes_value(true)
.long(DEPREL_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(SKIP_PUNCTUATION)
.long(SKIP_PUNCTUATION)
.help("Ignore punctuation.")
)
.get_matches()
}
pub trait GetFeature {
fn get_feature(&self, name: &str) -> Option<&str>;
}
impl GetFeature for Token {
fn get_feature(&self, name: &str) -> Option<&str> {
if let Some(features) = self.features() {
if let Some(feature) = features.as_map().get(name) {
return feature.as_ref().map(|f| f.as_str())
}
}
None
}
}
pub struct Confusion<V> {
confusion: Vec<Vec<usize>>,
numberer: Numberer<V>,
name: String,
}
impl<V> Confusion<V> where V: Clone + Hash + Eq {
pub fn new(name: impl Into<String>) -> Self {
Confusion {
confusion: Vec::new(),
numberer: Numberer::new(),
name: name.into(),
}
}
pub fn insert<S>(&mut self, target: S, prediction: S) where S: Into<V> {
let target_idx = self.numberer.number(target);
let pred_idx = self.numberer.number(prediction);
while target_idx >= self.confusion.len() || pred_idx >= self.confusion.len() {
self.confusion.push(vec![0; self.confusion.len()]);
self.confusion
.iter_mut()
.for_each(|row| row.push(0));
}
self.confusion[target_idx][pred_idx] += 1;
}
}
impl<V> Confusion<V> {
pub fn numberer(&self) -> &Numberer<V> {
&self.numberer
}
}
impl<V> Confusion<V> where V: ToString {
fn write_accuracies(&self, mut w: impl Write) -> Result<(), Error> {
for (idx, item) in self.numberer.idx2val.iter().map(V::to_string).enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
let total = row.iter().sum::<usize>();
let acc = correct as f32 / total as f32;
writeln!(w, "{}\t{}\t{:.04}", item, total, acc)?;
}
Ok(())
}
pub fn write_to_file(&self, mut w: impl Write, sep: &str) -> Result<(), Error> {
writeln!(w, "{}", self.numberer.idx2val.iter().map(ToString::to_string).join(sep))?;
for i in 0..self.confusion.len() {
writeln!(w, "{}", self.confusion[i].iter().map(|n| n.to_string()).join(sep))?;
}
Ok(())
}
}
impl<V> Display for Confusion<V> where V: ToString {
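    // Pretty-prints the matrix with per-row recall, a final precision row, and overall accuracy.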
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}\t{}", self.name, self.numberer.idx2val.iter().map(ToString::to_string).join("\t"))?;
let mut total_correct = 0;
let mut full_total = 0;
for (idx, val) in self.numberer.idx2val.iter().enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
total_correct += correct;
let total = row.iter().sum::<usize>();
full_total += total;
let acc = correct as f32 / total as f32;
writeln!(f, "{}\t{}\t{:.4}", val.to_string(), self.confusion[idx].iter().map(|n| n.to_string()).join("\t"), acc)?;
}
let mut delim = String::new();
let mut precs = String::new();
for i in 0..self.confusion.len() {
let mut false_pos = 0;
for j in 0..self.confusion.len() {
if j == i {
continue
}
false_pos += self.confusion[j][i]
}
let prec = self.confusion[i][i] as f32 / (self.confusion[i][i] + false_pos) as f32;
precs.push_str(&format!("\t{:.4}", prec));
delim.push_str("\t____");
}
writeln!(f, "{}", delim)?;
writeln!(f, "{}", precs)?;
let acc = total_correct as f32 / full_total as f32;
writeln!(f, "acc: {:.4}", acc)?;
Ok(())
}
}
pub struct Numberer<V>{
val2idx: HashMap<V, usize>,
idx2val: Vec<V>,
}
impl<V> Numberer<V> where V: Clone + Hash + Eq {
pub fn new() -> Self {
Numberer {
val2idx: HashMap::new(),
idx2val: Vec::new(),
}
}
fn number<S>(&mut self, val: S) -> usize where S: Into<V> {
let val = val.into();
if let Some(idx) = self.val2idx.get(&val) {
*idx
} else {
let n_vals = self.val2idx.len();
self.val2idx.insert(val.clone(), n_vals);
self.idx2val.push(val);
n_vals
}
}
pub fn get_number(&self, val: &V) -> Option<usize> {
self.val2idx.get(val).map(|idx| *idx)
}
}
impl<V> Numberer<V> {
pub fn len(&self) -> usize {
self.idx2val.len()
}
pub fn is_empty(&self) -> bool {
self.idx2val.is_empty()
}
pub fn get_val(&self, idx: usize) -> Option<&V> {
self.idx2val.get(idx)
}
} | {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
distance_confusion.write_accuracies(&mut writer).unwrap();
} | conditional_block |
main.rs | use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::collections::HashMap;
use std::fmt::Display;
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use clap::{App, AppSettings, Arg, ArgMatches};
use conllx::io::{Reader, ReadSentence};
use conllx::token::Token;
use failure::{Error};
use itertools::Itertools;
use stdinout::OrExit;
pub fn main() -> Result<(), Error> {
let matches = parse_args();
let val_path = matches
.value_of(VALIDATION)
.or_exit("Missing input path", 1);
let val_file = File::open(val_path).or_exit("Can't open validation file.", 1);
let mut val_reader = Reader::new(BufReader::new(val_file));
let pred_path = matches
.value_of(PREDICTION)
.or_exit("Missing input path", 1);
let pred_file = File::open(pred_path)?;
let mut pred_reader = Reader::new(BufReader::new(pred_file));
let mut deprel_confusion = Confusion::<String>::new("Deprels");
let mut distance_confusion = Confusion::<usize>::new("Dists");
let skip_punct = matches.is_present(SKIP_PUNCTUATION);
let mut correct_head = 0;
let mut correct_head_label = 0;
let mut total = 0;
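// Read gold and predicted files in lockstep; the sentences must align one-to-one.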
while let (Ok(Some(val_sentence)), Ok(Some(pred_sentence))) = (val_reader.read_sentence(), pred_reader.read_sentence()) {
assert_eq!(val_sentence.len(), pred_sentence.len());
for (idx, (val_token, pred_token)) in val_sentence
.iter()
.filter_map(|t| t.token())
.zip(pred_sentence.iter().filter_map(|t| t.token()))
.enumerate() {
assert_eq!(val_token.form(), pred_token.form());
if skip_punct && val_token.pos().expect("Validation token missing POS").starts_with("PUNCT") {
continue
}
let idx = idx + 1; // shift to 1-based positions; the dependency graph reserves index 0 for the root
let val_triple = val_sentence.dep_graph().head(idx).unwrap();
let val_head = val_triple.head();
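// Arc length: absolute offset between a token and its head, tracked as its own confusion class.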
let val_dist = i64::abs(val_head as i64 - idx as i64) as usize;
let val_rel = val_triple.relation().unwrap();
let pred_triple = pred_sentence.dep_graph().head(idx).unwrap();
let pred_head = pred_triple.head();
let pred_dist = i64::abs(pred_head as i64 - idx as i64) as usize;
let pred_rel = pred_triple.relation().unwrap();
distance_confusion.insert(val_dist, pred_dist);
deprel_confusion.insert(val_rel, pred_rel);
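// UAS counts correct heads only; LAS additionally requires the correct relation label.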
correct_head += (pred_head == val_head) as usize;
correct_head_label += (pred_triple == val_triple) as usize;
total += 1;
}
}
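// If either reader still yields sentences, the two files were not parallel.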
if let Ok(Some(_)) = val_reader.read_sentence() {
eprintln!("Val reader not exhausted.");
std::process::exit(1)
}
if let Ok(Some(_)) = pred_reader.read_sentence() {
eprintln!("Pred reader not exhausted.");
std::process::exit(1)
}
println!("UAS: {:.4}", correct_head as f32 / total as f32);
println!("LAS: {:.4}", correct_head_label as f32 / total as f32);
if let Some(file_name) = matches.value_of(DEPREL_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DEPREL_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
deprel_confusion.write_accuracies(&mut writer).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", distance_confusion).unwrap();
// write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
distance_confusion.write_accuracies(&mut writer).unwrap();
}
Ok(())
}
static DEFAULT_CLAP_SETTINGS: &[AppSettings] = &[
AppSettings::DontCollapseArgsInUsage,
AppSettings::UnifiedHelpMessage,
];
// Argument constants
static VALIDATION: &str = "VALIDATION";
static PREDICTION: &str = "PREDICTION";
static DEPREL_CONFUSION: &str = "deprel_confusion";
static DEPREL_ACCURACIES: &str = "deprel_accuracies";
static DISTANCE_ACCURACIES: &str = "distance_confusion";
static DISTANCE_CONFUSION: &str = "distance_accuracies";
static SKIP_PUNCTUATION: &str = "skip_punctuation";
fn parse_args() -> ArgMatches<'static> {
App::new("reduce-ptb")
.settings(DEFAULT_CLAP_SETTINGS)
.arg(
Arg::with_name(VALIDATION)
.help("VALIDATION file")
.index(1)
.required(true),
)
.arg(
Arg::with_name(PREDICTION)
.index(2)
.help("PREDICTION")
.required(true),
)
.arg(
Arg::with_name(DEPREL_CONFUSION)
.takes_value(true)
.long(DEPREL_CONFUSION)
.help("print deprel confusion matrix to file")
)
.arg(
Arg::with_name(DISTANCE_CONFUSION)
.takes_value(true)
.long(DISTANCE_CONFUSION)
.help("print DISTANCE_CONFUSION matrix to file")
)
.arg(
Arg::with_name(DISTANCE_ACCURACIES)
.takes_value(true)
.long(DISTANCE_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(DEPREL_ACCURACIES)
.takes_value(true)
.long(DEPREL_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(SKIP_PUNCTUATION)
.long(SKIP_PUNCTUATION)
.help("Ignore punctuation.")
)
.get_matches()
}
pub trait GetFeature {
fn get_feature(&self, name: &str) -> Option<&str>;
}
impl GetFeature for Token {
fn get_feature(&self, name: &str) -> Option<&str> {
if let Some(features) = self.features() {
if let Some(feature) = features.as_map().get(name) {
return feature.as_ref().map(|f| f.as_str())
}
}
None
}
}
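// Square confusion matrix over interned labels: rows are gold values, columns are predictions.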
pub struct Confusion<V> {
confusion: Vec<Vec<usize>>,
numberer: Numberer<V>,
name: String,
}
impl<V> Confusion<V> where V: Clone + Hash + Eq {
pub fn new(name: impl Into<String>) -> Self {
Confusion {
confusion: Vec::new(),
numberer: Numberer::new(),
name: name.into(),
}
}
pub fn insert<S>(&mut self, target: S, prediction: S) where S: Into<V> {
let target_idx = self.numberer.number(target);
let pred_idx = self.numberer.number(prediction);
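// Grow the matrix symmetrically until both indices fit; new rows and columns start at zero.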
while target_idx >= self.confusion.len() || pred_idx >= self.confusion.len() {
self.confusion.push(vec![0; self.confusion.len()]);
self.confusion
.iter_mut()
.for_each(|row| row.push(0));
}
self.confusion[target_idx][pred_idx] += 1;
}
}
impl<V> Confusion<V> {
pub fn numberer(&self) -> &Numberer<V> {
&self.numberer
}
}
impl<V> Confusion<V> where V: ToString {
fn write_accuracies(&self, mut w: impl Write) -> Result<(), Error> {
for (idx, item) in self.numberer.idx2val.iter().map(V::to_string).enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
let total = row.iter().sum::<usize>();
let acc = correct as f32 / total as f32;
writeln!(w, "{}\t{}\t{:.04}", item, total, acc)?;
}
Ok(())
}
pub fn write_to_file(&self, mut w: impl Write, sep: &str) -> Result<(), Error> {
writeln!(w, "{}", self.numberer.idx2val.iter().map(ToString::to_string).join(sep))?;
for i in 0..self.confusion.len() {
writeln!(w, "{}", self.confusion[i].iter().map(|n| n.to_string()).join(sep))?;
}
Ok(())
}
}
impl<V> Display for Confusion<V> where V: ToString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}\t{}", self.name, self.numberer.idx2val.iter().map(ToString::to_string).join("\t"))?;
let mut total_correct = 0;
let mut full_total = 0;
for (idx, val) in self.numberer.idx2val.iter().enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
total_correct += correct;
let total = row.iter().sum::<usize>();
full_total += total;
let acc = correct as f32 / total as f32;
writeln!(f, "{}\t{}\t{:.4}", val.to_string(), self.confusion[idx].iter().map(|n| n.to_string()).join("\t"), acc)?;
}
let mut delim = String::new();
let mut precs = String::new();
for i in 0..self.confusion.len() {
let mut false_pos = 0;
for j in 0..self.confusion.len() {
if j == i {
continue
}
false_pos += self.confusion[j][i]
}
let prec = self.confusion[i][i] as f32 / (self.confusion[i][i] + false_pos) as f32;
precs.push_str(&format!("\t{:.4}", prec));
delim.push_str("\t____");
}
writeln!(f, "{}", delim)?;
writeln!(f, "{}", precs)?;
let acc = total_correct as f32 / full_total as f32;
writeln!(f, "acc: {:.4}", acc)?;
Ok(())
}
}
pub struct | <V>{
val2idx: HashMap<V, usize>,
idx2val: Vec<V>,
}
impl<V> Numberer<V> where V: Clone + Hash + Eq {
pub fn new() -> Self {
Numberer {
val2idx: HashMap::new(),
idx2val: Vec::new(),
}
}
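// Intern a value: return its existing index or assign the next free one.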
fn number<S>(&mut self, val: S) -> usize where S: Into<V> {
let val = val.into();
if let Some(idx) = self.val2idx.get(&val) {
*idx
} else {
let n_vals = self.val2idx.len();
self.val2idx.insert(val.clone(), n_vals);
self.idx2val.push(val);
n_vals
}
}
pub fn get_number(&self, val: &V) -> Option<usize> {
self.val2idx.get(val).copied()
}
}
impl<V> Numberer<V> {
pub fn len(&self) -> usize {
self.idx2val.len()
}
pub fn is_empty(&self) -> bool {
self.idx2val.is_empty()
}
pub fn get_val(&self, idx: usize) -> Option<&V> {
self.idx2val.get(idx)
}
} | Numberer | identifier_name |
main.rs | use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::collections::HashMap;
use std::fmt::Display;
use std::fmt;
use std::hash::Hash;
use std::io::Write;
use clap::{App, AppSettings, Arg, ArgMatches};
use conllx::io::{Reader, ReadSentence};
use conllx::token::Token;
use failure::Error;
use itertools::Itertools;
use stdinout::OrExit;
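// Compares a gold (validation) CoNLL-X file against a predicted one and reports
// UAS/LAS plus optional confusion matrices over dependency relations and arc lengths.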
pub fn main() -> Result<(), Error> {
let matches = parse_args();
let val_path = matches
.value_of(VALIDATION)
.or_exit("Missing input path", 1);
let val_file = File::open(val_path).or_exit("Can't open validation file.", 1);
let mut val_reader = Reader::new(BufReader::new(val_file));
let pred_path = matches
.value_of(PREDICTION)
.or_exit("Missing input path", 1);
let pred_file = File::open(pred_path).or_exit("Can't open prediction file.", 1);
let mut pred_reader = Reader::new(BufReader::new(pred_file));
let mut deprel_confusion = Confusion::<String>::new("Deprels");
let mut distance_confusion = Confusion::<usize>::new("Dists");
let skip_punct = matches.is_present(SKIP_PUNCTUATION);
let mut correct_head = 0;
let mut correct_head_label = 0;
let mut total = 0;
while let (Ok(Some(val_sentence)), Ok(Some(pred_sentence))) = (val_reader.read_sentence(), pred_reader.read_sentence()) {
assert_eq!(val_sentence.len(), pred_sentence.len());
for (idx, (val_token, pred_token)) in val_sentence
.iter()
.filter_map(|t| t.token())
.zip(pred_sentence.iter().filter_map(|t| t.token()))
.enumerate() {
assert_eq!(val_token.form(), pred_token.form());
if skip_punct && val_token.pos().expect("Validation token missing POS").starts_with("PUNCT") {
continue
}
let idx = idx + 1; // shift to 1-based positions; the dependency graph reserves index 0 for the root
let val_triple = val_sentence.dep_graph().head(idx).unwrap();
let val_head = val_triple.head();
let val_dist = i64::abs(val_head as i64 - idx as i64) as usize;
let val_rel = val_triple.relation().unwrap();
let pred_triple = pred_sentence.dep_graph().head(idx).unwrap();
let pred_head = pred_triple.head();
let pred_dist = i64::abs(pred_head as i64 - idx as i64) as usize;
let pred_rel = pred_triple.relation().unwrap();
distance_confusion.insert(val_dist, pred_dist);
deprel_confusion.insert(val_rel, pred_rel);
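// UAS counts correct heads only; LAS additionally requires the correct relation label.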
correct_head += (pred_head == val_head) as usize;
correct_head_label += (pred_triple == val_triple) as usize;
total += 1;
}
}
if let Ok(Some(_)) = val_reader.read_sentence() {
eprintln!("Val reader not exhausted.");
std::process::exit(1)
}
if let Ok(Some(_)) = pred_reader.read_sentence() {
eprintln!("Pred reader not exhausted.");
std::process::exit(1)
}
println!("UAS: {:.4}", correct_head as f32 / total as f32);
println!("LAS: {:.4}", correct_head_label as f32 / total as f32);
if let Some(file_name) = matches.value_of(DEPREL_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DEPREL_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
deprel_confusion.write_accuracies(&mut writer).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_CONFUSION) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
write!(writer, "{}", distance_confusion).unwrap();
// write!(writer, "{}", deprel_confusion).unwrap();
}
if let Some(file_name) = matches.value_of(DISTANCE_ACCURACIES) {
let out = File::create(file_name).unwrap();
let mut writer = BufWriter::new(out);
distance_confusion.write_accuracies(&mut writer).unwrap();
}
Ok(())
}
static DEFAULT_CLAP_SETTINGS: &[AppSettings] = &[
AppSettings::DontCollapseArgsInUsage,
AppSettings::UnifiedHelpMessage,
];
// Argument constants
static VALIDATION: &str = "VALIDATION";
static PREDICTION: &str = "PREDICTION";
static DEPREL_CONFUSION: &str = "deprel_confusion";
static DEPREL_ACCURACIES: &str = "deprel_accuracies";
static DISTANCE_ACCURACIES: &str = "distance_confusion";
static DISTANCE_CONFUSION: &str = "distance_accuracies";
static SKIP_PUNCTUATION: &str = "skip_punctuation";
fn parse_args() -> ArgMatches<'static> {
App::new("reduce-ptb")
.settings(DEFAULT_CLAP_SETTINGS)
.arg(
Arg::with_name(VALIDATION)
.help("VALIDATION file")
.index(1)
.required(true),
)
.arg(
Arg::with_name(PREDICTION)
.index(2)
.help("PREDICTION")
.required(true),
)
.arg(
Arg::with_name(DEPREL_CONFUSION)
.takes_value(true)
.long(DEPREL_CONFUSION)
.help("print deprel confusion matrix to file")
)
.arg(
Arg::with_name(DISTANCE_CONFUSION)
.takes_value(true)
.long(DISTANCE_CONFUSION)
.help("print DISTANCE_CONFUSION matrix to file")
)
.arg(
Arg::with_name(DISTANCE_ACCURACIES)
.takes_value(true)
.long(DISTANCE_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(DEPREL_ACCURACIES)
.takes_value(true)
.long(DEPREL_ACCURACIES)
.help("print DISTANCE_ACCURACIES to file")
)
.arg(
Arg::with_name(SKIP_PUNCTUATION)
.long(SKIP_PUNCTUATION)
.help("Ignore punctuation.")
)
.get_matches()
}
pub trait GetFeature {
fn get_feature(&self, name: &str) -> Option<&str>;
}
impl GetFeature for Token {
fn get_feature(&self, name: &str) -> Option<&str> {
if let Some(features) = self.features() {
if let Some(feature) = features.as_map().get(name) {
return feature.as_ref().map(|f| f.as_str())
}
}
None
}
}
pub struct Confusion<V> {
confusion: Vec<Vec<usize>>,
numberer: Numberer<V>,
name: String,
}
impl<V> Confusion<V> where V: Clone + Hash + Eq {
pub fn new(name: impl Into<String>) -> Self {
Confusion {
confusion: Vec::new(),
numberer: Numberer::new(),
name: name.into(),
}
}
pub fn insert<S>(&mut self, target: S, prediction: S) where S: Into<V> {
let target_idx = self.numberer.number(target);
let pred_idx = self.numberer.number(prediction);
while target_idx >= self.confusion.len() || pred_idx >= self.confusion.len() {
self.confusion.push(vec![0; self.confusion.len()]);
self.confusion
.iter_mut()
.for_each(|row| row.push(0));
}
self.confusion[target_idx][pred_idx] += 1;
}
}
impl<V> Confusion<V> {
pub fn numberer(&self) -> &Numberer<V> {
&self.numberer
}
}
impl<V> Confusion<V> where V: ToString {
fn write_accuracies(&self, mut w: impl Write) -> Result<(), Error> {
for (idx, item) in self.numberer.idx2val.iter().map(V::to_string).enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
let total = row.iter().sum::<usize>();
let acc = correct as f32 / total as f32;
writeln!(w, "{}\t{}\t{:.04}", item, total, acc)?;
}
Ok(())
}
pub fn write_to_file(&self, mut w: impl Write, sep: &str) -> Result<(), Error> {
writeln!(w, "{}", self.numberer.idx2val.iter().map(ToString::to_string).join(sep))?;
for i in 0..self.confusion.len() {
writeln!(w, "{}", self.confusion[i].iter().map(|n| n.to_string()).join(sep))?;
}
Ok(())
}
}
impl<V> Display for Confusion<V> where V: ToString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "{}\t{}", self.name, self.numberer.idx2val.iter().map(ToString::to_string).join("\t"))?;
let mut total_correct = 0;
let mut full_total = 0;
for (idx, val) in self.numberer.idx2val.iter().enumerate() {
let row = &self.confusion[idx];
let correct = row[idx];
total_correct += correct;
let total = row.iter().sum::<usize>();
full_total += total;
let acc = correct as f32 / total as f32;
writeln!(f, "{}\t{}\t{:.4}", val.to_string(), self.confusion[idx].iter().map(|n| n.to_string()).join("\t"), acc)?;
}
let mut delim = String::new();
let mut precs = String::new();
for i in 0..self.confusion.len() {
let mut false_pos = 0;
for j in 0..self.confusion.len() {
if j == i {
continue
}
false_pos += self.confusion[j][i]
}
let prec = self.confusion[i][i] as f32 / (self.confusion[i][i] + false_pos) as f32;
precs.push_str(&format!("\t{:.4}", prec));
delim.push_str("\t____");
}
writeln!(f, "{}", delim)?;
writeln!(f, "{}", precs)?;
let acc = total_correct as f32 / full_total as f32;
writeln!(f, "acc: {:.4}", acc)?;
Ok(())
}
}
pub struct Numberer<V>{
val2idx: HashMap<V, usize>,
idx2val: Vec<V>,
}
impl<V> Numberer<V> where V: Clone + Hash + Eq {
pub fn new() -> Self |
fn number<S>(&mut self, val: S) -> usize where S: Into<V> {
let val = val.into();
if let Some(idx) = self.val2idx.get(&val) {
*idx
} else {
let n_vals = self.val2idx.len();
self.val2idx.insert(val.clone(), n_vals);
self.idx2val.push(val);
n_vals
}
}
pub fn get_number(&self, val: &V) -> Option<usize> {
self.val2idx.get(val).copied()
}
}
impl<V> Numberer<V> {
pub fn len(&self) -> usize {
self.idx2val.len()
}
pub fn is_empty(&self) -> bool {
self.idx2val.is_empty()
}
pub fn get_val(&self, idx: usize) -> Option<&V> {
self.idx2val.get(idx)
}
} | {
Numberer {
val2idx: HashMap::new(),
idx2val: Vec::new(),
}
} | identifier_body |
bundle.js | "use strict";
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
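// Cookie helpers: persist small bits of client state (opened tasks, uid) for a given number of days.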
function setCookie(cname, cvalue, exdays) {
var d = new Date();
d.setTime(d.getTime() + exdays * 24 * 60 * 60 * 1000);
var expires = "expires=" + d.toUTCString();
document.cookie = cname + "=" + cvalue + ";" + expires + ";path=/";
}
function getCookie(cname) {
var name = cname + "=";
var ca = document.cookie.split(';');
for (var i = 0; i < ca.length; i++) {
var c = ca[i];
while (c.charAt(0) == ' ') {
c = c.substring(1);
}
if (c.indexOf(name) == 0) {
return c.substring(name.length, c.length);
}
}
return "";
}
function getScrollbarWidth() {
var outer = document.createElement("div");
outer.style.visibility = "hidden";
outer.style.width = "100px";
outer.style.msOverflowStyle = "scrollbar"; // needed for WinJS apps
document.body.appendChild(outer);
var widthNoScroll = outer.offsetWidth;
// force scrollbars
outer.style.overflow = "scroll";
// add innerdiv
var inner = document.createElement("div");
inner.style.width = "100%";
outer.appendChild(inner);
var widthWithScroll = inner.offsetWidth;
// remove divs
outer.parentNode.removeChild(outer);
//console.log(widthNoScroll, widthWithScroll);
return widthNoScroll - widthWithScroll;
}
window.loader = function (ev) {
var $body = $('body');
var $preloader = $('#preloader');
if (ev === 'show') {
show();
}
if (ev === 'hide') {
hide();
}
function show() {
$body.addClass('loading');
$preloader.addClass('opacity').fadeIn(200);
}
function hide() {
$body.removeClass('loading');
$preloader.fadeOut(200).removeClass('opacity');
}
};
var $ = jQuery.noConflict(); // assigned before EventsSlider, whose slider property invokes $ at definition time
var EventsSlider = {
slider: $('.events-slider'),
init: function init() {
var isInit = this.slider.hasClass('slick-initialized');
if (!isInit) {
this.slider.slick({
slide: '.item',
slidesToShow: 5,
slidesToScroll: 1,
centerMode: false,
centerPadding: '0%',
infinite: false,
arrows: false,
autoplay: false,
dots: false,
unslicked: true,
prevArrow: '<button type="button" class="slick-prev"><span class="icon-chevron-thin-left"></span></button>',
nextArrow: '<button type="button" class="slick-next"><span class="icon-chevron-thin-right"></span></button>',
responsive: [{
breakpoint: 1601,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '33%'
}
}, {
breakpoint: 1025,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '25%'
}
}, {
breakpoint: 580,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '20%'
}
}, {
breakpoint: 440,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '15%'
}
}, {
breakpoint: 370,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '12%'
}
}, {
breakpoint: 340,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '10%'
}
}]
});
this.slider.slick('slickGoTo', 1);
}
},
destroy: function destroy() {
var isInit = this.slider.hasClass('slick-initialized');
console.log(isInit);
if (isInit) {
$('.events-slider').slick('unslick');
}
}
};
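// Fire the custom event at most once per animation frame while the source event keeps firing.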
var throttle = function throttle(type, name, obj) {
var running = false;
var object = obj || window;
var func = function func() {
if (running) {
return;
}
running = true;
requestAnimationFrame(function () {
object.dispatchEvent(new CustomEvent(name));
running = false;
});
};
object.addEventListener(type, func);
};
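// The active breakpoint is published via CSS: the content of body::before is "mobile", "tablet" or "desktop".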
function deviceType() {
return window.getComputedStyle(document.querySelector('body'), '::before').getPropertyValue('content').replace(/'/g, '').replace(/"/g, '');
}
function checkDeviceType(MQ, isMobile, isTablet, isDesktop, arrCbs) {
if (MQ === 'desktop' && isDesktop) {
arrCbs[0]();
} else if (MQ === 'tablet' && isTablet) {
arrCbs[1]();
} else if (MQ === 'mobile' && isMobile) {
arrCbs[2]();
}
//console.log('checkDeviceType:' + MQ);
}
function staticInit(mq, firstFunc, otherFunc, secFunc) {
if (mq === 'desktop') {
firstFunc();
} else if (mq === 'tablet') {
otherFunc();
} else if (mq === 'mobile') {
secFunc();
}
// console.log('staticInit:' + mq);
}
(function () {
var $window = $(window);
var $document = $(document);
var $body = $('body');
var $html = $('html');
var Android = /Android/i.test(navigator.userAgent) && !/(Windows\sPhone)/i.test(navigator.userAgent);
var App = function App() {
var _this = this;
_classCallCheck(this, App);
this.init = function () {
var self = _this;
if (Android) {
$('html').addClass('android');
}
//$('.field-account-number > input').inputmask('Regex', { regex: "^[1-9][0-9][0-9][0-9][0-9][0-9][0-9]?$|^100$" });
/*$('input[type="tel"]').inputmask({
"mask": "+9{1,2} (999) 999 99 99",
clearMaskOnLostFocus: false,
clearIncomplete: true
});
$('.field-code > input').inputmask({
"mask": "9 9 9 9 9",
clearMaskOnLostFocus: false,
clearIncomplete: true
});
$('.field-account-number > input').inputmask({
"mask": "9 9 9 9 9 9 9",
'clearMaskOnLostFocus': true,
'clearIncomplete': true
});*/
/* var maxYear = new Date().getFullYear() - 17;
$("#dob").datepicker({
container: '.ll-skin-lugo',
changeMonth: true,
changeYear: true,
yearRange: "1940:" + maxYear,
//minDate: new Date(1940, 1 - 1, 1),
maxDate: new Date(maxYear, 12 - 1, 1),
regional: 'ru',
beforeShow: function beforeShow(textbox, instance) {
$('.DivToAppendPicker').append($('#ui-datepicker-div'));
}
});*/
//$('.cuSelect > select').styler();
/* _this.initMmenu();
_this.modalEvents();
_this.customScroll();
_this.scrollToTop();
_this.scrollToId();
_this.popovers();
_this.openTask();
_this.openRegistration();
_this.toggleInput();
_this.togglePass();*/
_this.snowInit();
};
this.handleLoad = function () {
$('body').removeClass('loading');
$('#preloader').fadeOut(200);
//$('header').addClass('show');
};
this.switchToMobile = function () {
console.log('switchToMobile: Mobile');
};
this.switchToTablet = function () {
console.log('switchToTablet: Tablet');
};
this.switchToDesktop = function () {
console.log('switchToDesktop: Desktop');
};
this.handleResize = function () {
//console.log('resize');
};
this.destroy = function () {};
this.handleScroll = function () {};
this.scrollToTop = function () {
var $sctollToTop = $(".scrollToTop");
$(window).scroll(function () {
if ($(this).scrollTop() > 300) {
$('.scrollToTop').fadeIn();
} else {
$('.scrollToTop').fadeOut();
}
});
//Click event to scroll to top
$scrollToTop.click(function (e) {
e.preventDefault();
$('html, body').animate({ scrollTop: 0 }, 800);
return false;
});
};
this.scrollToId = function () {
var $el = $('.jsScrollTo');
$el.click(function (e) {
e.preventDefault();
var $scrollTo = $(this).attr('href');
$('html, body').animate({
scrollTop: $($scrollTo).offset().top
}, 400);
return false;
});
};
this.initMmenu = function () {
var $mobileNav = $('#mobile-nav');
var $mobileNavBtn = $('#show-mobile-menu');
if ($('#mobile-nav').length) {
$mobileNav.mmenu({
extensions: ["border-none", "fx-menu-fade", "fx-listitems-slide", "position-front", "fullscreen"],
navbars: {
add: false,
position: "right",
content: ["close"]
}
}, {
clone: false,
offCanvas: {
pageSelector: "#page"
}
});
var mobAPI = $mobileNav.data("mmenu");
$mobileNavBtn.on('click', mobAPI.open);
// $document.on('click', '#show-mobile-menu', function (e) {
// e.preventDefault();
// e.stopPropagation();
// mobAPI.close;
// });
mobAPI.bind('open', function () {
$mobileNavBtn.addClass('is-active');
});
mobAPI.bind('close', function () {
$mobileNavBtn.removeClass('is-active');
});
$(window).on("orientationchange", function (event) {
$mobileNavBtn.removeClass('is-active');
mobAPI.close();
});
}
};
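// Tasks unlock only for logged-in users; opened task IDs persist for one day in the "openedTasks" cookie.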
this.openTask = function () {
var audio = document.getElementById("audio");
var cookieTasks = getCookie('openedTasks');
var openedTasksArray = [];
if (cookieTasks.length !== 0) |
if ($('body').hasClass('logged')) {
$.each(openedTasksArray, function (i, item) {
$('.item-event[data-taskID=' + item + ']').removeClass('disabled');
});
}
$('.item-event').on('click', function (e) {
var logged = $('body').hasClass('logged');
var disabled = $(this).hasClass('disabled');
if (!logged) {
$("#modalLogin").modal("show");
} else {
if (disabled) {
audio.play();
openedTasksArray.push($(this).attr('data-taskID'));
setCookie('openedTasks', JSON.stringify(openedTasksArray), 1);
$(this).removeClass('disabled');
}
}
});
};
this.modalEvents = function () {
$('.modal').on('shown.bs.modal', function () {}).on('hidden.bs.modal', function () {
$('body').css('padding-right', '0');
}).on('show.bs.modal', function (e) {
testAnim('bounceInLeft');
}).on('hide.bs.modal', function (e) {
testAnim('bounceOutRight');
});
$('.modal-terms').on('show.bs.modal', function () {
$(this).append('<div class="modal-backdrop fade in" data-dismiss="modal" aria-label="Close"></div>');
}).on('hide.bs.modal', function (e) {
$(this).find('.modal-backdrop').remove();
});
$('#modalRegistration').on('hidden.bs.modal', function (e) {
var cookieUid = getCookie('uid');
if (cookieUid.length > 2) {
location.reload();
}
});
function closeOpenModal(modal) {
$('.modal').modal('hide');
setTimeout(function () {
$(modal).modal('show');
}, 550);
};
function testAnim(x) {
$('.modal .modal-dialog').attr('class', 'modal-dialog ' + x + ' animated');
};
};
this.popovers = function () {
var popover = $('[data-toggle="popover"]');
popover.popover({
html: true,
content: function content() {
var content = $(this).attr("data-popover-content");
return $(content).children(".popover-body").html();
},
title: function title() {
var title = $(this).attr("data-popover-content");
return $(title).children(".popover-heading").html();
}
});
};
this.scrollDown = function () {
$('.btn-scroll-down').on('click', function (e) {
e.preventDefault();
if ($('html').hasClass('fp-enabled')) {
$.fn.fullpage.moveTo('instruction');
} else {
var $scrollTo = '#info';
$('html, body').animate({
scrollTop: $($scrollTo).offset().top
}, 400);
return false;
}
});
};
this.customScroll = function () {
$(window).load(function () {
$('.cuScroll').mCustomScrollbar({
axis: "y",
scrollInertia: 0,
//theme: '3d',
scrollButtons: { enable: false },
mouseWheel: { enable: true }
});
});
};
this.wowAnimation = function () {
var wow = new WOW({
boxClass: 'wow', // animated element css class (default is wow)
animateClass: 'animated', // animation css class (default is animated)
offset: 0, // distance to the element when triggering the animation (default is 0)
mobile: false, // trigger animations on mobile devices (default is true)
live: true, // act on asynchronously loaded content (default is true)
callback: function callback(box) {
// the callback is fired every time an animation is started
// the argument that is passed in is the DOM node being animated
},
scrollContainer: null // optional scroll container selector, otherwise use window
});
wow.init();
};
this.openRegistration = function () {
$('#open-modal-reg').on('click', function (e) {
e.preventDefault();
$("#modalLogin").modal("hide");
setTimeout(function () {
$("#modalRegistration").modal("show");
}, 550);
});
};
this.togglePass = function () {
var inputPass = $('#passwd');
$('.btn-show-pass').on('click', function (e) {
e.preventDefault();
if ($(this).hasClass('active')) {
$(this).removeClass('active');
inputPass.attr('type', 'password');
} else {
$(this).addClass('active');
inputPass.attr('type', 'text');
}
});
};
this.toggleInput = function () {
var emailField = {
type: 'email',
name: 'email',
placeholder: 'Email *',
btnType: 'email',
id: 'email'
};
var phoneField = {
type: 'tel',
name: 'telno',
placeholder: 'Телефон *',
btnType: 'phone',
id: 'telno'
};
$('.btn-change').on('click', function (e) {
e.preventDefault();
var typeField = $(this).attr('data-type');
var input = $(this).parents('.group-email-phone').find('input');
if (typeField == 'email') {
input.val('');
input.attr('type', phoneField.type);
input.attr('name', phoneField.name);
input.attr('id', phoneField.id);
input.attr('placeholder', phoneField.placeholder);
$(this).attr('data-type', phoneField.btnType);
input.inputmask({
"mask": "+9{1,2} (999) 999 99 99",
clearMaskOnLostFocus: false
});
} else if (typeField == 'phone') {
input.inputmask('unmaskedvalue');
input.inputmask('remove');
input.val('');
input.attr('type', emailField.type);
input.attr('name', emailField.name);
input.attr('id', emailField.id);
input.attr('placeholder', emailField.placeholder);
$(this).attr('data-type', emailField.btnType);
}
});
};
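// Canvas snow: each flake falls by its speed, drifts by its wind, and respawns above the canvas once it leaves the bottom.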
this.snowInit = function () {
var Snow = function Snow(canvas, count) {
var ctx = canvas.getContext('2d');
var snowflakes = [];
var add = function add(item) {
return snowflakes.push(item(canvas));
};
var update = function update() {
return _.forEach(snowflakes, function (el) {
return el.update();
});
};
var resize = function resize() {
ctx.canvas.width = canvas.offsetWidth;
ctx.canvas.height = canvas.offsetHeight;
_.forEach(snowflakes, function (el) {
return el.resized();
});
};
var draw = function draw() {
ctx.clearRect(0, 0, canvas.offsetWidth, canvas.offsetHeight);
_.forEach(snowflakes, function (el) {
return el.draw();
});
};
var events = function events() {
window.addEventListener('resize', resize);
};
var loop = function loop() {
draw();
update();
animFrame(loop);
};
var init = function init() {
_.times(count, function () {
return add(function (canvas) {
return new SnowItem(canvas);
});
});
events();
loop();
};
init(count);
resize();
return { add: add, resize: resize };
};
var defaultOptions = {
color: '#fff',
radius: [0.5, 2.0],
speed: [1, 1],
wind: [-0.1, 1.0]
};
var SnowItem = function SnowItem(canvas) {
var _ref, _ref2, _ref3;
var drawFn = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null;
var opts = arguments[2];
var options = _extends({}, defaultOptions, opts);
var radius = options.radius,
speed = options.speed,
wind = options.wind,
color = options.color;
var params = {
color: color,
x: _.random(0, canvas.offsetWidth),
y: _.random(-canvas.offsetHeight, 0),
radius: (_ref = _).random.apply(_ref, _toConsumableArray(radius)),
speed: (_ref2 = _).random.apply(_ref2, _toConsumableArray(speed)),
wind: (_ref3 = _).random.apply(_ref3, _toConsumableArray(wind)),
isResized: false
};
var ctx = canvas.getContext('2d');
var updateData = function updateData() {
params.x = _.random(0, canvas.offsetWidth);
params.y = _.random(-canvas.offsetHeight, 0);
};
var resized = function resized() {
return params.isResized = true;
};
var drawDefault = function drawDefault() {
ctx.beginPath();
ctx.arc(params.x, params.y, params.radius, 0, 2 * Math.PI);
ctx.fillStyle = params.color;
ctx.fill();
ctx.closePath();
};
var draw = drawFn ? function () {
return drawFn(ctx, params);
} : drawDefault;
var translate = function translate() {
params.y += params.speed;
params.x += params.wind;
};
var onDown = function onDown() {
if (params.y < canvas.offsetHeight) return;
if (params.isResized) {
updateData();
params.isResized = false;
} else {
params.y = 0;
params.x = _.random(0, canvas.offsetWidth);
}
};
var update = function update() {
translate();
onDown();
};
return {
update: update,
resized: resized,
draw: draw
};
};
var el = document.querySelector('.page-content');
var wrapper = document.querySelector('.box-snow');
var canvas = document.getElementById('snow');
var el1 = document.querySelector('.box-snow-front');
var wrapper1 = document.querySelector('.box');
var canvas1 = document.getElementById('snow-front');
var animFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame;
if ($('#snow').length) {
var snow = Snow(canvas, 80);
}
if ($('#snow-front').length) {
var snow1 = Snow(canvas1, 200);
}
};
};
var projectApp = new App();
var MQ = deviceType();
var isMobile = false;
var isTablet = true;
var isDesktop = false;
throttle('resize', 'optimizedResize');
function switchDeviceType(mq) {
if (mq === 'desktop' && isDesktop) {
isDesktop = false;
isTablet = true;
isMobile = false;
} else if (mq === 'tablet' && isTablet) {
isMobile = true;
isDesktop = true;
isTablet = false;
} else if (mq === 'mobile' && isMobile) {
isMobile = false;
isTablet = true;
isDesktop = false;
}
//console.log('switchDeviceType: ' + mq);
}
staticInit(MQ, projectApp.switchToDesktop, projectApp.switchToTablet, projectApp.switchToMobile);
$window.on('optimizedResize', function () {
var mq = deviceType();
checkDeviceType(mq, isMobile, isTablet, isDesktop, [projectApp.switchToDesktop, projectApp.switchToTablet, projectApp.switchToMobile]);
switchDeviceType(mq);
});
$(projectApp.init); // run init on DOM ready; the original passed init()'s return value (undefined) as the DOMContentLoaded handler
$window.on('scroll', function () {
return projectApp.handleScroll();
}).on('load', function () {
return projectApp.handleLoad();
}).on('resize', function () {
return projectApp.handleResize();
});
})(); | {
openedTasksArray = JSON.parse(cookieTasks);
} | conditional_block |
bundle.js | "use strict";
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function setCookie(cname, cvalue, exdays) {
var d = new Date();
d.setTime(d.getTime() + exdays * 24 * 60 * 60 * 1000);
var expires = "expires=" + d.toUTCString();
document.cookie = cname + "=" + cvalue + ";" + expires + ";path=/";
}
function getCookie(cname) {
var name = cname + "=";
var ca = document.cookie.split(';');
for (var i = 0; i < ca.length; i++) {
var c = ca[i];
while (c.charAt(0) == ' ') {
c = c.substring(1);
} | if (c.indexOf(name) == 0) {
return c.substring(name.length, c.length);
}
}
return "";
}
function getScrollbarWidth() {
var outer = document.createElement("div");
outer.style.visibility = "hidden";
outer.style.width = "100px";
outer.style.msOverflowStyle = "scrollbar"; // needed for WinJS apps
document.body.appendChild(outer);
var widthNoScroll = outer.offsetWidth;
// force scrollbars
outer.style.overflow = "scroll";
// add innerdiv
var inner = document.createElement("div");
inner.style.width = "100%";
outer.appendChild(inner);
var widthWithScroll = inner.offsetWidth;
// remove divs
outer.parentNode.removeChild(outer);
//console.log(widthNoScroll, widthWithScroll);
return widthNoScroll - widthWithScroll;
}
window.loader = function (ev) {
var $body = $('body');
var $preloader = $('#preloader');
if (ev === 'show') {
show();
}
if (ev === 'hide') {
hide();
}
function show() {
$body.addClass('loading');
$preloader.addClass('opacity').fadeIn(200);
}
function hide() {
$body.removeClass('loading');
$preloader.fadeOut(200).removeClass('opacity');
}
};
var $ = jQuery.noConflict(); // assigned before EventsSlider, whose slider property invokes $ at definition time
var EventsSlider = {
slider: $('.events-slider'),
init: function init() {
var isInit = this.slider.hasClass('slick-initialized');
if (!isInit) {
this.slider.slick({
slide: '.item',
slidesToShow: 5,
slidesToScroll: 1,
centerMode: false,
centerPadding: '0%',
infinite: false,
arrows: false,
autoplay: false,
dots: false,
unslicked: true,
prevArrow: '<button type="button" class="slick-prev"><span class="icon-chevron-thin-left"></span></button>',
nextArrow: '<button type="button" class="slick-next"><span class="icon-chevron-thin-right"></span></button>',
responsive: [{
breakpoint: 1601,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '33%'
}
}, {
breakpoint: 1025,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '25%'
}
}, {
breakpoint: 580,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '20%'
}
}, {
breakpoint: 440,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '15%'
}
}, {
breakpoint: 370,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '12%'
}
}, {
breakpoint: 340,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '10%'
}
}]
});
this.slider.slick('slickGoTo', 1);
}
},
destroy: function destroy() {
var isInit = this.slider.hasClass('slick-initialized');
console.log(isInit);
if (isInit) {
$('.events-slider').slick('unslick');
}
}
};
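// Fire the custom event at most once per animation frame while the source event keeps firing.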
var throttle = function throttle(type, name, obj) {
var running = false;
var object = obj || window;
var func = function func() {
if (running) {
return;
}
running = true;
requestAnimationFrame(function () {
object.dispatchEvent(new CustomEvent(name));
running = false;
});
};
object.addEventListener(type, func);
};
function deviceType() {
return window.getComputedStyle(document.querySelector('body'), '::before').getPropertyValue('content').replace(/'/g, '').replace(/"/g, '');
}
function checkDeviceType(MQ, isMobile, isTablet, isDesktop, arrCbs) {
if (MQ === 'desktop' && isDesktop) {
arrCbs[0]();
} else if (MQ === 'tablet' && isTablet) {
arrCbs[1]();
} else if (MQ === 'mobile' && isMobile) {
arrCbs[2]();
}
//console.log('checkDeviceType:' + MQ);
}
function staticInit(mq, firstFunc, otherFunc, secFunc) {
if (mq === 'desktop') {
firstFunc();
} else if (mq === 'tablet') {
otherFunc();
} else if (mq === 'mobile') {
secFunc();
}
// console.log('staticInit:' + mq);
}
(function () {
var $window = $(window);
var $document = $(document);
var $body = $('body');
var $html = $('html');
var Android = navigator.userAgent.match(/Android/i) && !navigator.userAgent.match(/(Windows\sPhone)/i) ? true : false;
var App = function App() {
var _this = this;
_classCallCheck(this, App);
this.init = function () {
var self = _this;
if (Android) {
$('html').addClass('android');
}
//$('.field-account-number > input').inputmask('Regex', { regex: "^[1-9][0-9][0-9][0-9][0-9][0-9][0-9]?$|^100$" });
/*$('input[type="tel"]').inputmask({
"mask": "+9{1,2} (999) 999 99 99",
clearMaskOnLostFocus: false,
clearIncomplete: true
});
$('.field-code > input').inputmask({
"mask": "9 9 9 9 9",
clearMaskOnLostFocus: false,
clearIncomplete: true
});
$('.field-account-number > input').inputmask({
"mask": "9 9 9 9 9 9 9",
'clearMaskOnLostFocus': true,
'clearIncomplete': true
});*/
/* var maxYear = new Date().getFullYear() - 17;
$("#dob").datepicker({
container: '.ll-skin-lugo',
changeMonth: true,
changeYear: true,
yearRange: "1940:" + maxYear,
//minDate: new Date(1940, 1 - 1, 1),
maxDate: new Date(maxYear, 12 - 1, 1),
regional: 'ru',
beforeShow: function beforeShow(textbox, instance) {
$('.DivToAppendPicker').append($('#ui-datepicker-div'));
}
});*/
//$('.cuSelect > select').styler();
/* _this.initMmenu();
_this.modalEvents();
_this.customScroll();
_this.scrollToTop();
_this.scrollToId();
_this.popovers();
_this.openTask();
_this.openRegistration();
_this.toggleInput();
_this.togglePass();*/
_this.snowInit();
};
this.handleLoad = function () {
$('body').removeClass('loading');
$('#preloader').fadeOut(200);
//$('header').addClass('show');
};
this.switchToMobile = function () {
console.log('switchToMobile: Mobile');
};
this.switchToTablet = function () {
console.log('switchToTablet: Tablet');
};
this.switchToDesktop = function () {
console.log('switchToDesktop: Desktop');
};
this.handleResize = function () {
//console.log('resize');
};
this.destroy = function () {};
this.handleScroll = function () {};
this.scrollToTop = function () {
var $sctollToTop = $(".scrollToTop");
$(window).scroll(function () {
if ($(this).scrollTop() > 300) {
$('.scrollToTop').fadeIn();
} else {
$('.scrollToTop').fadeOut();
}
});
//Click event to scroll to top
$scrollToTop.click(function (e) {
e.preventDefault();
$('html, body').animate({ scrollTop: 0 }, 800);
return false;
});
};
this.scrollToId = function () {
var $el = $('.jsScrollTo');
$el.click(function (e) {
e.preventDefault();
var $scrollTo = $(this).attr('href');
$('html, body').animate({
scrollTop: $($scrollTo).offset().top
}, 400);
return false;
});
};
this.initMmenu = function () {
var $mobileNav = $('#mobile-nav');
var $mobileNavBtn = $('#show-mobile-menu');
if ($('#mobile-nav').length) {
$mobileNav.mmenu({
extensions: ["border-none", "fx-menu-fade", "fx-listitems-slide", "position-front", "fullscreen"],
navbars: {
add: false,
position: "right",
content: ["close"]
}
}, {
clone: false,
offCanvas: {
pageSelector: "#page"
}
});
var mobAPI = $mobileNav.data("mmenu");
$mobileNavBtn.on('click', mobAPI.open);
// $document.on('click', '#show-mobile-menu', function (e) {
// e.preventDefault();
// e.stopPropagation();
// mobAPI.close;
// });
mobAPI.bind('open', function () {
$mobileNavBtn.addClass('is-active');
});
mobAPI.bind('close', function () {
$mobileNavBtn.removeClass('is-active');
});
$(window).on("orientationchange", function (event) {
$mobileNavBtn.removeClass('is-active');
mobAPI.close();
});
}
};
this.openTask = function () {
var audio = document.getElementById("audio");
var cookieTasks = getCookie('openedTasks');
var openedTasksArray = [];
if (cookieTasks.length !== 0) {
openedTasksArray = JSON.parse(cookieTasks);
}
if ($('body').hasClass('logged')) {
$.each(openedTasksArray, function (i, item) {
$('.item-event[data-taskID=' + item + ']').removeClass('disabled');
});
}
$('.item-event').on('click', function (e) {
var logged = $('body').hasClass('logged');
var disabled = $(this).hasClass('disabled');
if (!logged) {
$("#modalLogin").modal("show");
} else {
if (disabled) {
audio.play();
openedTasksArray.push($(this).attr('data-taskID'));
setCookie('openedTasks', JSON.stringify(openedTasksArray), 1);
$(this).removeClass('disabled');
}
}
});
};
this.modalEvents = function () {
$('.modal').on('shown.bs.modal', function () {}).on('hidden.bs.modal', function () {
$('body').css('padding-right', '0');
}).on('show.bs.modal', function (e) {
testAnim('bounceInLeft');
}).on('hide.bs.modal', function (e) {
testAnim('bounceOutRight');
});
$('.modal-terms').on('show.bs.modal', function () {
$(this).append('<div class="modal-backdrop fade in" data-dismiss="modal" aria-label="Close"></div>');
}).on('hide.bs.modal', function (e) {
$(this).find('.modal-backdrop').remove();
});
$('#modalRegistration').on('hidden.bs.modal', function (e) {
var cookieUid = getCookie('uid');
if (cookieUid.length > 2) {
location.reload();
}
});
function closeOpenModal(modal) {
$('.modal').modal('hide');
setTimeout(function () {
$(modal).modal('show');
}, 550);
};
function testAnim(x) {
$('.modal .modal-dialog').attr('class', 'modal-dialog ' + x + ' animated');
};
};
this.popovers = function () {
var popover = $('[data-toggle="popover"]');
popover.popover({
html: true,
content: function content() {
var content = $(this).attr("data-popover-content");
return $(content).children(".popover-body").html();
},
title: function title() {
var title = $(this).attr("data-popover-content");
return $(title).children(".popover-heading").html();
}
});
};
this.scrollDown = function () {
$('.btn-scroll-down').on('click', function (e) {
e.preventDefault();
if ($('html').hasClass('fp-enabled')) {
$.fn.fullpage.moveTo('instruction');
} else {
var $scrollTo = '#info';
$('html, body').animate({
scrollTop: $($scrollTo).offset().top
}, 400);
return false;
}
});
};
this.customScroll = function () {
$(window).load(function () {
$('.cuScroll').mCustomScrollbar({
axis: "y",
scrollInertia: 0,
//theme: '3d',
scrollButtons: { enable: false },
mouseWheel: { enable: true }
});
});
};
this.wowAnimation = function () {
var wow = new WOW({
boxClass: 'wow', // animated element css class (default is wow)
animateClass: 'animated', // animation css class (default is animated)
offset: 0, // distance to the element when triggering the animation (default is 0)
mobile: false, // trigger animations on mobile devices (default is true)
live: true, // act on asynchronously loaded content (default is true)
callback: function callback(box) {
// the callback is fired every time an animation is started
// the argument that is passed in is the DOM node being animated
},
scrollContainer: null // optional scroll container selector, otherwise use window
});
wow.init();
};
this.openRegistration = function () {
$('#open-modal-reg').on('click', function (e) {
e.preventDefault();
$("#modalLogin").modal("hide");
setTimeout(function () {
$("#modalRegistration").modal("show");
}, 550);
});
};
this.togglePass = function () {
var inputPass = $('#passwd');
$('.btn-show-pass').on('click', function (e) {
e.preventDefault();
if ($(this).hasClass('active')) {
$(this).removeClass('active');
inputPass.attr('type', 'password');
} else {
$(this).addClass('active');
inputPass.attr('type', 'text');
}
});
};
this.toggleInput = function () {
var emailField = {
type: 'email',
name: 'email',
placeholder: 'Email *',
btnType: 'email',
id: 'email'
};
var phoneField = {
type: 'tel',
name: 'telno',
placeholder: 'Телефон *',
btnType: 'phone',
id: 'telno'
};
$('.btn-change').on('click', function (e) {
e.preventDefault();
var typeField = $(this).attr('data-type');
var input = $(this).parents('.group-email-phone').find('input');
if (typeField == 'email') {
input.val('');
input.attr('type', phoneField.type);
input.attr('name', phoneField.name);
input.attr('id', phoneField.id);
input.attr('placeholder', phoneField.placeholder);
$(this).attr('data-type', phoneField.btnType);
input.inputmask({
"mask": "+9{1,2} (999) 999 99 99",
clearMaskOnLostFocus: false
});
} else if (typeField == 'phone') {
input.inputmask('unmaskedvalue');
input.inputmask('remove');
input.val('');
input.attr('type', emailField.type);
input.attr('name', emailField.name);
input.attr('id', emailField.id);
input.attr('placeholder', emailField.placeholder);
$(this).attr('data-type', emailField.btnType);
}
});
};
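// Canvas snow: each flake falls by its speed, drifts by its wind, and respawns above the canvas once it leaves the bottom.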
this.snowInit = function () {
var Snow = function Snow(canvas, count) {
var ctx = canvas.getContext('2d');
var snowflakes = [];
var add = function add(item) {
return snowflakes.push(item(canvas));
};
var update = function update() {
return _.forEach(snowflakes, function (el) {
return el.update();
});
};
var resize = function resize() {
ctx.canvas.width = canvas.offsetWidth;
ctx.canvas.height = canvas.offsetHeight;
_.forEach(snowflakes, function (el) {
return el.resized();
});
};
var draw = function draw() {
ctx.clearRect(0, 0, canvas.offsetWidth, canvas.offsetHeight);
_.forEach(snowflakes, function (el) {
return el.draw();
});
};
var events = function events() {
window.addEventListener('resize', resize);
};
var loop = function loop() {
draw();
update();
animFrame(loop);
};
var init = function init() {
_.times(count, function () {
return add(function (canvas) {
return new SnowItem(canvas);
});
});
events();
loop();
};
init(count);
resize();
return { add: add, resize: resize };
};
var defaultOptions = {
color: '#fff',
radius: [0.5, 2.0],
speed: [1, 1],
wind: [-0.1, 1.0]
};
var SnowItem = function SnowItem(canvas) {
var _ref, _ref2, _ref3;
var drawFn = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null;
var opts = arguments[2];
var options = _extends({}, defaultOptions, opts);
var radius = options.radius,
speed = options.speed,
wind = options.wind,
color = options.color;
var params = {
color: color,
x: _.random(0, canvas.offsetWidth),
y: _.random(-canvas.offsetHeight, 0),
radius: (_ref = _).random.apply(_ref, _toConsumableArray(radius)),
speed: (_ref2 = _).random.apply(_ref2, _toConsumableArray(speed)),
wind: (_ref3 = _).random.apply(_ref3, _toConsumableArray(wind)),
isResized: false
};
var ctx = canvas.getContext('2d');
var updateData = function updateData() {
params.x = _.random(0, canvas.offsetWidth);
params.y = _.random(-canvas.offsetHeight, 0);
};
var resized = function resized() {
return params.isResized = true;
};
var drawDefault = function drawDefault() {
ctx.beginPath();
ctx.arc(params.x, params.y, params.radius, 0, 2 * Math.PI);
ctx.fillStyle = params.color;
ctx.fill();
ctx.closePath();
};
var draw = drawFn ? function () {
return drawFn(ctx, params);
} : drawDefault;
var translate = function translate() {
params.y += params.speed;
params.x += params.wind;
};
var onDown = function onDown() {
if (params.y < canvas.offsetHeight) return;
if (params.isResized) {
updateData();
params.isResized = false;
} else {
params.y = 0;
params.x = _.random(0, canvas.offsetWidth);
}
};
var update = function update() {
translate();
onDown();
};
return {
update: update,
resized: resized,
draw: draw
};
};
var el = document.querySelector('.page-content');
var wrapper = document.querySelector('.box-snow');
var canvas = document.getElementById('snow');
var el1 = document.querySelector('.box-snow-front');
var wrapper1 = document.querySelector('.box');
var canvas1 = document.getElementById('snow-front');
var animFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame;
if ($('#snow').length) {
var snow = Snow(canvas, 80);
}
if ($('#snow-front').length) {
var snow1 = Snow(canvas1, 200);
}
};
};
var projectApp = new App();
var MQ = deviceType();
var isMobile = false;
var isTablet = true;
var isDesktop = false;
throttle('resize', 'optimizedResize');
function switchDeviceType(mq) {
if (mq === 'desktop' && isDesktop) {
isDesktop = false;
isTablet = true;
isMobile = false;
} else if (mq === 'tablet' && isTablet) {
isMobile = true;
isDesktop = true;
isTablet = false;
} else if (mq === 'mobile' && isMobile) {
isMobile = false;
isTablet = true;
isDesktop = false;
}
//console.log('switchDeviceType: ' + mq);
}
staticInit(MQ, projectApp.switchToDesktop, projectApp.switchToTablet, projectApp.switchToMobile);
$window.on('optimizedResize', function () {
var mq = deviceType();
checkDeviceType(mq, isMobile, isTablet, isDesktop, [projectApp.switchToDesktop, projectApp.switchToTablet, projectApp.switchToMobile]);
switchDeviceType(mq);
});
$(projectApp.init); // run init on DOM ready; the original passed init()'s return value (undefined) as the DOMContentLoaded handler
$window.on('scroll', function () {
return projectApp.handleScroll();
}).on('load', function () {
return projectApp.handleLoad();
}).on('resize', function () {
return projectApp.handleResize();
});
})(); | random_line_split |
|
bundle.js | "use strict";
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }
function | (instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
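// Cookie helpers: persist small bits of client state (opened tasks, uid) for a given number of days.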
function setCookie(cname, cvalue, exdays) {
var d = new Date();
d.setTime(d.getTime() + exdays * 24 * 60 * 60 * 1000);
var expires = "expires=" + d.toUTCString();
document.cookie = cname + "=" + cvalue + ";" + expires + ";path=/";
}
function getCookie(cname) {
var name = cname + "=";
var ca = document.cookie.split(';');
for (var i = 0; i < ca.length; i++) {
var c = ca[i];
while (c.charAt(0) == ' ') {
c = c.substring(1);
}
if (c.indexOf(name) == 0) {
return c.substring(name.length, c.length);
}
}
return "";
}
function getScrollbarWidth() {
var outer = document.createElement("div");
outer.style.visibility = "hidden";
outer.style.width = "100px";
outer.style.msOverflowStyle = "scrollbar"; // needed for WinJS apps
document.body.appendChild(outer);
var widthNoScroll = outer.offsetWidth;
// force scrollbars
outer.style.overflow = "scroll";
// add innerdiv
var inner = document.createElement("div");
inner.style.width = "100%";
outer.appendChild(inner);
var widthWithScroll = inner.offsetWidth;
// remove divs
outer.parentNode.removeChild(outer);
//console.log(widthNoScroll, widthWithScroll);
return widthNoScroll - widthWithScroll;
}
window.loader = function (ev) {
var $body = $('body');
var $preloader = $('#preloader');
if (ev === 'show') {
show();
}
if (ev === 'hide') {
hide();
}
function show() {
$body.addClass('loading');
$preloader.addClass('opacity').fadeIn(200);
}
function hide() {
$body.removeClass('loading');
$preloader.fadeOut(200).removeClass('opacity');
}
};
var $ = jQuery.noConflict(); // assigned before EventsSlider, whose slider property invokes $ at definition time
var EventsSlider = {
slider: $('.events-slider'),
init: function init() {
var isInit = this.slider.hasClass('slick-initialized');
if (!isInit) {
this.slider.slick({
slide: '.item',
slidesToShow: 5,
slidesToScroll: 1,
centerMode: false,
centerPadding: '0%',
infinite: false,
arrows: false,
autoplay: false,
dots: false,
unslicked: true,
prevArrow: '<button type="button" class="slick-prev"><span class="icon-chevron-thin-left"></span></button>',
nextArrow: '<button type="button" class="slick-next"><span class="icon-chevron-thin-right"></span></button>',
responsive: [{
breakpoint: 1601,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '33%'
}
}, {
breakpoint: 1025,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '25%'
}
}, {
breakpoint: 580,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '20%'
}
}, {
breakpoint: 440,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '15%'
}
}, {
breakpoint: 370,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '12%'
}
}, {
breakpoint: 340,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '10%'
}
}]
});
this.slider.slick('slickGoTo', 1);
}
},
destroy: function destroy() {
var isInit = this.slider.hasClass('slick-initialized');
console.log(isInit);
if (isInit) {
$('.events-slider').slick('unslick');
}
}
};
var throttle = function throttle(type, name, obj) {
var running = false;
var object = obj || window;
var func = function func() {
if (running) {
return;
}
running = true;
requestAnimationFrame(function () {
object.dispatchEvent(new CustomEvent(name));
running = false;
});
};
object.addEventListener(type, func);
};
function deviceType() {
return window.getComputedStyle(document.querySelector('body'), '::before').getPropertyValue('content').replace(/'/g, '').replace(/"/g, '');
}
function checkDeviceType(MQ, isMobile, isTablet, isDesktop, arrCbs) {
if (MQ === 'desktop' && isDesktop) {
arrCbs[0]();
} else if (MQ === 'tablet' && isTablet) {
arrCbs[1]();
} else if (MQ === 'mobile' && isMobile) {
arrCbs[2]();
}
//console.log('checkDeviceType:' + MQ);
}
function staticInit(mq, firstFunc, otherFunc, secFunc) {
if (mq === 'desktop') {
firstFunc();
} else if (mq === 'tablet') {
otherFunc();
} else if (mq === 'mobile') {
secFunc();
}
// console.log('staticInit:' + mq);
}
(function () {
var $window = $(window);
var $document = $(document);
var $body = $('body');
var $html = $('html');
var Android = /Android/i.test(navigator.userAgent) && !/(Windows\sPhone)/i.test(navigator.userAgent);
var App = function App() {
var _this = this;
_classCallCheck(this, App);
this.init = function () {
var self = _this;
if (Android) {
$('html').addClass('android');
}
//$('.field-account-number > input').inputmask('Regex', { regex: "^[1-9][0-9][0-9][0-9][0-9][0-9][0-9]?$|^100$" });
/*$('input[type="tel"]').inputmask({
"mask": "+9{1,2} (999) 999 99 99",
clearMaskOnLostFocus: false,
clearIncomplete: true
});
$('.field-code > input').inputmask({
"mask": "9 9 9 9 9",
clearMaskOnLostFocus: false,
clearIncomplete: true
});
$('.field-account-number > input').inputmask({
"mask": "9 9 9 9 9 9 9",
'clearMaskOnLostFocus': true,
'clearIncomplete': true
});*/
/* var maxYear = new Date().getFullYear() - 17;
$("#dob").datepicker({
container: '.ll-skin-lugo',
changeMonth: true,
changeYear: true,
yearRange: "1940:" + maxYear,
//minDate: new Date(1940, 1 - 1, 1),
maxDate: new Date(maxYear, 12 - 1, 1),
regional: 'ru',
beforeShow: function beforeShow(textbox, instance) {
$('.DivToAppendPicker').append($('#ui-datepicker-div'));
}
});*/
//$('.cuSelect > select').styler();
/* _this.initMmenu();
_this.modalEvents();
_this.customScroll();
_this.scrollToTop();
_this.scrollToId();
_this.popovers();
_this.openTask();
_this.openRegistration();
_this.toggleInput();
_this.togglePass();*/
_this.snowInit();
};
this.handleLoad = function () {
$('body').removeClass('loading');
$('#preloader').fadeOut(200);
//$('header').addClass('show');
};
this.switchToMobile = function () {
console.log('switchToMobile: Mobile');
};
this.switchToTablet = function () {
console.log('switchToTablet: Tablet');
};
this.switchToDesktop = function () {
console.log('switchToDesktop: Desktop');
};
this.handleResize = function () {
//console.log('resize');
};
this.destroy = function () {};
this.handleScroll = function () {};
this.scrollToTop = function () {
var $sctollToTop = $(".scrollToTop");
$(window).scroll(function () {
if ($(this).scrollTop() > 300) {
$('.scrollToTop').fadeIn();
} else {
$('.scrollToTop').fadeOut();
}
});
//Click event to scroll to top
$scrollToTop.click(function (e) {
e.preventDefault();
$('html, body').animate({ scrollTop: 0 }, 800);
return false;
});
};
this.scrollToId = function () {
var $el = $('.jsScrollTo');
$el.click(function (e) {
e.preventDefault();
var $scrollTo = $(this).attr('href');
$('html, body').animate({
scrollTop: $($scrollTo).offset().top
}, 400);
return false;
});
};
this.initMmenu = function () {
var $mobileNav = $('#mobile-nav');
var $mobileNavBtn = $('#show-mobile-menu');
if ($('#mobile-nav').length) {
$mobileNav.mmenu({
extensions: ["border-none", "fx-menu-fade", "fx-listitems-slide", "position-front", "fullscreen"],
navbars: {
add: false,
position: "right",
content: ["close"]
}
}, {
clone: false,
offCanvas: {
pageSelector: "#page"
}
});
var mobAPI = $mobileNav.data("mmenu");
$mobileNavBtn.on('click', mobAPI.open);
// $document.on('click', '#show-mobile-menu', function (e) {
// e.preventDefault();
// e.stopPropagation();
// mobAPI.close;
// });
mobAPI.bind('open', function () {
$mobileNavBtn.addClass('is-active');
});
mobAPI.bind('close', function () {
$mobileNavBtn.removeClass('is-active');
});
$(window).on("orientationchange", function (event) {
$mobileNavBtn.removeClass('is-active');
mobAPI.close();
});
}
};
this.openTask = function () {
var audio = document.getElementById("audio");
var cookieTasks = getCookie('openedTasks');
var openedTasksArray = [];
if (cookieTasks.length !== 0) {
openedTasksArray = JSON.parse(cookieTasks);
}
if ($('body').hasClass('logged')) {
$.each(openedTasksArray, function (i, item) {
$('.item-event[data-taskID=' + item + ']').removeClass('disabled');
});
}
$('.item-event').on('click', function (e) {
var logged = $('body').hasClass('logged');
var disabled = $(this).hasClass('disabled');
if (!logged) {
$("#modalLogin").modal("show");
} else {
if (disabled) {
audio.play();
openedTasksArray.push($(this).attr('data-taskID'));
setCookie('openedTasks', JSON.stringify(openedTasksArray), 1);
$(this).removeClass('disabled');
}
}
});
};
this.modalEvents = function () {
$('.modal').on('shown.bs.modal', function () {}).on('hidden.bs.modal', function () {
$('body').css('padding-right', '0');
}).on('show.bs.modal', function (e) {
testAnim('bounceInLeft');
}).on('hide.bs.modal', function (e) {
testAnim('bounceOutRight');
});
$('.modal-terms').on('show.bs.modal', function () {
$(this).append('<div class="modal-backdrop fade in" data-dismiss="modal" aria-label="Close"></div>');
}).on('hide.bs.modal', function (e) {
$(this).find('.modal-backdrop').remove();
});
$('#modalRegistration').on('hidden.bs.modal', function (e) {
var cookieUid = getCookie('uid');
if (cookieUid.length > 2) {
location.reload();
}
});
function closeOpenModal(modal) {
$('.modal').modal('hide');
setTimeout(function () {
$(modal).modal('show');
}, 550);
};
function testAnim(x) {
$('.modal .modal-dialog').attr('class', 'modal-dialog ' + x + ' animated');
};
};
this.popovers = function () {
var popover = $('[data-toggle="popover"]');
popover.popover({
html: true,
content: function content() {
var content = $(this).attr("data-popover-content");
return $(content).children(".popover-body").html();
},
title: function title() {
var title = $(this).attr("data-popover-content");
return $(title).children(".popover-heading").html();
}
});
};
this.scrollDown = function () {
$('.btn-scroll-down').on('click', function (e) {
e.preventDefault();
if ($('html').hasClass('fp-enabled')) {
$.fn.fullpage.moveTo('instruction');
} else {
var $scrollTo = '#info';
$('html, body').animate({
scrollTop: $($scrollTo).offset().top
}, 400);
return false;
}
});
};
this.customScroll = function () {
$(window).load(function () {
$('.cuScroll').mCustomScrollbar({
axis: "y",
scrollInertia: 0,
//theme: '3d',
scrollButtons: { enable: false },
mouseWheel: { enable: true }
});
});
};
this.wowAnimation = function () {
var wow = new WOW({
boxClass: 'wow', // animated element css class (default is wow)
animateClass: 'animated', // animation css class (default is animated)
offset: 0, // distance to the element when triggering the animation (default is 0)
mobile: false, // trigger animations on mobile devices (default is true)
live: true, // act on asynchronously loaded content (default is true)
callback: function callback(box) {
// the callback is fired every time an animation is started
// the argument that is passed in is the DOM node being animated
},
scrollContainer: null // optional scroll container selector, otherwise use window
});
wow.init();
};
this.openRegistration = function () {
$('#open-modal-reg').on('click', function (e) {
e.preventDefault();
$("#modalLogin").modal("hide");
setTimeout(function () {
$("#modalRegistration").modal("show");
}, 550);
});
};
this.togglePass = function () {
var inputPass = $('#passwd');
$('.btn-show-pass').on('click', function (e) {
e.preventDefault();
if ($(this).hasClass('active')) {
$(this).removeClass('active');
inputPass.attr('type', 'password');
} else {
$(this).addClass('active');
inputPass.attr('type', 'text');
}
});
};
this.toggleInput = function () {
var emailField = {
type: 'email',
name: 'email',
placeholder: 'Email *',
btnType: 'email',
id: 'email'
};
var phoneField = {
type: 'tel',
name: 'telno',
placeholder: 'Телефон *',
btnType: 'phone',
id: 'telno'
};
$('.btn-change').on('click', function (e) {
e.preventDefault();
var typeField = $(this).attr('data-type');
var input = $(this).parents('.group-email-phone').find('input');
if (typeField == 'email') {
input.val('');
input.attr('type', phoneField.type);
input.attr('name', phoneField.name);
input.attr('id', phoneField.id);
input.attr('placeholder', phoneField.placeholder);
$(this).attr('data-type', phoneField.btnType);
input.inputmask({
"mask": "+9{1,2} (999) 999 99 99",
clearMaskOnLostFocus: false
});
} else if (typeField == 'phone') {
input.inputmask('unmaskedvalue');
input.inputmask('remove');
input.val('');
input.attr('type', emailField.type);
input.attr('name', emailField.name);
input.attr('id', emailField.id);
input.attr('placeholder', emailField.placeholder);
$(this).attr('data-type', emailField.btnType);
}
});
};
this.snowInit = function () {
var Snow = function Snow(canvas, count) {
var ctx = canvas.getContext('2d');
var snowflakes = [];
var add = function add(item) {
return snowflakes.push(item(canvas));
};
var update = function update() {
return _.forEach(snowflakes, function (el) {
return el.update();
});
};
var resize = function resize() {
ctx.canvas.width = canvas.offsetWidth;
ctx.canvas.height = canvas.offsetHeight;
_.forEach(snowflakes, function (el) {
return el.resized();
});
};
var draw = function draw() {
ctx.clearRect(0, 0, canvas.offsetWidth, canvas.offsetHeight);
_.forEach(snowflakes, function (el) {
return el.draw();
});
};
var events = function events() {
window.addEventListener('resize', resize);
};
var loop = function loop() {
draw();
update();
animFrame(loop);
};
var init = function init() {
_.times(count, function () {
return add(function (canvas) {
return new SnowItem(canvas);
});
});
events();
loop();
};
init(count);
resize();
return { add: add, resize: resize };
};
var defaultOptions = {
color: '#fff',
radius: [0.5, 2.0],
speed: [1, 1],
wind: [-0.1, 1.0]
};
var SnowItem = function SnowItem(canvas) {
var _ref, _ref2, _ref3;
var drawFn = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null;
var opts = arguments[2];
var options = _extends({}, defaultOptions, opts);
var radius = options.radius,
speed = options.speed,
wind = options.wind,
color = options.color;
var params = {
color: color,
x: _.random(0, canvas.offsetWidth),
y: _.random(-canvas.offsetHeight, 0),
radius: (_ref = _).random.apply(_ref, _toConsumableArray(radius)),
speed: (_ref2 = _).random.apply(_ref2, _toConsumableArray(speed)),
wind: (_ref3 = _).random.apply(_ref3, _toConsumableArray(wind)),
isResized: false
};
var ctx = canvas.getContext('2d');
var updateData = function updateData() {
params.x = _.random(0, canvas.offsetWidth);
params.y = _.random(-canvas.offsetHeight, 0);
};
var resized = function resized() {
return params.isResized = true;
};
var drawDefault = function drawDefault() {
ctx.beginPath();
ctx.arc(params.x, params.y, params.radius, 0, 2 * Math.PI);
ctx.fillStyle = params.color;
ctx.fill();
ctx.closePath();
};
var draw = drawFn ? function () {
return drawFn(ctx, params);
} : drawDefault;
var translate = function translate() {
params.y += params.speed;
params.x += params.wind;
};
var onDown = function onDown() {
if (params.y < canvas.offsetHeight) return;
if (params.isResized) {
updateData();
params.isResized = false;
} else {
params.y = 0;
params.x = _.random(0, canvas.offsetWidth);
}
};
var update = function update() {
translate();
onDown();
};
return {
update: update,
resized: resized,
draw: draw
};
};
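// Each SnowItem is a self-contained particle: update() applies the per-frame
// speed (vertical fall) and wind (horizontal drift) deltas, and onDown()
// recycles a flake that has dropped below the canvas back to y = 0 at a random
// x. The isResized flag defers re-randomising a flake's coordinates until it
// leaves the view, which avoids a visible jump of every flake on window resize.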
var el = document.querySelector('.page-content');
var wrapper = document.querySelector('.box-snow');
var canvas = document.getElementById('snow');
var el1 = document.querySelector('.box-snow-front');
var wrapper1 = document.querySelector('.box');
var canvas1 = document.getElementById('snow-front');
var animFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame;
if ($('#snow').length) {
var snow = Snow(canvas, 80);
}
if ($('#snow-front').length) {
var snow1 = Snow(canvas1, 200);
}
};
};
var projectApp = new App();
var MQ = deviceType();
var isMobile = false;
var isTablet = true;
var isDesktop = false;
throttle('resize', 'optimizedResize');
function switchDeviceType(mq) {
if (mq === 'desktop' && isDesktop) {
isDesktop = false;
isTablet = true;
isMobile = false;
} else if (mq === 'tablet' && isTablet) {
isMobile = true;
isDesktop = true;
isTablet = false;
} else if (mq === 'mobile' && isMobile) {
isMobile = false;
isTablet = true;
isDesktop = false;
}
//console.log('switchDeviceType: ' + mq);
}
staticInit(MQ, projectApp.switchToDesktop, projectApp.switchToTablet, projectApp.switchToMobile);
$window.on('optimizedResize', function () {
var mq = deviceType();
checkDeviceType(mq, isMobile, isTablet, isDesktop, [projectApp.switchToDesktop, projectApp.switchToTablet, projectApp.switchToMobile]);
switchDeviceType(mq);
});
projectApp.init();
$window.on('scroll', function () {
return projectApp.handleScroll();
}).on('load', function () {
return projectApp.handleLoad();
}).on('resize', function () {
return projectApp.handleResize();
});
})(); | _classCallCheck | identifier_name |
bundle.js | "use strict";
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
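// The three helpers above (_extends, _toConsumableArray, _classCallCheck)
// appear to be standard Babel-generated shims for Object.assign, array spread,
// and class constructor checks; they belong to the ES2015 transpile step
// rather than to the application code itself.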
function setCookie(cname, cvalue, exdays) {
var d = new Date();
d.setTime(d.getTime() + exdays * 24 * 60 * 60 * 1000);
var expires = "expires=" + d.toUTCString();
document.cookie = cname + "=" + cvalue + ";" + expires + ";path=/";
}
function getCookie(cname) {
var name = cname + "=";
var ca = document.cookie.split(';');
for (var i = 0; i < ca.length; i++) {
var c = ca[i];
while (c.charAt(0) == ' ') {
c = c.substring(1);
}
if (c.indexOf(name) == 0) {
return c.substring(name.length, c.length);
}
}
return "";
}
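// Minimal round trip for the cookie helpers above (illustrative only):
//
//   setCookie('openedTasks', JSON.stringify([1, 2]), 1); // keep for 1 day
//   var raw = getCookie('openedTasks');                  // '' when absent
//   var tasks = raw.length ? JSON.parse(raw) : [];
//
// getCookie() returns an empty string rather than null for a missing cookie,
// which is why callers below check `cookieTasks.length !== 0` before parsing.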
function getScrollbarWidth() {
var outer = document.createElement("div");
outer.style.visibility = "hidden";
outer.style.width = "100px";
outer.style.msOverflowStyle = "scrollbar"; // needed for WinJS apps
document.body.appendChild(outer);
var widthNoScroll = outer.offsetWidth;
// force scrollbars
outer.style.overflow = "scroll";
// add innerdiv
var inner = document.createElement("div");
inner.style.width = "100%";
outer.appendChild(inner);
var widthWithScroll = inner.offsetWidth;
// remove divs
outer.parentNode.removeChild(outer);
//console.log(widthNoScroll, widthWithScroll);
return widthNoScroll - widthWithScroll;
}
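// getScrollbarWidth() measures the native scrollbar by comparing an offscreen
// element's width with and without forced scrollbars. A value like this is
// typically used to pad the body while a modal hides the page scrollbar; that
// is an assumed use here, since no caller appears in this bundle.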
window.loader = function (ev) {
var $body = $('body');
var $preloader = $('#preloader');
if (ev === 'show') {
show();
}
if (ev === 'hide') {
hide();
}
function show() {
$body.addClass('loading');
$preloader.addClass('opacity').fadeIn(200);
}
function hide() {
$body.removeClass('loading');
$preloader.fadeOut(200).removeClass('opacity');
}
};
var EventsSlider = {
slider: $('.events-slider'),
init: function init() {
var isInit = this.slider.hasClass('slick-initialized');
if (!isInit) {
this.slider.slick({
slide: '.item',
slidesToShow: 5,
slidesToScroll: 1,
centerMode: false,
centerPadding: '0%',
infinite: false,
arrows: false,
autoplay: false,
dots: false,
unslicked: true,
prevArrow: '<button type="button" class="slick-prev"><span class="icon-chevron-thin-left"></span></button>',
nextArrow: '<button type="button" class="slick-next"><span class="icon-chevron-thin-right"></span></button>',
responsive: [{
breakpoint: 1601,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '33%'
}
}, {
breakpoint: 1025,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '25%'
}
}, {
breakpoint: 580,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '20%'
}
}, {
breakpoint: 440,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '15%'
}
}, {
breakpoint: 370,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '12%'
}
}, {
breakpoint: 340,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
centerMode: true,
dots: true,
centerPadding: '10%'
}
}]
});
this.slider.slick('slickGoTo', 1);
}
},
destroy: function destroy() {
var isInit = this.slider.hasClass('slick-initialized');
//console.log(isInit);
if (isInit) {
$('.events-slider').slick('unslick');
}
}
};
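// Both init() and destroy() guard on the 'slick-initialized' class that the
// slick plugin adds to its element, so EventsSlider is safe to call repeatedly,
// e.g. from breakpoint-change handlers (an assumed call site; none is shown in
// this bundle):
//
//   EventsSlider.init();    // no-op if the slider is already initialized
//   EventsSlider.destroy(); // no-op if it is not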
var $ = jQuery.noConflict();
var throttle = function throttle(type, name, obj) {
var running = false;
var object = obj || window;
var func = function func() {
if (running) {
return;
}
running = true;
requestAnimationFrame(function () {
object.dispatchEvent(new CustomEvent(name));
running = false;
});
};
object.addEventListener(type, func);
};
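// throttle() coalesces bursts of a native event into at most one custom event
// per animation frame. This file wires it up further down:
//
//   throttle('resize', 'optimizedResize');
//   $window.on('optimizedResize', function () { /* cheap resize work */ });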
function deviceType() {
return window.getComputedStyle(document.querySelector('body'), '::before').getPropertyValue('content').replace(/'/g, '').replace(/"/g, '');
}
function checkDeviceType(MQ, isMobile, isTablet, isDesktop, arrCbs) {
if (MQ === 'desktop' && isDesktop) {
arrCbs[0]();
} else if (MQ === 'tablet' && isTablet) {
arrCbs[1]();
} else if (MQ === 'mobile' && isMobile) {
arrCbs[2]();
}
//console.log('checkDeviceType:' + MQ);
}
function staticInit(mq, firstFunc, otherFunc, secFunc) |
(function () {
var $window = $(window);
var $document = $(document);
var $body = $('body');
var $html = $('html');
var Android = navigator.userAgent.match(/Android/i) && !navigator.userAgent.match(/(Windows\sPhone)/i) ? true : false;
var App = function App() {
var _this = this;
_classCallCheck(this, App);
this.init = function () {
var self = _this;
if (Android) {
$('html').addClass('android');
}
//$('.field-account-number > input').inputmask('Regex', { regex: "^[1-9][0-9][0-9][0-9][0-9][0-9][0-9]?$|^100$" });
/*$('input[type="tel"]').inputmask({
"mask": "+9{1,2} (999) 999 99 99",
clearMaskOnLostFocus: false,
clearIncomplete: true
});
$('.field-code > input').inputmask({
"mask": "9 9 9 9 9",
clearMaskOnLostFocus: false,
clearIncomplete: true
});
$('.field-account-number > input').inputmask({
"mask": "9 9 9 9 9 9 9",
'clearMaskOnLostFocus': true,
'clearIncomplete': true
});*/
/* var maxYear = new Date().getFullYear() - 17;
$("#dob").datepicker({
container: '.ll-skin-lugo',
changeMonth: true,
changeYear: true,
yearRange: "1940:" + maxYear,
//minDate: new Date(1940, 1 - 1, 1),
maxDate: new Date(maxYear, 12 - 1, 1),
regional: 'ru',
beforeShow: function beforeShow(textbox, instance) {
$('.DivToAppendPicker').append($('#ui-datepicker-div'));
}
});*/
//$('.cuSelect > select').styler();
/* _this.initMmenu();
_this.modalEvents();
_this.customScroll();
_this.scrollToTop();
_this.scrollToId();
_this.popovers();
_this.openTask();
_this.openRegistration();
_this.toggleInput();
_this.togglePass();*/
_this.snowInit();
};
this.handleLoad = function () {
$('body').removeClass('loading');
$('#preloader').fadeOut(200);
//$('header').addClass('show');
};
this.switchToMobile = function () {
console.log('switchToMobile: Mobile');
};
this.switchToTablet = function () {
console.log('switchToTablet: Tablet');
};
this.switchToDesktop = function () {
console.log('switchToDesktop: Desktop');
};
this.handleResize = function () {
//console.log('resize');
};
this.destroy = function () {};
this.handleScroll = function () {};
this.scrollToTop = function () {
var $scrollToTop = $(".scrollToTop");
$(window).scroll(function () {
if ($(this).scrollTop() > 300) {
$('.scrollToTop').fadeIn();
} else {
$('.scrollToTop').fadeOut();
}
});
//Click event to scroll to top
$scrollToTop.click(function (e) {
e.preventDefault();
$('html, body').animate({ scrollTop: 0 }, 800);
return false;
});
};
this.scrollToId = function () {
var $el = $('.jsScrollTo');
$el.click(function (e) {
e.preventDefault();
var $scrollTo = $(this).attr('href');
$('html, body').animate({
scrollTop: $($scrollTo).offset().top
}, 400);
return false;
});
};
this.initMmenu = function () {
var $mobileNav = $('#mobile-nav');
var $mobileNavBtn = $('#show-mobile-menu');
if ($('#mobile-nav').length) {
$mobileNav.mmenu({
extensions: ["border-none", "fx-menu-fade", "fx-listitems-slide", "position-front", "fullscreen"],
navbars: {
add: false,
position: "right",
content: ["close"]
}
}, {
clone: false,
offCanvas: {
pageSelector: "#page"
}
});
var mobAPI = $mobileNav.data("mmenu");
$mobileNavBtn.on('click', mobAPI.open);
// $document.on('click', '#show-mobile-menu', function (e) {
// e.preventDefault();
// e.stopPropagation();
// mobAPI.close;
// });
mobAPI.bind('open', function () {
$mobileNavBtn.addClass('is-active');
});
mobAPI.bind('close', function () {
$mobileNavBtn.removeClass('is-active');
});
$(window).on("orientationchange", function (event) {
$mobileNavBtn.removeClass('is-active');
mobAPI.close();
});
}
};
this.openTask = function () {
var audio = document.getElementById("audio");
var cookieTasks = getCookie('openedTasks');
var openedTasksArray = [];
if (cookieTasks.length !== 0) {
openedTasksArray = JSON.parse(cookieTasks);
}
if ($('body').hasClass('logged')) {
$.each(openedTasksArray, function (i, item) {
$('.item-event[data-taskID=' + item + ']').removeClass('disabled');
});
}
$('.item-event').on('click', function (e) {
var logged = $('body').hasClass('logged');
var disabled = $(this).hasClass('disabled');
if (!logged) {
$("#modalLogin").modal("show");
} else {
if (disabled) {
audio.play();
openedTasksArray.push($(this).attr('data-taskID'));
setCookie('openedTasks', JSON.stringify(openedTasksArray), 1);
$(this).removeClass('disabled');
}
}
});
};
this.modalEvents = function () {
$('.modal').on('shown.bs.modal', function () {}).on('hidden.bs.modal', function () {
$('body').css('padding-right', '0');
}).on('show.bs.modal', function (e) {
testAnim('bounceInLeft');
}).on('hide.bs.modal', function (e) {
testAnim('bounceOutRight');
});
$('.modal-terms').on('show.bs.modal', function () {
$(this).append('<div class="modal-backdrop fade in" data-dismiss="modal" aria-label="Close"></div>');
}).on('hide.bs.modal', function (e) {
$(this).find('.modal-backdrop').remove();
});
$('#modalRegistration').on('hidden.bs.modal', function (e) {
var cookieUid = getCookie('uid');
if (cookieUid.length > 2) {
location.reload();
}
});
function closeOpenModal(modal) {
$('.modal').modal('hide');
setTimeout(function () {
$(modal).modal('show');
}, 550);
};
function testAnim(x) {
$('.modal .modal-dialog').attr('class', 'modal-dialog ' + x + ' animated');
};
};
this.popovers = function () {
var popover = $('[data-toggle="popover"]');
popover.popover({
html: true,
content: function content() {
var content = $(this).attr("data-popover-content");
return $(content).children(".popover-body").html();
},
title: function title() {
var title = $(this).attr("data-popover-content");
return $(title).children(".popover-heading").html();
}
});
};
this.scrollDown = function () {
$('.btn-scroll-down').on('click', function (e) {
e.preventDefault();
if ($('html').hasClass('fp-enabled')) {
$.fn.fullpage.moveTo('instruction');
} else {
var $scrollTo = '#info';
$('html, body').animate({
scrollTop: $($scrollTo).offset().top
}, 400);
return false;
}
});
};
this.customScroll = function () {
$(window).load(function () {
$('.cuScroll').mCustomScrollbar({
axis: "y",
scrollInertia: 0,
//theme: '3d',
scrollButtons: { enable: false },
mouseWheel: { enable: true }
});
});
};
this.wowAnimation = function () {
var wow = new WOW({
boxClass: 'wow', // animated element css class (default is wow)
animateClass: 'animated', // animation css class (default is animated)
offset: 0, // distance to the element when triggering the animation (default is 0)
mobile: false, // trigger animations on mobile devices (default is true)
live: true, // act on asynchronously loaded content (default is true)
callback: function callback(box) {
// the callback is fired every time an animation is started
// the argument that is passed in is the DOM node being animated
},
scrollContainer: null // optional scroll container selector, otherwise use window
});
wow.init();
};
this.openRegistration = function () {
$('#open-modal-reg').on('click', function (e) {
e.preventDefault();
$("#modalLogin").modal("hide");
setTimeout(function () {
$("#modalRegistration").modal("show");
}, 550);
});
};
this.togglePass = function () {
var inputPass = $('#passwd');
$('.btn-show-pass').on('click', function (e) {
e.preventDefault();
if ($(this).hasClass('active')) {
$(this).removeClass('active');
inputPass.attr('type', 'password');
} else {
$(this).addClass('active');
inputPass.attr('type', 'text');
}
});
};
this.toggleInput = function () {
var emailField = {
type: 'email',
name: 'email',
placeholder: 'Email *',
btnType: 'email',
id: 'email'
};
var phoneField = {
type: 'tel',
name: 'telno',
placeholder: 'Телефон *',
btnType: 'phone',
id: 'telno'
};
$('.btn-change').on('click', function (e) {
e.preventDefault();
var typeField = $(this).attr('data-type');
var input = $(this).parents('.group-email-phone').find('input');
if (typeField == 'email') {
input.val('');
input.attr('type', phoneField.type);
input.attr('name', phoneField.name);
input.attr('id', phoneField.id);
input.attr('placeholder', phoneField.placeholder);
$(this).attr('data-type', phoneField.btnType);
input.inputmask({
"mask": "+9{1,2} (999) 999 99 99",
clearMaskOnLostFocus: false
});
} else if (typeField == 'phone') {
input.inputmask('unmaskedvalue');
input.inputmask('remove');
input.val('');
input.attr('type', emailField.type);
input.attr('name', emailField.name);
input.attr('id', emailField.id);
input.attr('placeholder', emailField.placeholder);
$(this).attr('data-type', emailField.btnType);
}
});
};
this.snowInit = function () {
var Snow = function Snow(canvas, count) {
var ctx = canvas.getContext('2d');
var snowflakes = [];
var add = function add(item) {
return snowflakes.push(item(canvas));
};
var update = function update() {
return _.forEach(snowflakes, function (el) {
return el.update();
});
};
var resize = function resize() {
ctx.canvas.width = canvas.offsetWidth;
ctx.canvas.height = canvas.offsetHeight;
_.forEach(snowflakes, function (el) {
return el.resized();
});
};
var draw = function draw() {
ctx.clearRect(0, 0, canvas.offsetWidth, canvas.offsetHeight);
_.forEach(snowflakes, function (el) {
return el.draw();
});
};
var events = function events() {
window.addEventListener('resize', resize);
};
var loop = function loop() {
draw();
update();
animFrame(loop);
};
var init = function init() {
_.times(count, function () {
return add(function (canvas) {
return new SnowItem(canvas);
});
});
events();
loop();
};
init(count);
resize();
return { add: add, resize: resize };
};
var defaultOptions = {
color: '#fff',
radius: [0.5, 2.0],
speed: [1, 1],
wind: [-0.1, 1.0]
};
var SnowItem = function SnowItem(canvas) {
var _ref, _ref2, _ref3;
var drawFn = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null;
var opts = arguments[2];
var options = _extends({}, defaultOptions, opts);
var radius = options.radius,
speed = options.speed,
wind = options.wind,
color = options.color;
var params = {
color: color,
x: _.random(0, canvas.offsetWidth),
y: _.random(-canvas.offsetHeight, 0),
radius: (_ref = _).random.apply(_ref, _toConsumableArray(radius)),
speed: (_ref2 = _).random.apply(_ref2, _toConsumableArray(speed)),
wind: (_ref3 = _).random.apply(_ref3, _toConsumableArray(wind)),
isResized: false
};
var ctx = canvas.getContext('2d');
var updateData = function updateData() {
params.x = _.random(0, canvas.offsetWidth);
params.y = _.random(-canvas.offsetHeight, 0);
};
var resized = function resized() {
return params.isResized = true;
};
var drawDefault = function drawDefault() {
ctx.beginPath();
ctx.arc(params.x, params.y, params.radius, 0, 2 * Math.PI);
ctx.fillStyle = params.color;
ctx.fill();
ctx.closePath();
};
var draw = drawFn ? function () {
return drawFn(ctx, params);
} : drawDefault;
var translate = function translate() {
params.y += params.speed;
params.x += params.wind;
};
var onDown = function onDown() {
if (params.y < canvas.offsetHeight) return;
if (params.isResized) {
updateData();
params.isResized = false;
} else {
params.y = 0;
params.x = _.random(0, canvas.offsetWidth);
}
};
var update = function update() {
translate();
onDown();
};
return {
update: update,
resized: resized,
draw: draw
};
};
var el = document.querySelector('.page-content');
var wrapper = document.querySelector('.box-snow');
var canvas = document.getElementById('snow');
var el1 = document.querySelector('.box-snow-front');
var wrapper1 = document.querySelector('.box');
var canvas1 = document.getElementById('snow-front');
var animFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame;
if ($('#snow').length) {
var snow = Snow(canvas, 80);
}
if ($('#snow-front').length) {
var snow1 = Snow(canvas1, 200);
}
};
};
var projectApp = new App();
var MQ = deviceType();
var isMobile = false;
var isTablet = true;
var isDesktop = false;
throttle('resize', 'optimizedResize');
function switchDeviceType(mq) {
if (mq === 'desktop' && isDesktop) {
isDesktop = false;
isTablet = true;
isMobile = false;
} else if (mq === 'tablet' && isTablet) {
isMobile = true;
isDesktop = true;
isTablet = false;
} else if (mq === 'mobile' && isMobile) {
isMobile = false;
isTablet = true;
isDesktop = false;
}
//console.log('switchDeviceType: ' + mq);
}
staticInit(MQ, projectApp.switchToDesktop, projectApp.switchToTablet, projectApp.switchToMobile);
$window.on('optimizedResize', function () {
var mq = deviceType();
checkDeviceType(mq, isMobile, isTablet, isDesktop, [projectApp.switchToDesktop, projectApp.switchToTablet, projectApp.switchToMobile]);
switchDeviceType(mq);
});
projectApp.init();
$window.on('scroll', function () {
return projectApp.handleScroll();
}).on('load', function () {
return projectApp.handleLoad();
}).on('resize', function () {
return projectApp.handleResize();
});
})(); | {
if (mq === 'desktop') {
firstFunc();
} else if (mq === 'tablet') {
otherFunc();
} else if (mq === 'mobile') {
secFunc();
}
// console.log('staticInit:' + mq);
} | identifier_body |
session.rs | use crate::{
auto_remote_syscalls::{AutoRemoteSyscalls, AutoRestoreMem},
emu_fs::EmuFs,
kernel_abi::{
syscall_number_for_close, syscall_number_for_munmap, syscall_number_for_openat,
SupportedArch,
},
log::LogDebug,
preload_interface::syscallbuf_hdr,
rd::RD_RESERVED_ROOT_DIR_FD,
remote_ptr::{RemotePtr, Void},
session::{
address_space::{
address_space::{AddressSpaceSharedPtr, Mapping},
memory_range::MemoryRangeKey,
MappingFlags,
},
diversion_session::DiversionSession,
record_session::RecordSession,
replay_session::ReplaySession,
session_inner::{AddressSpaceMap, SessionInner, TaskMap, ThreadGroupMap},
task::{
task_common::{self, copy_state, os_fork_into, read_mem, read_val_mem},
task_inner::{CloneFlags, CloneReason, WriteFlags},
Task, TaskSharedPtr, TaskSharedWeakPtr,
},
},
taskish_uid::{AddressSpaceUid, TaskUid, ThreadGroupUid},
thread_group::{ThreadGroup, ThreadGroupSharedPtr},
trace::trace_stream::TraceStream,
util::page_size,
};
use address_space::address_space::AddressSpace;
use libc::pid_t;
use nix::sys::mman::MapFlags;
use session_inner::{AddressSpaceClone, CloneCompletion};
use std::{
cell::{Ref, RefMut},
mem::size_of,
ops::DerefMut,
rc::{Rc, Weak},
};
pub mod address_space;
pub mod diversion_session;
pub mod record_session;
pub mod replay_session;
pub mod session_common;
pub mod session_inner;
pub mod task;
/// Note that this is NOT Rc<RefCell<Box<dyn Session>>>.
/// The Session itself will be shared.
/// Individual parts of the session can be wrapped in RefCell<> as required.
pub type SessionSharedPtr = Rc<Box<dyn Session>>;
pub type SessionSharedWeakPtr = Weak<Box<dyn Session>>;
pub trait Session: DerefMut<Target = SessionInner> {
/// `tasks().len()` will be zero and all the OS tasks will be
/// gone when this returns, or this won't return.
fn kill_all_tasks(&self);
fn as_session_inner(&self) -> &SessionInner;
fn as_session_inner_mut(&mut self) -> &mut SessionInner;
/// DIFF NOTE: Simply called on_destroy() in rr.
fn on_destroy_task(&self, t: &dyn Task) {
self.tasks_mut().remove(&t.rec_tid());
}
fn as_record(&self) -> Option<&RecordSession> {
None
}
fn as_record_mut(&mut self) -> Option<&mut RecordSession> {
None
}
fn as_replay(&self) -> Option<&ReplaySession> {
None
}
fn as_diversion(&self) -> Option<&DiversionSession> {
None
}
fn as_diversion_mut(&mut self) -> Option<&mut DiversionSession> {
None
}
/// Avoid using these boolean methods. Use the `as_*` methods that return Option<> instead.
fn is_recording(&self) -> bool {
self.as_record().is_some()
}
fn is_replaying(&self) -> bool {
self.as_replay().is_some()
}
fn is_diversion(&self) -> bool {
self.as_diversion().is_some()
}
fn new_task(
&self,
tid: pid_t,
rec_tid: Option<pid_t>,
serial: u32,
a: SupportedArch,
weak_self: TaskSharedWeakPtr,
) -> Box<dyn Task>;
fn trace_stream(&self) -> Option<Ref<'_, TraceStream>> {
None
}
fn trace_stream_mut(&self) -> Option<RefMut<'_, TraceStream>> {
None
}
fn cpu_binding(&self, trace: &TraceStream) -> Option<u32> {
trace.bound_to_cpu()
}
/// DIFF NOTE: Simply called on_create() in rr
fn on_create_task(&self, t: TaskSharedPtr);
/// NOTE: called Session::copy_state_to() in rr.
fn copy_state_to_session(
&self,
dest: SessionSharedPtr,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
) {
self.assert_fully_initialized();
debug_assert!(dest.clone_completion.borrow().is_none());
let mut completion = CloneCompletion::default();
for (_uid, vm_weak) in self.vm_map.borrow().iter() {
// Pick an arbitrary task to be group leader. The actual group leader
// might have died already.
let vm = vm_weak.upgrade().unwrap();
let group_leader = vm.task_set().iter().next().unwrap();
log!(
LogDebug,
" forking tg {} (real: {})",
group_leader.tgid(),
group_leader.real_tgid()
);
let mut group: AddressSpaceClone = AddressSpaceClone::default();
let clone_leader: TaskSharedPtr = os_fork_into(&**group_leader, dest.clone());
group.clone_leader = Rc::downgrade(&clone_leader);
dest.on_create_task(clone_leader.clone());
log!(LogDebug, " forked new group leader {}", clone_leader.tid());
{
let mut remote = AutoRemoteSyscalls::new(&**clone_leader);
let mut shared_maps_to_clone = Vec::new();
for (&k, m) in &clone_leader.vm().maps() {
// Special case the syscallbuf as a performance optimization. The amount
// of data we need to capture is usually significantly smaller than the
// size of the mapping, so allocating the whole mapping here would be
// wasteful.
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
group
.captured_memory
.push((m.map.start(), capture_syscallbuf(&m, &**clone_leader)));
} else if m.local_addr.is_some() {
ed_assert_eq!(
clone_leader,
m.map.start(),
AddressSpace::preload_thread_locals_start()
);
} else if m.recorded_map.flags().contains(MapFlags::MAP_SHARED)
&& emu_fs.has_file_for(&m.recorded_map)
{
shared_maps_to_clone.push(k);
}
}
// Do this in a separate loop to avoid iteration invalidation issues
for k in shared_maps_to_clone {
remap_shared_mmap(&mut remote, emu_fs, dest_emu_fs, k);
}
for t in vm.task_set().iter() {
if Rc::ptr_eq(&group_leader, &t) {
continue;
}
log!(LogDebug, " cloning {}", t.rec_tid());
group.member_states.push(t.capture_state());
}
}
group.clone_leader_state = group_leader.capture_state();
completion.address_spaces.push(group);
}
*dest.clone_completion.borrow_mut() = Some(Box::new(completion));
debug_assert!(!dest.vms().is_empty());
}
/// Call this before doing anything that requires access to the full set
/// of tasks (i.e., almost anything!).
fn finish_initializing(&self) {
if self.clone_completion.borrow().is_none() {
return;
}
// DIFF NOTE: We're setting clone completion to None here instead of at the end of the
// method.
let cc = self.clone_completion.replace(None).unwrap();
for tgleader in &cc.address_spaces {
let leader = tgleader.clone_leader.upgrade().unwrap();
{
let mut remote = AutoRemoteSyscalls::new(&**leader);
let mut mk_vec = Vec::new();
for (&mk, m) in &remote.vm().maps() {
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
mk_vec.push(mk);
}
}
for mk in mk_vec {
// Creating this mapping was delayed in capture_state for performance
remote.recreate_shared_mmap(mk, None, None);
}
}
for (rptr, captured_mem) in &tgleader.captured_memory {
leader.write_bytes_helper(*rptr, captured_mem, None, WriteFlags::empty());
}
{
let mut remote2 = AutoRemoteSyscalls::new(&**leader);
for tgmember in &tgleader.member_states {
let t_clone = task_common::os_clone_into(tgmember, &mut remote2);
self.on_create_task(t_clone.clone());
copy_state(&**t_clone, tgmember);
}
}
copy_state(
&**tgleader.clone_leader.upgrade().unwrap(),
&tgleader.clone_leader_state,
);
}
// Don't need to set clone completion to `None`. It's already been done!
}
/// See Task::clone().
/// This method is simply called Session::clone in rr.
fn clone_task(
&self,
p: &dyn Task,
flags: CloneFlags,
stack: RemotePtr<Void>,
tls: RemotePtr<Void>,
cleartid_addr: RemotePtr<i32>,
new_tid: pid_t,
new_rec_tid: Option<pid_t>,
) -> TaskSharedPtr {
self.assert_fully_initialized();
let c = p.clone_task(
CloneReason::TraceeClone,
flags,
stack,
tls,
cleartid_addr,
new_tid,
new_rec_tid,
self.next_task_serial(),
None,
);
self.on_create_task(c.clone());
c
}
/// Return the task created with `rec_tid`, or None if no such
/// task exists.
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_rec_tid(&self, rec_tid: pid_t) -> Option<TaskSharedPtr> {
self.finish_initializing();
self.tasks().get(&rec_tid).cloned()
}
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_task_uid(&self, tuid: TaskUid) -> Option<TaskSharedPtr> {
self.find_task_from_rec_tid(tuid.tid())
}
/// Return the thread group whose unique ID is `tguid`, or None if no such
/// thread group exists.
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_tguid(&self, tguid: ThreadGroupUid) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
self.thread_group_map()
.get(&tguid)
.map(|t| t.upgrade().unwrap())
}
/// Find the thread group for a specific pid
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_pid(&self, pid: pid_t) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
for (tguid, tg) in self.thread_group_map().iter() {
if tguid.tid() == pid {
return Some(tg.upgrade().unwrap());
}
}
None
}
/// Return the AddressSpace whose unique ID is `vmuid`, or None if no such
/// address space exists.
fn find_address_space(&self, vmuid: AddressSpaceUid) -> Option<AddressSpaceSharedPtr> {
self.finish_initializing();
// If the weak ptr was found, we _must_ be able to upgrade it!
self.vm_map().get(&vmuid).map(|a| a.upgrade().unwrap())
}
/// Return a copy of `tg` with the same mappings.
/// NOTE: Called simply Session::clone() in rr
fn clone_tg(&self, t: &dyn Task, tg: ThreadGroupSharedPtr) -> ThreadGroupSharedPtr {
self.assert_fully_initialized();
// If tg already belongs to our session, this is a fork creating a new
// taskgroup; otherwise it's a session-clone of an existing taskgroup.
if self.weak_self.ptr_eq(tg.borrow().session_weak()) {
ThreadGroup::new(
self.weak_self.clone(),
Some(Rc::downgrade(&tg)),
t.rec_tid(),
t.tid(),
t.own_namespace_tid(),
t.tuid().serial(),
)
} else {
let maybe_parent = match tg.borrow().parent() {
Some(parent_tg) => self
.find_thread_group_from_tguid(parent_tg.borrow().tguid())
.map(|found| Rc::downgrade(&found)),
None => None,
};
ThreadGroup::new(
self.weak_self.clone(),
maybe_parent,
tg.borrow().tgid,
t.tid(),
t.own_namespace_tid(),
tg.borrow().tguid().serial(),
)
}
}
/// Return the set of Tasks being traced in this session.
fn tasks(&self) -> Ref<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow()
}
fn tasks_mut(&self) -> RefMut<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow_mut()
}
fn thread_group_map(&self) -> Ref<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow()
}
fn thread_group_map_mut(&self) -> RefMut<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow_mut()
}
fn vm_map(&self) -> Ref<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow()
}
fn vm_map_mut(&self) -> RefMut<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow_mut()
}
/// Call `post_exec()` immediately after a tracee has successfully
/// `execve()`'d. After that, `done_initial_exec()` returns true.
/// This is called while we're still in the execve syscall so it's not safe
/// to perform remote syscalls in this method.
///
/// Tracee state can't be validated before the first exec,
/// because the address space inside the rd process for `rd replay`
/// will be different than it was for `rd record`.
/// After the first exec, we're running tracee code, and
/// everything must be the same.
///
/// DIFF NOTE: Additional param `t`. Makes things simpler.
fn post_exec(&self, t: &dyn Task) {
// We just saw a successful exec(), so from now on we know
// that the address space layout for the replay tasks will
// (should!) be the same as for the recorded tasks. So we can
// start validating registers at events.
self.assert_fully_initialized();
if self.done_initial_exec() {
return;
}
self.done_initial_exec_.set(true);
debug_assert_eq!(self.tasks().len(), 1);
t.flush_inconsistent_state();
self.spawned_task_error_fd_.borrow_mut().close();
}
}
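// Illustrative sketch only (not from the original source): a typical lookup
// through the trait methods above, given some `sess: SessionSharedPtr`. All
// names used here appear earlier in this file; the flow itself is an assumption.
//
//     if let Some(t) = sess.find_task_from_rec_tid(rec_tid) {
//         // finish_initializing() has run by now, so the task map is complete.
//         if let Some(tg) = sess.find_thread_group_from_pid(t.real_tgid()) {
//             // inspect or clone the thread group here
//         }
//     }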
fn remap_shared_mmap(
remote: &mut AutoRemoteSyscalls,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
k: MemoryRangeKey,
) {
let m = remote.vm().mapping_of(k.start()).unwrap().clone();
log!(
LogDebug,
" remapping shared region at {}-{}",
m.map.start(),
m.map.end()
);
let arch = remote.arch();
rd_infallible_syscall!(
remote,
syscall_number_for_munmap(arch),
m.map.start().as_usize(),
m.map.size()
);
let emu_file;
if let Some(file) = dest_emu_fs.at(&m.recorded_map) {
emu_file = file;
} else {
emu_file = dest_emu_fs.clone_file(emu_fs.at(&m.recorded_map).unwrap());
}
// TODO: this duplicates some code in replay_syscall.cc, but
// it's somewhat nontrivial to factor that code out.
let remote_fd: i32;
{
let path = emu_file.borrow().proc_path();
let arch = remote.arch();
let mut child_path = AutoRestoreMem::push_cstr(remote, path.as_str());
// Always open the emufs file O_RDWR, even if the current mapping prot
// is read-only. We might mprotect it to read-write later.
// skip leading '/' since we want the path to be relative to the root fd
let addr: RemotePtr<Void> = child_path.get().unwrap() + 1usize;
let res = rd_infallible_syscall!(
child_path,
syscall_number_for_openat(arch),
RD_RESERVED_ROOT_DIR_FD,
addr.as_usize(),
libc::O_RDWR
);
if 0 > res {
fatal!("Couldn't open {} in tracee", path);
}
remote_fd = res as i32;
}
let real_file = remote.task().stat_fd(remote_fd);
let real_file_name = remote.task().file_name_of_fd(remote_fd);
// XXX this condition is x86/x64-specific, I imagine.
remote.infallible_mmap_syscall(
Some(m.map.start()),
m.map.size(),
m.map.prot(),
// The remapped segment *must* be
// remapped at the same address,
// or else many things will go
// haywire.
(m.map.flags() & !MapFlags::MAP_ANONYMOUS) | MapFlags::MAP_FIXED,
remote_fd,
m.map.file_offset_bytes() / page_size() as u64,
);
// We update the AddressSpace mapping too, since that tracks the real file
// name and we need to update that.
remote.vm().map(
remote.task(),
m.map.start(),
m.map.size(),
m.map.prot(),
m.map.flags(),
m.map.file_offset_bytes(),
&real_file_name,
real_file.st_dev,
real_file.st_ino,
None,
Some(&m.recorded_map),
Some(emu_file),
None,
None,
);
let arch = remote.arch();
remote.infallible_syscall(syscall_number_for_close(arch), &[remote_fd as usize]);
}
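// Recap of the sequence above: (1) munmap the stale mapping in the tracee,
// (2) openat() the EmuFs backing file relative to RD_RESERVED_ROOT_DIR_FD
// (hence the leading '/' being skipped), always O_RDWR in case of a later
// mprotect to read-write, (3) mmap it back MAP_FIXED at the original address,
// since relocating the segment would invalidate pointers into it, and
// (4) close the temporary fd and mirror the result into the AddressSpace map.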
fn capture_syscallbuf(m: &Mapping, clone_leader: &dyn Task) -> Vec<u8> |
fn on_create_task_common<S: Session>(sess: &S, t: TaskSharedPtr) {
let rec_tid = t.rec_tid();
sess.task_map.borrow_mut().insert(rec_tid, t);
}
| {
let start = m.map.start();
let data_size: usize;
let num_bytes_addr =
RemotePtr::<u32>::cast(remote_ptr_field!(start, syscallbuf_hdr, num_rec_bytes));
if read_val_mem(
clone_leader,
remote_ptr_field!(start, syscallbuf_hdr, locked),
None,
) != 0u8
{
// There may be an incomplete syscall record after num_rec_bytes that
// we need to capture here. We don't know how big that record is,
// so just record the entire buffer. This should not be common.
data_size = m.map.size();
} else {
data_size =
read_val_mem(clone_leader, num_bytes_addr, None) as usize + size_of::<syscallbuf_hdr>();
}
read_mem(clone_leader, start, data_size, None)
} | identifier_body |
session.rs | use crate::{
auto_remote_syscalls::{AutoRemoteSyscalls, AutoRestoreMem},
emu_fs::EmuFs,
kernel_abi::{
syscall_number_for_close, syscall_number_for_munmap, syscall_number_for_openat,
SupportedArch,
},
log::LogDebug,
preload_interface::syscallbuf_hdr,
rd::RD_RESERVED_ROOT_DIR_FD,
remote_ptr::{RemotePtr, Void},
session::{
address_space::{
address_space::{AddressSpaceSharedPtr, Mapping},
memory_range::MemoryRangeKey,
MappingFlags,
},
diversion_session::DiversionSession,
record_session::RecordSession,
replay_session::ReplaySession,
session_inner::{AddressSpaceMap, SessionInner, TaskMap, ThreadGroupMap},
task::{
task_common::{self, copy_state, os_fork_into, read_mem, read_val_mem},
task_inner::{CloneFlags, CloneReason, WriteFlags},
Task, TaskSharedPtr, TaskSharedWeakPtr,
},
},
taskish_uid::{AddressSpaceUid, TaskUid, ThreadGroupUid},
thread_group::{ThreadGroup, ThreadGroupSharedPtr},
trace::trace_stream::TraceStream,
util::page_size,
};
use address_space::address_space::AddressSpace;
use libc::pid_t;
use nix::sys::mman::MapFlags;
use session_inner::{AddressSpaceClone, CloneCompletion};
use std::{
cell::{Ref, RefMut},
mem::size_of,
ops::DerefMut,
rc::{Rc, Weak},
};
pub mod address_space;
pub mod diversion_session;
pub mod record_session;
pub mod replay_session;
pub mod session_common;
pub mod session_inner;
pub mod task;
/// Note that this is NOT Rc<RefCell<Box<dyn Session>>>.
/// The Session itself will be shared.
/// Individual parts of the session can be wrapped in RefCell<> as required.
pub type SessionSharedPtr = Rc<Box<dyn Session>>;
pub type SessionSharedWeakPtr = Weak<Box<dyn Session>>;
pub trait Session: DerefMut<Target = SessionInner> {
/// `tasks().len()` will be zero and all the OS tasks will be
/// gone when this returns, or this won't return.
fn kill_all_tasks(&self);
fn as_session_inner(&self) -> &SessionInner;
fn as_session_inner_mut(&mut self) -> &mut SessionInner;
/// DIFF NOTE: Simply called on_destroy() in rr.
fn on_destroy_task(&self, t: &dyn Task) {
self.tasks_mut().remove(&t.rec_tid());
}
fn as_record(&self) -> Option<&RecordSession> {
None
}
fn as_record_mut(&mut self) -> Option<&mut RecordSession> {
None
}
fn as_replay(&self) -> Option<&ReplaySession> {
None
}
fn as_diversion(&self) -> Option<&DiversionSession> {
None
}
fn as_diversion_mut(&mut self) -> Option<&mut DiversionSession> {
None
}
/// Avoid using these boolean methods. Use the `as_*` methods that return Option<> instead.
fn is_recording(&self) -> bool {
self.as_record().is_some()
}
fn is_replaying(&self) -> bool {
self.as_replay().is_some()
}
fn is_diversion(&self) -> bool {
self.as_diversion().is_some()
}
fn new_task(
&self,
tid: pid_t,
rec_tid: Option<pid_t>,
serial: u32,
a: SupportedArch,
weak_self: TaskSharedWeakPtr,
) -> Box<dyn Task>;
fn trace_stream(&self) -> Option<Ref<'_, TraceStream>> {
None
}
fn trace_stream_mut(&self) -> Option<RefMut<'_, TraceStream>> {
None
}
fn cpu_binding(&self, trace: &TraceStream) -> Option<u32> {
trace.bound_to_cpu()
}
/// DIFF NOTE: Simply called on_create() in rr
fn on_create_task(&self, t: TaskSharedPtr);
/// NOTE: called Session::copy_state_to() in rr.
fn copy_state_to_session(
&self,
dest: SessionSharedPtr,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
) {
self.assert_fully_initialized();
debug_assert!(dest.clone_completion.borrow().is_none());
let mut completion = CloneCompletion::default();
for (_uid, vm_weak) in self.vm_map.borrow().iter() {
// Pick an arbitrary task to be group leader. The actual group leader
// might have died already.
let vm = vm_weak.upgrade().unwrap();
let group_leader = vm.task_set().iter().next().unwrap();
log!(
LogDebug,
" forking tg {} (real: {})",
group_leader.tgid(),
group_leader.real_tgid()
);
let mut group: AddressSpaceClone = AddressSpaceClone::default();
let clone_leader: TaskSharedPtr = os_fork_into(&**group_leader, dest.clone());
group.clone_leader = Rc::downgrade(&clone_leader);
dest.on_create_task(clone_leader.clone());
log!(LogDebug, " forked new group leader {}", clone_leader.tid());
{
let mut remote = AutoRemoteSyscalls::new(&**clone_leader);
let mut shared_maps_to_clone = Vec::new();
for (&k, m) in &clone_leader.vm().maps() {
// Special case the syscallbuf as a performance optimization. The amount
// of data we need to capture is usually significantly smaller than the
// size of the mapping, so allocating the whole mapping here would be
// wasteful.
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) | else if m.local_addr.is_some() {
ed_assert_eq!(
clone_leader,
m.map.start(),
AddressSpace::preload_thread_locals_start()
);
} else if m.recorded_map.flags().contains(MapFlags::MAP_SHARED)
&& emu_fs.has_file_for(&m.recorded_map)
{
shared_maps_to_clone.push(k);
}
}
// Do this in a separate loop to avoid iteration invalidation issues
for k in shared_maps_to_clone {
remap_shared_mmap(&mut remote, emu_fs, dest_emu_fs, k);
}
for t in vm.task_set().iter() {
if Rc::ptr_eq(&group_leader, &t) {
continue;
}
log!(LogDebug, " cloning {}", t.rec_tid());
group.member_states.push(t.capture_state());
}
}
group.clone_leader_state = group_leader.capture_state();
completion.address_spaces.push(group);
}
*dest.clone_completion.borrow_mut() = Some(Box::new(completion));
debug_assert!(!dest.vms().is_empty());
}
/// Call this before doing anything that requires access to the full set
/// of tasks (i.e., almost anything!).
fn finish_initializing(&self) {
if self.clone_completion.borrow().is_none() {
return;
}
// DIFF NOTE: We're setting clone completion to None here instead of at the end of the
// method.
let cc = self.clone_completion.replace(None).unwrap();
for tgleader in &cc.address_spaces {
let leader = tgleader.clone_leader.upgrade().unwrap();
{
let mut remote = AutoRemoteSyscalls::new(&**leader);
let mut mk_vec = Vec::new();
for (&mk, m) in &remote.vm().maps() {
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
mk_vec.push(mk);
}
}
for mk in mk_vec {
// Creating this mapping was delayed in capture_state for performance
remote.recreate_shared_mmap(mk, None, None);
}
}
for (rptr, captured_mem) in &tgleader.captured_memory {
leader.write_bytes_helper(*rptr, captured_mem, None, WriteFlags::empty());
}
{
let mut remote2 = AutoRemoteSyscalls::new(&**leader);
for tgmember in &tgleader.member_states {
let t_clone = task_common::os_clone_into(tgmember, &mut remote2);
self.on_create_task(t_clone.clone());
copy_state(&**t_clone, tgmember);
}
}
copy_state(
&**tgleader.clone_leader.upgrade().unwrap(),
&tgleader.clone_leader_state,
);
}
// Don't need to set clone completion to `None`. It's already been done!
}
/// See Task::clone().
/// This method is simply called Session::clone in rr.
fn clone_task(
&self,
p: &dyn Task,
flags: CloneFlags,
stack: RemotePtr<Void>,
tls: RemotePtr<Void>,
cleartid_addr: RemotePtr<i32>,
new_tid: pid_t,
new_rec_tid: Option<pid_t>,
) -> TaskSharedPtr {
self.assert_fully_initialized();
let c = p.clone_task(
CloneReason::TraceeClone,
flags,
stack,
tls,
cleartid_addr,
new_tid,
new_rec_tid,
self.next_task_serial(),
None,
);
self.on_create_task(c.clone());
c
}
/// Return the task created with `rec_tid`, or None if no such
/// task exists.
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_rec_tid(&self, rec_tid: pid_t) -> Option<TaskSharedPtr> {
self.finish_initializing();
self.tasks().get(&rec_tid).cloned()
}
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_task_uid(&self, tuid: TaskUid) -> Option<TaskSharedPtr> {
self.find_task_from_rec_tid(tuid.tid())
}
/// Return the thread group whose unique ID is `tguid`, or None if no such
/// thread group exists.
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_tguid(&self, tguid: ThreadGroupUid) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
self.thread_group_map()
.get(&tguid)
.map(|t| t.upgrade().unwrap())
}
/// Find the thread group for a specific pid
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_pid(&self, pid: pid_t) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
for (tguid, tg) in self.thread_group_map().iter() {
if tguid.tid() == pid {
return Some(tg.upgrade().unwrap());
}
}
None
}
/// Return the AddressSpace whose unique ID is `vmuid`, or None if no such
/// address space exists.
fn find_address_space(&self, vmuid: AddressSpaceUid) -> Option<AddressSpaceSharedPtr> {
self.finish_initializing();
// If the weak ptr was found, we _must_ be able to upgrade it!
self.vm_map().get(&vmuid).map(|a| a.upgrade().unwrap())
}
/// Return a copy of `tg` with the same mappings.
/// NOTE: Called simply Session::clone() in rr
fn clone_tg(&self, t: &dyn Task, tg: ThreadGroupSharedPtr) -> ThreadGroupSharedPtr {
self.assert_fully_initialized();
// If tg already belongs to our session, this is a fork creating a new
// taskgroup; otherwise it's a session-clone of an existing taskgroup.
if self.weak_self.ptr_eq(tg.borrow().session_weak()) {
ThreadGroup::new(
self.weak_self.clone(),
Some(Rc::downgrade(&tg)),
t.rec_tid(),
t.tid(),
t.own_namespace_tid(),
t.tuid().serial(),
)
} else {
let maybe_parent = match tg.borrow().parent() {
Some(parent_tg) => self
.find_thread_group_from_tguid(parent_tg.borrow().tguid())
.map(|found| Rc::downgrade(&found)),
None => None,
};
ThreadGroup::new(
self.weak_self.clone(),
maybe_parent,
tg.borrow().tgid,
t.tid(),
t.own_namespace_tid(),
tg.borrow().tguid().serial(),
)
}
}
/// Return the set of Tasks being traced in this session.
fn tasks(&self) -> Ref<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow()
}
fn tasks_mut(&self) -> RefMut<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow_mut()
}
fn thread_group_map(&self) -> Ref<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow()
}
fn thread_group_map_mut(&self) -> RefMut<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow_mut()
}
fn vm_map(&self) -> Ref<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow()
}
fn vm_map_mut(&self) -> RefMut<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow_mut()
}
/// Call `post_exec()` immediately after a tracee has successfully
/// `execve()`'d. After that, `done_initial_exec()` returns true.
/// This is called while we're still in the execve syscall so it's not safe
/// to perform remote syscalls in this method.
///
/// Tracee state can't be validated before the first exec,
/// because the address space inside the rd process for `rd replay`
/// will be different than it was for `rd record`.
/// After the first exec, we're running tracee code, and
/// everything must be the same.
///
/// DIFF NOTE: Additional param `t`. Makes things simpler.
fn post_exec(&self, t: &dyn Task) {
// We just saw a successful exec(), so from now on we know
// that the address space layout for the replay tasks will
// (should!) be the same as for the recorded tasks. So we can
// start validating registers at events.
self.assert_fully_initialized();
if self.done_initial_exec() {
return;
}
self.done_initial_exec_.set(true);
debug_assert_eq!(self.tasks().len(), 1);
t.flush_inconsistent_state();
self.spawned_task_error_fd_.borrow_mut().close();
}
}
fn remap_shared_mmap(
remote: &mut AutoRemoteSyscalls,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
k: MemoryRangeKey,
) {
let m = remote.vm().mapping_of(k.start()).unwrap().clone();
log!(
LogDebug,
" remapping shared region at {}-{}",
m.map.start(),
m.map.end()
);
let arch = remote.arch();
rd_infallible_syscall!(
remote,
syscall_number_for_munmap(arch),
m.map.start().as_usize(),
m.map.size()
);
let emu_file;
if let Some(file) = dest_emu_fs.at(&m.recorded_map) {
emu_file = file;
} else {
emu_file = dest_emu_fs.clone_file(emu_fs.at(&m.recorded_map).unwrap());
}
// TODO: this duplicates some code in replay_syscall.cc, but
// it's somewhat nontrivial to factor that code out.
let remote_fd: i32;
{
let path = emu_file.borrow().proc_path();
let arch = remote.arch();
let mut child_path = AutoRestoreMem::push_cstr(remote, path.as_str());
// Always open the emufs file O_RDWR, even if the current mapping prot
// is read-only. We might mprotect it to read-write later.
// skip leading '/' since we want the path to be relative to the root fd
let addr: RemotePtr<Void> = child_path.get().unwrap() + 1usize;
let res = rd_infallible_syscall!(
child_path,
syscall_number_for_openat(arch),
RD_RESERVED_ROOT_DIR_FD,
addr.as_usize(),
libc::O_RDWR
);
if 0 > res {
fatal!("Couldn't open {} in tracee", path);
}
remote_fd = res as i32;
}
let real_file = remote.task().stat_fd(remote_fd);
let real_file_name = remote.task().file_name_of_fd(remote_fd);
// XXX this condition is x86/x64-specific, I imagine.
remote.infallible_mmap_syscall(
Some(m.map.start()),
m.map.size(),
m.map.prot(),
// The remapped segment *must* be
// remapped at the same address,
// or else many things will go
// haywire.
(m.map.flags() & !MapFlags::MAP_ANONYMOUS) | MapFlags::MAP_FIXED,
remote_fd,
m.map.file_offset_bytes() / page_size() as u64,
);
// We update the AddressSpace mapping too, since that tracks the real file
// name and we need to update that.
remote.vm().map(
remote.task(),
m.map.start(),
m.map.size(),
m.map.prot(),
m.map.flags(),
m.map.file_offset_bytes(),
&real_file_name,
real_file.st_dev,
real_file.st_ino,
None,
Some(&m.recorded_map),
Some(emu_file),
None,
None,
);
let arch = remote.arch();
remote.infallible_syscall(syscall_number_for_close(arch), &[remote_fd as usize]);
}
fn capture_syscallbuf(m: &Mapping, clone_leader: &dyn Task) -> Vec<u8> {
let start = m.map.start();
let data_size: usize;
let num_bytes_addr =
RemotePtr::<u32>::cast(remote_ptr_field!(start, syscallbuf_hdr, num_rec_bytes));
if read_val_mem(
clone_leader,
remote_ptr_field!(start, syscallbuf_hdr, locked),
None,
) != 0u8
{
// There may be an incomplete syscall record after num_rec_bytes that
// we need to capture here. We don't know how big that record is,
// so just record the entire buffer. This should not be common.
data_size = m.map.size();
} else {
data_size =
read_val_mem(clone_leader, num_bytes_addr, None) as usize + size_of::<syscallbuf_hdr>();
}
read_mem(clone_leader, start, data_size, None)
}
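// Sizing sketch (hypothetical numbers, not from the original source): with
// num_rec_bytes == 128 and the buffer unlocked, the capture above is
// size_of::<syscallbuf_hdr>() + 128 bytes; a locked buffer (a record may be
// half-written) conservatively captures all of m.map.size().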
fn on_create_task_common<S: Session>(sess: &S, t: TaskSharedPtr) {
let rec_tid = t.rec_tid();
sess.task_map.borrow_mut().insert(rec_tid, t);
}
| {
group
.captured_memory
.push((m.map.start(), capture_syscallbuf(&m, &**clone_leader)));
} | conditional_block |
session.rs | use crate::{
auto_remote_syscalls::{AutoRemoteSyscalls, AutoRestoreMem},
emu_fs::EmuFs,
kernel_abi::{
syscall_number_for_close, syscall_number_for_munmap, syscall_number_for_openat,
SupportedArch,
},
log::LogDebug,
preload_interface::syscallbuf_hdr,
rd::RD_RESERVED_ROOT_DIR_FD,
remote_ptr::{RemotePtr, Void},
session::{
address_space::{
address_space::{AddressSpaceSharedPtr, Mapping},
memory_range::MemoryRangeKey,
MappingFlags,
},
diversion_session::DiversionSession,
record_session::RecordSession,
replay_session::ReplaySession,
session_inner::{AddressSpaceMap, SessionInner, TaskMap, ThreadGroupMap},
task::{
task_common::{self, copy_state, os_fork_into, read_mem, read_val_mem},
task_inner::{CloneFlags, CloneReason, WriteFlags},
Task, TaskSharedPtr, TaskSharedWeakPtr,
},
},
taskish_uid::{AddressSpaceUid, TaskUid, ThreadGroupUid},
thread_group::{ThreadGroup, ThreadGroupSharedPtr},
trace::trace_stream::TraceStream,
util::page_size,
};
use address_space::address_space::AddressSpace;
use libc::pid_t;
use nix::sys::mman::MapFlags;
use session_inner::{AddressSpaceClone, CloneCompletion};
use std::{
cell::{Ref, RefMut},
mem::size_of,
ops::DerefMut,
rc::{Rc, Weak},
};
pub mod address_space;
pub mod diversion_session;
pub mod record_session;
pub mod replay_session;
pub mod session_common;
pub mod session_inner;
pub mod task;
/// Note that this is NOT Rc<RefCell<Box<dyn Session>>>
/// Session will be shared.
/// Individual parts of the session can be wrapped in RefCell<> as required
pub type SessionSharedPtr = Rc<Box<dyn Session>>;
pub type SessionSharedWeakPtr = Weak<Box<dyn Session>>;
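// Construction sketch (hypothetical; `replay_session` is an assumed value, not
// from the original source). Rc<Box<ReplaySession>> does not coerce to
// Rc<Box<dyn Session>>, so box to the trait object first:
// let boxed: Box<dyn Session> = Box::new(replay_session);
// let sess: SessionSharedPtr = Rc::new(boxed);
// let weak: SessionSharedWeakPtr = Rc::downgrade(&sess);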
pub trait Session: DerefMut<Target = SessionInner> {
/// `tasks().len()` will be zero and all the OS tasks will be
/// gone when this returns, or this won't return.
fn kill_all_tasks(&self);
fn as_session_inner(&self) -> &SessionInner;
fn as_session_inner_mut(&mut self) -> &mut SessionInner;
/// DIFF NOTE: Simply called on_destroy() in rr.
fn on_destroy_task(&self, t: &dyn Task) {
self.tasks_mut().remove(&t.rec_tid());
}
fn as_record(&self) -> Option<&RecordSession> {
None
}
fn as_record_mut(&mut self) -> Option<&mut RecordSession> {
None
}
fn as_replay(&self) -> Option<&ReplaySession> {
None
}
fn as_diversion(&self) -> Option<&DiversionSession> {
None
}
fn as_diversion_mut(&mut self) -> Option<&mut DiversionSession> {
None
}
/// Avoid using these boolean methods. Use the `as_*` methods that return Option<> instead.
fn is_recording(&self) -> bool {
self.as_record().is_some()
}
fn is_replaying(&self) -> bool {
self.as_replay().is_some()
}
fn is_diversion(&self) -> bool {
self.as_diversion().is_some()
}
fn new_task(
&self,
tid: pid_t,
rec_tid: Option<pid_t>,
serial: u32,
a: SupportedArch,
weak_self: TaskSharedWeakPtr,
) -> Box<dyn Task>;
fn trace_stream(&self) -> Option<Ref<'_, TraceStream>> {
None
}
fn trace_stream_mut(&self) -> Option<RefMut<'_, TraceStream>> {
None
}
fn cpu_binding(&self, trace: &TraceStream) -> Option<u32> {
trace.bound_to_cpu()
}
/// DIFF NOTE: Simply called on_create() in rr
fn on_create_task(&self, t: TaskSharedPtr);
/// NOTE: called Session::copy_state_to() in rr.
fn copy_state_to_session(
&self,
dest: SessionSharedPtr,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
) {
self.assert_fully_initialized();
debug_assert!(dest.clone_completion.borrow().is_none());
let mut completion = CloneCompletion::default();
for (_uid, vm_weak) in self.vm_map.borrow().iter() {
// Pick an arbitrary task to be group leader. The actual group leader
// might have died already.
let vm = vm_weak.upgrade().unwrap();
let group_leader = vm.task_set().iter().next().unwrap();
log!(
LogDebug,
" forking tg {} (real: {})",
group_leader.tgid(),
group_leader.real_tgid()
);
let mut group: AddressSpaceClone = AddressSpaceClone::default();
let clone_leader: TaskSharedPtr = os_fork_into(&**group_leader, dest.clone());
group.clone_leader = Rc::downgrade(&clone_leader);
dest.on_create_task(clone_leader.clone());
log!(LogDebug, " forked new group leader {}", clone_leader.tid());
{
let mut remote = AutoRemoteSyscalls::new(&**clone_leader);
let mut shared_maps_to_clone = Vec::new();
for (&k, m) in &clone_leader.vm().maps() {
// Special case the syscallbuf as a performance optimization. The amount
// of data we need to capture is usually significantly smaller than the
// size of the mapping, so allocating the whole mapping here would be
// wasteful.
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
group
.captured_memory
.push((m.map.start(), capture_syscallbuf(&m, &**clone_leader)));
} else if m.local_addr.is_some() {
ed_assert_eq!(
clone_leader,
m.map.start(),
AddressSpace::preload_thread_locals_start()
);
} else if m.recorded_map.flags().contains(MapFlags::MAP_SHARED)
&& emu_fs.has_file_for(&m.recorded_map)
{
shared_maps_to_clone.push(k);
}
}
// Do this in a separate loop to avoid iteration invalidation issues
for k in shared_maps_to_clone {
remap_shared_mmap(&mut remote, emu_fs, dest_emu_fs, k);
}
for t in vm.task_set().iter() {
if Rc::ptr_eq(&group_leader, &t) {
continue;
}
log!(LogDebug, " cloning {}", t.rec_tid());
group.member_states.push(t.capture_state());
}
}
group.clone_leader_state = group_leader.capture_state();
completion.address_spaces.push(group);
}
*dest.clone_completion.borrow_mut() = Some(Box::new(completion));
debug_assert!(!dest.vms().is_empty());
}
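// Recap of the method above: per address space it (1) forks a clone leader via
// os_fork_into(), (2) eagerly captures syscallbuf bytes, (3) remaps
// EmuFs-backed shared mappings into dest_emu_fs, and (4) capture_state()s the
// leader and the remaining members; the member tasks themselves are only
// materialized later, in finish_initializing().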
/// Call this before doing anything that requires access to the full set
/// of tasks (i.e., almost anything!).
fn finish_initializing(&self) {
if self.clone_completion.borrow().is_none() {
return;
}
// DIFF NOTE: We're setting clone completion to None here instead of at the end of the
// method.
let cc = self.clone_completion.replace(None).unwrap();
for tgleader in &cc.address_spaces {
let leader = tgleader.clone_leader.upgrade().unwrap();
{
let mut remote = AutoRemoteSyscalls::new(&**leader);
let mut mk_vec = Vec::new();
for (&mk, m) in &remote.vm().maps() {
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
mk_vec.push(mk);
}
}
for mk in mk_vec {
// Creating this mapping was delayed in capture_state for performance
remote.recreate_shared_mmap(mk, None, None);
}
}
for (rptr, captured_mem) in &tgleader.captured_memory {
leader.write_bytes_helper(*rptr, captured_mem, None, WriteFlags::empty());
}
{
let mut remote2 = AutoRemoteSyscalls::new(&**leader);
for tgmember in &tgleader.member_states {
let t_clone = task_common::os_clone_into(tgmember, &mut remote2);
self.on_create_task(t_clone.clone());
copy_state(&**t_clone, tgmember);
}
}
copy_state(
&**tgleader.clone_leader.upgrade().unwrap(),
&tgleader.clone_leader_state,
);
}
// Don't need to set clone completion to `None`. It's already been done!
}
/// See Task::clone().
/// This method is simply called Session::clone in rr.
fn clone_task(
&self,
p: &dyn Task,
flags: CloneFlags,
stack: RemotePtr<Void>,
tls: RemotePtr<Void>,
cleartid_addr: RemotePtr<i32>,
new_tid: pid_t,
new_rec_tid: Option<pid_t>,
) -> TaskSharedPtr {
self.assert_fully_initialized();
let c = p.clone_task(
CloneReason::TraceeClone,
flags,
stack,
tls,
cleartid_addr,
new_tid,
new_rec_tid,
self.next_task_serial(),
None,
);
self.on_create_task(c.clone());
c
}
/// Return the task created with `rec_tid`, or None if no such
/// task exists.
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_rec_tid(&self, rec_tid: pid_t) -> Option<TaskSharedPtr> {
self.finish_initializing();
self.tasks().get(&rec_tid).cloned()
}
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_task_uid(&self, tuid: TaskUid) -> Option<TaskSharedPtr> {
self.find_task_from_rec_tid(tuid.tid())
}
/// Return the thread group whose unique ID is `tguid`, or None if no such
/// thread group exists.
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_tguid(&self, tguid: ThreadGroupUid) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
self.thread_group_map()
.get(&tguid)
.map(|t| t.upgrade().unwrap())
}
/// Find the thread group for a specific pid
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_pid(&self, pid: pid_t) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
for (tguid, tg) in self.thread_group_map().iter() {
if tguid.tid() == pid {
return Some(tg.upgrade().unwrap());
}
}
None
}
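// Unlike the tguid lookup above, this is a linear scan: thread_group_map() is
// keyed by ThreadGroupUid, not by bare pid.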
/// Return the AddressSpace whose unique ID is `vmuid`, or None if no such
/// address space exists.
fn | (&self, vmuid: AddressSpaceUid) -> Option<AddressSpaceSharedPtr> {
self.finish_initializing();
// If the weak ptr was found, we _must_ be able to upgrade it!
self.vm_map().get(&vmuid).map(|a| a.upgrade().unwrap())
}
/// Return a copy of `tg` with the same mappings.
/// NOTE: Called simply Session::clone() in rr
fn clone_tg(&self, t: &dyn Task, tg: ThreadGroupSharedPtr) -> ThreadGroupSharedPtr {
self.assert_fully_initialized();
// If tg already belongs to our session this is a fork to create a new
// taskgroup, otherwise it's a session-clone of an existing taskgroup
if self.weak_self.ptr_eq(tg.borrow().session_weak()) {
ThreadGroup::new(
self.weak_self.clone(),
Some(Rc::downgrade(&tg)),
t.rec_tid(),
t.tid(),
t.own_namespace_tid(),
t.tuid().serial(),
)
} else {
let maybe_parent = match tg.borrow().parent() {
Some(parent_tg) => self
.find_thread_group_from_tguid(parent_tg.borrow().tguid())
.map(|found| Rc::downgrade(&found)),
None => None,
};
ThreadGroup::new(
self.weak_self.clone(),
maybe_parent,
tg.borrow().tgid,
t.tid(),
t.own_namespace_tid(),
tg.borrow().tguid().serial(),
)
}
}
/// Return the set of Tasks being traced in this session.
fn tasks(&self) -> Ref<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow()
}
fn tasks_mut(&self) -> RefMut<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow_mut()
}
fn thread_group_map(&self) -> Ref<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow()
}
fn thread_group_map_mut(&self) -> RefMut<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow_mut()
}
fn vm_map(&self) -> Ref<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow()
}
fn vm_map_mut(&self) -> RefMut<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow_mut()
}
/// Call `post_exec()` immediately after a tracee has successfully
/// `execve()`'d. After that, `done_initial_exec()` returns true.
/// This is called while we're still in the execve syscall so it's not safe
/// to perform remote syscalls in this method.
///
/// Tracee state can't be validated before the first exec,
/// because the address space inside the rd process for `rd replay`
/// will be different than it was for `rd record`.
/// After the first exec, we're running tracee code, and
/// everything must be the same.
///
/// DIFF NOTE: Additional param `t`. Makes things simpler.
fn post_exec(&self, t: &dyn Task) {
// We just saw a successful exec(), so from now on we know
// that the address space layout for the replay tasks will
// (should!) be the same as for the recorded tasks. So we can
// start validating registers at events.
self.assert_fully_initialized();
if self.done_initial_exec() {
return;
}
self.done_initial_exec_.set(true);
debug_assert_eq!(self.tasks().len(), 1);
t.flush_inconsistent_state();
self.spawned_task_error_fd_.borrow_mut().close();
}
}
fn remap_shared_mmap(
remote: &mut AutoRemoteSyscalls,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
k: MemoryRangeKey,
) {
let m = remote.vm().mapping_of(k.start()).unwrap().clone();
log!(
LogDebug,
" remapping shared region at {}-{}",
m.map.start(),
m.map.end()
);
let arch = remote.arch();
rd_infallible_syscall!(
remote,
syscall_number_for_munmap(arch),
m.map.start().as_usize(),
m.map.size()
);
let emu_file;
if let Some(file) = dest_emu_fs.at(&m.recorded_map) {
emu_file = file;
} else {
emu_file = dest_emu_fs.clone_file(emu_fs.at(&m.recorded_map).unwrap());
}
// TODO: this duplicates some code in replay_syscall.cc, but
// it's somewhat nontrivial to factor that code out.
let remote_fd: i32;
{
let path = emu_file.borrow().proc_path();
let arch = remote.arch();
let mut child_path = AutoRestoreMem::push_cstr(remote, path.as_str());
// Always open the emufs file O_RDWR, even if the current mapping prot
// is read-only. We might mprotect it to read-write later.
// skip leading '/' since we want the path to be relative to the root fd
let addr: RemotePtr<Void> = child_path.get().unwrap() + 1usize;
let res = rd_infallible_syscall!(
child_path,
syscall_number_for_openat(arch),
RD_RESERVED_ROOT_DIR_FD,
addr.as_usize(),
libc::O_RDWR
);
if 0 > res {
fatal!("Couldn't open {} in tracee", path);
}
remote_fd = res as i32;
}
let real_file = remote.task().stat_fd(remote_fd);
let real_file_name = remote.task().file_name_of_fd(remote_fd);
// XXX this condition is x86/x64-specific, I imagine.
remote.infallible_mmap_syscall(
Some(m.map.start()),
m.map.size(),
m.map.prot(),
// The remapped segment *must* be
// remapped at the same address,
// or else many things will go
// haywire.
(m.map.flags() & !MapFlags::MAP_ANONYMOUS) | MapFlags::MAP_FIXED,
remote_fd,
m.map.file_offset_bytes() / page_size() as u64,
);
// We update the AddressSpace mapping too, since that tracks the real file
// name and we need to update that.
remote.vm().map(
remote.task(),
m.map.start(),
m.map.size(),
m.map.prot(),
m.map.flags(),
m.map.file_offset_bytes(),
&real_file_name,
real_file.st_dev,
real_file.st_ino,
None,
Some(&m.recorded_map),
Some(emu_file),
None,
None,
);
let arch = remote.arch();
remote.infallible_syscall(syscall_number_for_close(arch), &[remote_fd as usize]);
}
fn capture_syscallbuf(m: &Mapping, clone_leader: &dyn Task) -> Vec<u8> {
let start = m.map.start();
let data_size: usize;
let num_bytes_addr =
RemotePtr::<u32>::cast(remote_ptr_field!(start, syscallbuf_hdr, num_rec_bytes));
if read_val_mem(
clone_leader,
remote_ptr_field!(start, syscallbuf_hdr, locked),
None,
) != 0u8
{
// There may be an incomplete syscall record after num_rec_bytes that
// we need to capture here. We don't know how big that record is,
// so just record the entire buffer. This should not be common.
data_size = m.map.size();
} else {
data_size =
read_val_mem(clone_leader, num_bytes_addr, None) as usize + size_of::<syscallbuf_hdr>();
}
read_mem(clone_leader, start, data_size, None)
}
fn on_create_task_common<S: Session>(sess: &S, t: TaskSharedPtr) {
let rec_tid = t.rec_tid();
sess.task_map.borrow_mut().insert(rec_tid, t);
}
| find_address_space | identifier_name |
session.rs | use crate::{
auto_remote_syscalls::{AutoRemoteSyscalls, AutoRestoreMem},
emu_fs::EmuFs,
kernel_abi::{
syscall_number_for_close, syscall_number_for_munmap, syscall_number_for_openat,
SupportedArch,
},
log::LogDebug,
preload_interface::syscallbuf_hdr,
rd::RD_RESERVED_ROOT_DIR_FD,
remote_ptr::{RemotePtr, Void},
session::{
address_space::{
address_space::{AddressSpaceSharedPtr, Mapping},
memory_range::MemoryRangeKey,
MappingFlags,
},
diversion_session::DiversionSession,
record_session::RecordSession,
replay_session::ReplaySession,
session_inner::{AddressSpaceMap, SessionInner, TaskMap, ThreadGroupMap},
task::{
task_common::{self, copy_state, os_fork_into, read_mem, read_val_mem},
task_inner::{CloneFlags, CloneReason, WriteFlags},
Task, TaskSharedPtr, TaskSharedWeakPtr,
},
},
taskish_uid::{AddressSpaceUid, TaskUid, ThreadGroupUid},
thread_group::{ThreadGroup, ThreadGroupSharedPtr},
trace::trace_stream::TraceStream,
util::page_size,
};
use address_space::address_space::AddressSpace;
use libc::pid_t;
use nix::sys::mman::MapFlags;
use session_inner::{AddressSpaceClone, CloneCompletion};
use std::{
cell::{Ref, RefMut},
mem::size_of,
ops::DerefMut,
rc::{Rc, Weak},
};
pub mod address_space;
pub mod diversion_session;
pub mod record_session;
pub mod replay_session;
pub mod session_common;
pub mod session_inner;
pub mod task;
/// Note that this is NOT Rc<RefCell<Box<dyn Session>>>
/// Session will be shared.
/// Individual parts of the session can be wrapped in RefCell<> as required
pub type SessionSharedPtr = Rc<Box<dyn Session>>;
pub type SessionSharedWeakPtr = Weak<Box<dyn Session>>;
pub trait Session: DerefMut<Target = SessionInner> {
/// `tasks().len()` will be zero and all the OS tasks will be
/// gone when this returns, or this won't return.
fn kill_all_tasks(&self);
fn as_session_inner(&self) -> &SessionInner;
fn as_session_inner_mut(&mut self) -> &mut SessionInner;
/// DIFF NOTE: Simply called on_destroy() in rr.
fn on_destroy_task(&self, t: &dyn Task) {
self.tasks_mut().remove(&t.rec_tid());
}
fn as_record(&self) -> Option<&RecordSession> {
None
}
fn as_record_mut(&mut self) -> Option<&mut RecordSession> {
None
}
fn as_replay(&self) -> Option<&ReplaySession> {
None
}
fn as_diversion(&self) -> Option<&DiversionSession> {
None
}
fn as_diversion_mut(&mut self) -> Option<&mut DiversionSession> {
None
}
/// Avoid using these boolean methods. Use the `as_*` methods that return Option<> instead.
fn is_recording(&self) -> bool {
self.as_record().is_some()
}
fn is_replaying(&self) -> bool {
self.as_replay().is_some()
}
fn is_diversion(&self) -> bool {
self.as_diversion().is_some()
}
fn new_task(
&self,
tid: pid_t,
rec_tid: Option<pid_t>,
serial: u32,
a: SupportedArch,
weak_self: TaskSharedWeakPtr,
) -> Box<dyn Task>;
fn trace_stream(&self) -> Option<Ref<'_, TraceStream>> {
None
}
fn trace_stream_mut(&self) -> Option<RefMut<'_, TraceStream>> {
None
}
fn cpu_binding(&self, trace: &TraceStream) -> Option<u32> {
trace.bound_to_cpu()
}
/// DIFF NOTE: Simply called on_create() in rr
fn on_create_task(&self, t: TaskSharedPtr);
/// NOTE: called Session::copy_state_to() in rr.
fn copy_state_to_session(
&self,
dest: SessionSharedPtr,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
) {
self.assert_fully_initialized();
debug_assert!(dest.clone_completion.borrow().is_none());
let mut completion = CloneCompletion::default();
for (_uid, vm_weak) in self.vm_map.borrow().iter() {
// Pick an arbitrary task to be group leader. The actual group leader
// might have died already.
let vm = vm_weak.upgrade().unwrap();
let group_leader = vm.task_set().iter().next().unwrap();
log!(
LogDebug,
" forking tg {} (real: {})",
group_leader.tgid(),
group_leader.real_tgid()
);
let mut group: AddressSpaceClone = AddressSpaceClone::default();
let clone_leader: TaskSharedPtr = os_fork_into(&**group_leader, dest.clone());
group.clone_leader = Rc::downgrade(&clone_leader);
dest.on_create_task(clone_leader.clone());
log!(LogDebug, " forked new group leader {}", clone_leader.tid());
{
let mut remote = AutoRemoteSyscalls::new(&**clone_leader);
let mut shared_maps_to_clone = Vec::new();
for (&k, m) in &clone_leader.vm().maps() {
// Special case the syscallbuf as a performance optimization. The amount
// of data we need to capture is usually significantly smaller than the
// size of the mapping, so allocating the whole mapping here would be
// wasteful.
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
group
.captured_memory
.push((m.map.start(), capture_syscallbuf(&m, &**clone_leader)));
} else if m.local_addr.is_some() {
ed_assert_eq!(
clone_leader,
m.map.start(),
AddressSpace::preload_thread_locals_start()
);
} else if m.recorded_map.flags().contains(MapFlags::MAP_SHARED)
&& emu_fs.has_file_for(&m.recorded_map)
{ | for k in shared_maps_to_clone {
remap_shared_mmap(&mut remote, emu_fs, dest_emu_fs, k);
}
for t in vm.task_set().iter() {
if Rc::ptr_eq(&group_leader, &t) {
continue;
}
log!(LogDebug, " cloning {}", t.rec_tid());
group.member_states.push(t.capture_state());
}
}
group.clone_leader_state = group_leader.capture_state();
completion.address_spaces.push(group);
}
*dest.clone_completion.borrow_mut() = Some(Box::new(completion));
debug_assert!(!dest.vms().is_empty());
}
/// Call this before doing anything that requires access to the full set
/// of tasks (i.e., almost anything!).
fn finish_initializing(&self) {
if self.clone_completion.borrow().is_none() {
return;
}
// DIFF NOTE: We're setting clone completion to None here instead of at the end of the
// method.
let cc = self.clone_completion.replace(None).unwrap();
for tgleader in &cc.address_spaces {
let leader = tgleader.clone_leader.upgrade().unwrap();
{
let mut remote = AutoRemoteSyscalls::new(&**leader);
let mut mk_vec = Vec::new();
for (&mk, m) in &remote.vm().maps() {
if m.flags.contains(MappingFlags::IS_SYSCALLBUF) {
mk_vec.push(mk);
}
}
for mk in mk_vec {
// Creating this mapping was delayed in capture_state for performance
remote.recreate_shared_mmap(mk, None, None);
}
}
for (rptr, captured_mem) in &tgleader.captured_memory {
leader.write_bytes_helper(*rptr, captured_mem, None, WriteFlags::empty());
}
{
let mut remote2 = AutoRemoteSyscalls::new(&**leader);
for tgmember in &tgleader.member_states {
let t_clone = task_common::os_clone_into(tgmember, &mut remote2);
self.on_create_task(t_clone.clone());
copy_state(&**t_clone, tgmember);
}
}
copy_state(
&**tgleader.clone_leader.upgrade().unwrap(),
&tgleader.clone_leader_state,
);
}
// Don't need to set clone completion to `None`. It's already been done!
}
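// This is the second half of the two-phase session clone:
// copy_state_to_session() stashes a CloneCompletion, and the first accessor
// that needs the full task set (e.g. tasks()) replays it here: recreate the
// syscallbuf mappings, restore captured memory, then clone and copy state into
// the remaining member tasks.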
/// See Task::clone().
/// This method is simply called Session::clone in rr.
fn clone_task(
&self,
p: &dyn Task,
flags: CloneFlags,
stack: RemotePtr<Void>,
tls: RemotePtr<Void>,
cleartid_addr: RemotePtr<i32>,
new_tid: pid_t,
new_rec_tid: Option<pid_t>,
) -> TaskSharedPtr {
self.assert_fully_initialized();
let c = p.clone_task(
CloneReason::TraceeClone,
flags,
stack,
tls,
cleartid_addr,
new_tid,
new_rec_tid,
self.next_task_serial(),
None,
);
self.on_create_task(c.clone());
c
}
/// Return the task created with `rec_tid`, or None if no such
/// task exists.
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_rec_tid(&self, rec_tid: pid_t) -> Option<TaskSharedPtr> {
self.finish_initializing();
self.tasks().get(&rec_tid).cloned()
}
/// NOTE: Method is simply called Session::find_task() in rr
fn find_task_from_task_uid(&self, tuid: TaskUid) -> Option<TaskSharedPtr> {
self.find_task_from_rec_tid(tuid.tid())
}
/// Return the thread group whose unique ID is `tguid`, or None if no such
/// thread group exists.
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_tguid(&self, tguid: ThreadGroupUid) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
self.thread_group_map()
.get(&tguid)
.map(|t| t.upgrade().unwrap())
}
/// Find the thread group for a specific pid
/// NOTE: Method is simply called Session::find_thread_group() in rr
fn find_thread_group_from_pid(&self, pid: pid_t) -> Option<ThreadGroupSharedPtr> {
self.finish_initializing();
for (tguid, tg) in self.thread_group_map().iter() {
if tguid.tid() == pid {
return Some(tg.upgrade().unwrap());
}
}
None
}
/// Return the AddressSpace whose unique ID is `vmuid`, or None if no such
/// address space exists.
fn find_address_space(&self, vmuid: AddressSpaceUid) -> Option<AddressSpaceSharedPtr> {
self.finish_initializing();
// If the weak ptr was found, we _must_ be able to upgrade it!
self.vm_map().get(&vmuid).map(|a| a.upgrade().unwrap())
}
/// Return a copy of `tg` with the same mappings.
/// NOTE: Called simply Session::clone() in rr
fn clone_tg(&self, t: &dyn Task, tg: ThreadGroupSharedPtr) -> ThreadGroupSharedPtr {
self.assert_fully_initialized();
// If tg already belongs to our session this is a fork to create a new
// taskgroup, otherwise it's a session-clone of an existing taskgroup
if self.weak_self.ptr_eq(tg.borrow().session_weak()) {
ThreadGroup::new(
self.weak_self.clone(),
Some(Rc::downgrade(&tg)),
t.rec_tid(),
t.tid(),
t.own_namespace_tid(),
t.tuid().serial(),
)
} else {
let maybe_parent = match tg.borrow().parent() {
Some(parent_tg) => self
.find_thread_group_from_tguid(parent_tg.borrow().tguid())
.map(|found| Rc::downgrade(&found)),
None => None,
};
ThreadGroup::new(
self.weak_self.clone(),
maybe_parent,
tg.borrow().tgid,
t.tid(),
t.own_namespace_tid(),
tg.borrow().tguid().serial(),
)
}
}
/// Return the set of Tasks being traced in this session.
fn tasks(&self) -> Ref<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow()
}
fn tasks_mut(&self) -> RefMut<'_, TaskMap> {
self.finish_initializing();
self.as_session_inner().task_map.borrow_mut()
}
fn thread_group_map(&self) -> Ref<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow()
}
fn thread_group_map_mut(&self) -> RefMut<'_, ThreadGroupMap> {
self.as_session_inner().thread_group_map.borrow_mut()
}
fn vm_map(&self) -> Ref<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow()
}
fn vm_map_mut(&self) -> RefMut<'_, AddressSpaceMap> {
self.as_session_inner().vm_map.borrow_mut()
}
/// Call `post_exec()` immediately after a tracee has successfully
/// `execve()`'d. After that, `done_initial_exec()` returns true.
/// This is called while we're still in the execve syscall so it's not safe
/// to perform remote syscalls in this method.
///
/// Tracee state can't be validated before the first exec,
/// because the address space inside the rd process for `rd replay`
/// will be different than it was for `rd record`.
/// After the first exec, we're running tracee code, and
/// everything must be the same.
///
/// DIFF NOTE: Additional param `t`. Makes things simpler.
fn post_exec(&self, t: &dyn Task) {
// We just saw a successful exec(), so from now on we know
// that the address space layout for the replay tasks will
// (should!) be the same as for the recorded tasks. So we can
// start validating registers at events.
self.assert_fully_initialized();
if self.done_initial_exec() {
return;
}
self.done_initial_exec_.set(true);
debug_assert_eq!(self.tasks().len(), 1);
t.flush_inconsistent_state();
self.spawned_task_error_fd_.borrow_mut().close();
}
}
fn remap_shared_mmap(
remote: &mut AutoRemoteSyscalls,
emu_fs: &EmuFs,
dest_emu_fs: &mut EmuFs,
k: MemoryRangeKey,
) {
let m = remote.vm().mapping_of(k.start()).unwrap().clone();
log!(
LogDebug,
" remapping shared region at {}-{}",
m.map.start(),
m.map.end()
);
let arch = remote.arch();
rd_infallible_syscall!(
remote,
syscall_number_for_munmap(arch),
m.map.start().as_usize(),
m.map.size()
);
let emu_file;
if let Some(file) = dest_emu_fs.at(&m.recorded_map) {
emu_file = file;
} else {
emu_file = dest_emu_fs.clone_file(emu_fs.at(&m.recorded_map).unwrap());
}
// TODO: this duplicates some code in replay_syscall.cc, but
// it's somewhat nontrivial to factor that code out.
let remote_fd: i32;
{
let path = emu_file.borrow().proc_path();
let arch = remote.arch();
let mut child_path = AutoRestoreMem::push_cstr(remote, path.as_str());
// Always open the emufs file O_RDWR, even if the current mapping prot
// is read-only. We might mprotect it to read-write later.
// skip leading '/' since we want the path to be relative to the root fd
let addr: RemotePtr<Void> = child_path.get().unwrap() + 1usize;
let res = rd_infallible_syscall!(
child_path,
syscall_number_for_openat(arch),
RD_RESERVED_ROOT_DIR_FD,
addr.as_usize(),
libc::O_RDWR
);
if 0 > res {
fatal!("Couldn't open {} in tracee", path);
}
remote_fd = res as i32;
}
let real_file = remote.task().stat_fd(remote_fd);
let real_file_name = remote.task().file_name_of_fd(remote_fd);
// XXX this condition is x86/x64-specific, I imagine.
remote.infallible_mmap_syscall(
Some(m.map.start()),
m.map.size(),
m.map.prot(),
// The remapped segment *must* be
// remapped at the same address,
// or else many things will go
// haywire.
(m.map.flags() & !MapFlags::MAP_ANONYMOUS) | MapFlags::MAP_FIXED,
remote_fd,
m.map.file_offset_bytes() / page_size() as u64,
);
// We update the AddressSpace mapping too, since that tracks the real file
// name and we need to update that.
remote.vm().map(
remote.task(),
m.map.start(),
m.map.size(),
m.map.prot(),
m.map.flags(),
m.map.file_offset_bytes(),
&real_file_name,
real_file.st_dev,
real_file.st_ino,
None,
Some(&m.recorded_map),
Some(emu_file),
None,
None,
);
let arch = remote.arch();
remote.infallible_syscall(syscall_number_for_close(arch), &[remote_fd as usize]);
}
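// The munmap + openat(proc_path) + mmap(MAP_FIXED) sequence above swaps a
// recorded shared mapping for its EmuFs-backed replacement at the identical
// address; opening O_RDWR up front avoids reopening if the tracee later
// mprotects the region to read-write.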
fn capture_syscallbuf(m: &Mapping, clone_leader: &dyn Task) -> Vec<u8> {
let start = m.map.start();
let data_size: usize;
let num_bytes_addr =
RemotePtr::<u32>::cast(remote_ptr_field!(start, syscallbuf_hdr, num_rec_bytes));
if read_val_mem(
clone_leader,
remote_ptr_field!(start, syscallbuf_hdr, locked),
None,
) != 0u8
{
// There may be an incomplete syscall record after num_rec_bytes that
// we need to capture here. We don't know how big that record is,
// so just record the entire buffer. This should not be common.
data_size = m.map.size();
} else {
data_size =
read_val_mem(clone_leader, num_bytes_addr, None) as usize + size_of::<syscallbuf_hdr>();
}
read_mem(clone_leader, start, data_size, None)
}
fn on_create_task_common<S: Session>(sess: &S, t: TaskSharedPtr) {
let rec_tid = t.rec_tid();
sess.task_map.borrow_mut().insert(rec_tid, t);
} | shared_maps_to_clone.push(k);
}
}
// Do this in a separate loop to avoid iteration invalidation issues | random_line_split |
data.js | (function () {
"use strict";
var __DOMAIN__ = 'http://laiwang.com';
var __API_DOMAIN__ = 'http://api.laiwang.com/v1';
var __LENGTH__ = 25;
// These three strings encode placeholder images. You will want to set the backgroundImage property in your real data to be URLs to images.
var lightGray = "../images/item_bac01.jpg"; //"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY7h4+cp/AAhpA3h+ANDKAAAAAElFTkSuQmCC";
var mediumGray = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY5g8dcZ/AAY/AsAlWFQ+AAAAAElFTkSuQmCC";
var darkGray = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY3B0cPoPAANMAcOba1BlAAAAAElFTkSuQmCC";
// Each of these sample groups must have a unique key to be displayed
//Defines the groups above concretely. laiwang: the main laiwang wall; event: "together" events; friend: friends
var Groups = [
{ key: "laiwang1", title: "来往", subtitle: "laiwang subtitle title", backgroundImage: darkGray, description: "this is the laiwang brief wall." },
{ key: "laiwang2", title: "在一起", subtitle: "event subtitle title", backgroundImage: darkGray, description: "this is the event lists." },
{ key: "laiwang3", title: "好友", subtitle: "friend subtitle title", backgroundImage: darkGray, description: "this is the all friend." }
]
function groupKeySelector(item) {
return item.group.key;
}
function groupDataSelector(item) {
return item.group;
}
//function groupSorter(item) {
// return [0,1];
//}
// This function returns a WinJS.Binding.List containing only the items that belong to the provided group.
//From the list, fetch the items that belong to the given group
function getItemsFromGroup(group) {
return list.createFiltered(function (item) { return item.group.key === group.key; });
}
//Extract the comments from an item
//function getCommentsFromItem(item) {
// //var items = getItemsFromGroup(item.group);
// return commentsList.createFiltered(function (c) { return c.item.id === item.id; });
//}
// TODO: Replace the data with your real data.
// You can add data from asynchronous sources whenever it becomes available.
//sampleItems.forEach(function (item) {
// list.push(item);
//});
//Reconfigure jQuery's ajax defaults
function ajaxSet() {
$.ajaxSetup({
cache: false,
dataType: 'json',
data: {},
| beforeSend: function (jqXHR, settings) {
if (typeof this.data === 'string') {
this.data = this.data.replace(/%[0-1][0-9a-f]/g, '%20');
this.data += '&access_token=' + localStorage['access_token'];
} else if (typeof this.data === 'object') {
this.data['access_token'] = localStorage['access_token'];
}
this._beforeSend && this._beforeSend(jqXHR, settings);
},
error: function (jqXHR, textStatus, errorThrown) {
this._error && this._error(jqXHR, textStatus, errorThrown);
this._failure && this._failure(jqXHR, textStatus, errorThrown);
var errorObject = $.parseJSON(jqXHR.responseText);
if (errorObject.error === "invalid_token" || errorObject.error === "expired_token" || errorObject.error === "invalid_grant") {
authentication.refreshAccessToken(function () {
//babylon.init();//?????????????????????????
//$('#index').trigger('click');
}, function () {
authentication.toAuthorizePage();
});
}
},
success: function (data, textStatus, jqXHR) {
if (!data) return;
this._success && this._success(data, textStatus, jqXHR);
}
});
}
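// The handlers above forward to optional per-request hooks (_beforeSend,
// _error, _failure, _success) so each $.ajax call keeps its own callbacks
// while token injection and refresh stay centralized. Hypothetical usage
// (someUrl is a placeholder): $.ajax({ url: someUrl, _success: function (d) { /* ... */ } });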
ajaxSet();
//Fetch someone's main wall by id
//Works without an id as well
function getStream(id) {
var id = id || '';
//var subUri = {
// stream: '/feed/post/main/list',
// incoming: '/feed/post/incoming/list',
// group: '/feed/post/circle/list'
//};
var postData = {
'cursor': 0,
'size': __LENGTH__,
'access_token': localStorage['access_token']
};
$.ajax({
global: false,
url: __API_DOMAIN__ + '/feed/post/main/list', //fetch the main laiwang wall
type: 'GET',
data: postData,
_success: function (data) {
data = data.values;
//If the returned values are empty
if (data.length === 0) {
return;
}
for (var index in data) {
data[index].content = data[index].content.replace(/\n/gi, '<br/>');
}
data.forEach(function (item) {//to do rebuild
// Each of these sample items should have a reference to a particular group.
item.group = Groups[0];//everything fetched by the ajax request above is main-wall data, so use item 0 of the Groups array: laiwang
//item.key = item.id;
item.itemPublisherAvatar = item.publisher.avatar;
item.title = item.publisher.name;
item.subtitle = transformDate(item.createdAt);
item.description = item.content.substr(0, 100);
item.content = item.content;
item.backgroundImage = (!!(item.attachments[0]) && item.attachments[0].picture) ? item.attachments[0].picture : lightGray;
//If the user didn't post a picture, use the content in place of the picture
item.imageReplacer = (!item.attachments[0] || !item.attachments[0].picture) ? item.description : "";
//Comment handling
//if (!!item.commentCount && item.commentCount !== 0) {
// //commentsList = [];
// item.comments.forEach(function (v) {
// v.item = item;
// //v.item.key = item.id;
// v.commentorLink = __API_DOMAIN__ + "/u/" + v.commentor.id;
// v.commentorAvatar = v.commentor.avatar;
// v.commentorName = v.commentor.name;
// v.commentCreatedAt = transformDate(v.createdAt);
// v.comment = v.content;
// commentsList.push(v);
// });
// //item.comments = commentsList;
//}
list.push(item);
});
}
});
}
//Fetch the friends list
function getFriends() {
var postData = {
'type': 'FOLLOWING',
'size': __LENGTH__,
'access_token': localStorage['access_token']
};
$.ajax({
global: false,
url: __API_DOMAIN__ + '/relationship/friend/list',
type: 'GET',
data: postData,
_success: function (data) {
data = data.values;
//If the returned values are empty
if (data.length === 0) {
return;
}
data.forEach(function (item) {
item.group = Groups[2];
//item.key = item.id;
item.itemPublisherAvatar = item.avatar;
item.title = item.name;
item.subtitle = item.connectionType;
item.description = "";
item.content = "";
item.backgroundImage = mediumGray;
//If the user didn't post a picture, use the content in place of the picture
item.imageReplacer = "";
list.push(item);
});
}
})
}
//Convert time format: milliseconds --> yyyy-MM-dd HH:mm:ss
function transformDate(ms) {
var sDate = new Date(ms);
sDate = sDate.getFullYear() + "-" + (sDate.getMonth() + 1) + "-" + sDate.getDate() + " " + sDate.getHours() + ":" + sDate.getMinutes() + ":" + sDate.getSeconds();
return sDate;
}
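// Hypothetical example (output depends on the machine's timezone):
// transformDate(0) gives something like "1970-1-1 8:0:0" on a UTC+8 machine.
// Note the fields are not zero-padded, despite the yyyy-MM-dd HH:mm:ss label.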
var list = new WinJS.Binding.List();
//var commentsList = new WinJS.Binding.List();
getStream();
getFriends();
//Fetch all the items, in "grouped" form. "Grouping" here is a special data structure that is hard to describe precisely
var groupedItems = list.createGrouped(groupKeySelector, groupDataSelector);
//Fetch all the groups. Derived from the "grouped items" above; the same kind of special data structure
//Seemingly a bottom-up sort of "reverse engineering"
//var groups = groupedItems.groups;
//Holds the comments for each item
//var comments = commentsList.createGrouped(groupKeySelector, groupDataSelector);
WinJS.Namespace.define("data", {
API_DOMAIN: __API_DOMAIN__,
DOMAIN:__DOMAIN__,
items: groupedItems,
groups: groupedItems.groups,
getItemsFromGroup: getItemsFromGroup,
//getCommentsFromItem: getCommentsFromItem,
transformDate: transformDate
});
})();
var myCellSpanningData = new WinJS.Binding.List([
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "smallItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "mediumItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "largeItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "mediumItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "smallItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "smallItem" },
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "mediumItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "mediumItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "smallItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "smallItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "smallItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "smallItem" },
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "smallItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "smallItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "mediumItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "smallItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "largeItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "mediumItem" }
]);
| identifier_name |
|
data.js | (function () {
"use strict";
var __DOMAIN__ = 'http://laiwang.com';
var __API_DOMAIN__ = 'http://api.laiwang.com/v1';
var __LENGTH__ = 25;
// These three strings encode placeholder images. You will want to set the backgroundImage property in your real data to be URLs to images.
var lightGray = "../images/item_bac01.jpg"; //"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY7h4+cp/AAhpA3h+ANDKAAAAAElFTkSuQmCC";
var mediumGray = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY5g8dcZ/AAY/AsAlWFQ+AAAAAElFTkSuQmCC";
var darkGray = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY3B0cPoPAANMAcOba1BlAAAAAElFTkSuQmCC";
// Each of these sample groups must have a unique key to be displayed
//Defines the groups above concretely. laiwang: the main laiwang wall; event: "together" events; friend: friends
var Groups = [
{ key: "laiwang1", title: "来往", subtitle: "laiwang subtitle title", backgroundImage: darkGray, description: "this is the laiwang brief wall." },
{ key: "laiwang2", title: "在一起", subtitle: "event subtitle title", backgroundImage: darkGray, description: "this is the event lists." },
{ key: "laiwang3", title: "好友", subtitle: "friend subtitle title", backgroundImage: darkGray, description: "this is the all friend." }
]
function groupKeySelector(item) {
return item.group.key;
}
function groupDataSelector(item) {
return item.group;
}
//function groupSorter(item) {
// return [0,1];
//}
// This function returns a WinJS.Binding.List containing only the items that belong to the provided group.
//From the list, fetch the items that belong to the given group
function getItemsFromGroup(group) {
return list.createFiltered(function (item) { return item.group.key === group.key; });
}
//Extract the comments from an item
//function getCommentsFromItem(item) {
// //var items = getItemsFromGroup(item.group);
// return commentsList.createFiltered(function (c) { return c.item.id === item.id; });
//}
// TODO: Replace the data with your real data.
// You can add data from asynchronous sources whenever it becomes available.
//sampleItems.forEach(function (item) {
// list.push(item);
//});
//Reconfigure jQuery's ajax defaults
function ajaxSet() {
$.ajaxSetup({
cache: false,
dataType: 'json',
data: {},
beforeSend: function (jqXHR, settings) {
if (typeof this.data === 'string') {
this.data = this.data.replace(/%[0-1][0-9a-f]/g, '%20');
this.data += '&access_token=' + localStorage['access_token'];
} else if (typeof this.data === 'object') {
this.data['access_token'] = localStorage['access_token'];
}
this._beforeSend && this._beforeSend(jqXHR, settings);
},
error: function (jqXHR, textStatus, errorThrown) {
this._error && this._error(jqXHR, textStatus, errorThrown);
this._failure && this._failure(jqXHR, textStatus, errorThrown);
var errorObject = $.parseJSON(jqXHR.responseText);
if (errorObject.error === "invalid_token" || errorObject.error === "expired_token" || errorObject.error === "invalid_grant") {
authentication.refreshAccessToken(function () {
//babylon.init();//?????????????????????????
//$('#index').trigger('click');
}, function () {
authentication.toAuthorizePage();
});
}
},
success: function (data, textStatus, jqXHR) {
if (!data) return;
this._success && this._success(data, textStatus, jqXHR);
}
});
}
ajaxSet();
//Fetch someone's main wall by id
//Works without an id as well
function getStream(id) {
var id = id || '';
//var subUri = {
// stream: '/feed/post/main/list',
// incoming: '/feed/post/incoming/list',
// group: '/feed/post/circle/list'
//};
var postData = {
'cursor': 0,
'size': __LENGTH__,
'access_token': localStorage['access_token']
};
$.ajax({
global: false,
url: __API_DOMAIN__ + '/feed/post/main/list', //fetch the main laiwang wall
type: 'GET',
data: postData,
_success: function (data) {
data = data.values;
//If the returned values are empty
if (data.length === 0) {
return;
}
for (var index in data) {
data[index].content = data[index].content.replace(/\n/ | data.forEach(function (item) {//to do rebuild
// Each of these sample items should have a reference to a particular group.
item.group = Groups[0];//everything fetched by the ajax request above is main-wall data, so use item 0 of the Groups array: laiwang
//item.key = item.id;
item.itemPublisherAvatar = item.publisher.avatar;
item.title = item.publisher.name;
item.subtitle = transformDate(item.createdAt);
item.description = item.content.substr(0, 100);
item.content = item.content;
item.backgroundImage = (!!(item.attachments[0]) && item.attachments[0].picture) ? item.attachments[0].picture : lightGray;
//If the user didn't post a picture, use the content in place of the picture
item.imageReplacer = (!item.attachments[0] || !item.attachments[0].picture) ? item.description : "";
//Comment handling
//if (!!item.commentCount && item.commentCount !== 0) {
// //commentsList = [];
// item.comments.forEach(function (v) {
// v.item = item;
// //v.item.key = item.id;
// v.commentorLink = __API_DOMAIN__ + "/u/" + v.commentor.id;
// v.commentorAvatar = v.commentor.avatar;
// v.commentorName = v.commentor.name;
// v.commentCreatedAt = transformDate(v.createdAt);
// v.comment = v.content;
// commentsList.push(v);
// });
// //item.comments = commentsList;
//}
list.push(item);
});
}
});
}
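// Note: getStream always requests the first page (cursor: 0, size: __LENGTH__);
// paging further would presumably mean feeding the server's returned cursor
// back into postData, which is not implemented here.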
//Fetch the friends list
function getFriends() {
var postData = {
'type': 'FOLLOWING',
'size': __LENGTH__,
'access_token': localStorage['access_token']
};
$.ajax({
global: false,
url: __API_DOMAIN__ + '/relationship/friend/list',
type: 'GET',
data: postData,
_success: function (data) {
data = data.values;
//If the returned values are empty
if (data.length === 0) {
return;
}
data.forEach(function (item) {
item.group = Groups[2];
//item.key = item.id;
item.itemPublisherAvatar = item.avatar;
item.title = item.name;
item.subtitle = item.connectionType;
item.description = "";
item.content = "";
item.backgroundImage = mediumGray;
//If the user didn't post a picture, use the content in place of the picture
item.imageReplacer = "";
list.push(item);
});
}
})
}
//Convert time format: milliseconds --> yyyy-MM-dd HH:mm:ss
function transformDate(ms) {
var sDate = new Date(ms);
sDate = sDate.getFullYear() + "-" + (sDate.getMonth() + 1) + "-" + sDate.getDate() + " " + sDate.getHours() + ":" + sDate.getMinutes() + ":" + sDate.getSeconds();
return sDate;
}
var list = new WinJS.Binding.List();
//var commentsList = new WinJS.Binding.List();
getStream();
getFriends();
//Fetch all the items, in "grouped" form. "Grouping" here is a special data structure that is hard to describe precisely
var groupedItems = list.createGrouped(groupKeySelector, groupDataSelector);
//Fetch all the groups. Derived from the "grouped items" above; the same kind of special data structure
//Seemingly a bottom-up sort of "reverse engineering"
//var groups = groupedItems.groups;
//Holds the comments for each item
//var comments = commentsList.createGrouped(groupKeySelector, groupDataSelector);
WinJS.Namespace.define("data", {
API_DOMAIN: __API_DOMAIN__,
DOMAIN:__DOMAIN__,
items: groupedItems,
groups: groupedItems.groups,
getItemsFromGroup: getItemsFromGroup,
//getCommentsFromItem: getCommentsFromItem,
transformDate: transformDate
});
})();
var myCellSpanningData = new WinJS.Binding.List([
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "smallItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "mediumItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "largeItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "mediumItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "smallItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "smallItem" },
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "mediumItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "mediumItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "smallItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "smallItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "smallItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "smallItem" },
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "smallItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "smallItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "mediumItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "smallItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "largeItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "mediumItem" }
]);
| gi, '<br/>');
}
| conditional_block |
data.js | (function () {
"use strict";
var __DOMAIN__ = 'http://laiwang.com';
var __API_DOMAIN__ = 'http://api.laiwang.com/v1';
var __LENGTH__ = 25;
// These three strings encode placeholder images. You will want to set the backgroundImage property in your real data to be URLs to images.
var lightGray = "../images/item_bac01.jpg"; //"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY7h4+cp/AAhpA3h+ANDKAAAAAElFTkSuQmCC";
var mediumGray = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY5g8dcZ/AAY/AsAlWFQ+AAAAAElFTkSuQmCC";
var darkGray = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY3B0cPoPAANMAcOba1BlAAAAAElFTkSuQmCC";
// Each of these sample groups must have a unique key to be displayed
//Defines the groups above concretely. laiwang: the main laiwang wall; event: "together" events; friend: friends
var Groups = [
{ key: "laiwang1", title: "来往", subtitle: "laiwang subtitle title", backgroundImage: darkGray, description: "this is the laiwang brief wall." },
{ key: "laiwang2", title: "在一起", subtitle: "event subtitle title", backgroundImage: darkGray, description: "this is the event lists." },
{ key: "laiwang3", title: "好友", subtitle: "friend subtitle title", backgroundImage: darkGray, description: "this is the all friend." }
]
function groupKeySelector(item) {
return item.group.key;
}
function groupDataSelector(item) {
return item.group;
}
//function groupSorter(item) {
// return [0,1];
//}
// This function returns a WinJS.Binding.List containing only the items that belong to the provided group.
//From the list, fetch the items that belong to the given group
function getItemsFromGroup(group) {
return list.createFiltered(function (item) { return item.group.key === group.key; });
}
//Extract the comments from an item
//function getCommentsFromItem(item) {
// //var items = getItemsFromGroup(item.group);
// return commentsList.createFiltered(function (c) { return c.item.id === item.id; });
//}
// TODO: Replace the data with your real data.
// You can add data from asynchronous sources whenever it becomes available.
//sampleItems.forEach(function (item) {
// list.push(item);
//});
//Reconfigure jQuery's ajax defaults
function ajaxSet() {
$.ajaxSetup({
cache: false,
dataType: 'json',
data: {},
beforeSend: function (jqXHR, settings) {
if (typeof this.data === 'string') {
this.data = this.data.replace(/%[0-1][0-9a-f]/g, '%20');
this.data += '&access_token=' + localStorage['access_token'];
} else if (typeof this.data === 'object') {
this.data['access_token'] = localStorage['access_token'];
}
this._beforeSend && this._beforeSend(jqXHR, settings);
},
error: function (jqXHR, textStatus, errorThrown) {
this._error && this._error(jqXHR, textStatus, errorThrown);
this._failure && this._failure(jqXHR, textStatus, errorThrown);
var errorObject = $.parseJSON(jqXHR.responseText);
if (errorObject.error === "invalid_token" || errorObject.error === "expired_token" || errorObject.error === "invalid_grant") {
authentication.refreshAccessToken(function () {
//babylon.init();//?????????????????????????
//$('#index').trigger('click');
}, function () {
authentication.toAuthorizePage();
});
}
},
success: function (data, textStatus, jqXHR) {
if (!data) return;
this._success && this._success(data, textStatus, jqXHR);
}
});
}
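// On invalid/expired-token errors the global error handler above refreshes the
// access token; the commented-out babylon.init() and $('#index').trigger('click')
// lines look like an unfinished retry hook.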
ajaxSet();
//Fetch someone's main wall by id
//Works without an id as well
function getStream(id) {
var id = id || '';
//var subUri = {
// stream: '/feed/post/main/list',
// incoming: '/feed/p | I_DOMAIN__ + '/relationship/friend/list',
type: 'GET',
data: postData,
_success: function (data) {
data = data.values;
//If the returned values are empty
if (data.length === 0) {
return;
}
data.forEach(function (item) {
item.group = Groups[2];
//item.key = item.id;
item.itemPublisherAvatar = item.avatar;
item.title = item.name;
item.subtitle = item.connectionType;
item.description = "";
item.content = "";
item.backgroundImage = mediumGray;
//If the user didn't post a picture, use the content in place of the picture
item.imageReplacer = "";
list.push(item);
});
}
})
}
//Convert time format: milliseconds --> yyyy-MM-dd HH:mm:ss
function transformDate(ms) {
var sDate = new Date(ms);
sDate = sDate.getFullYear() + "-" + (sDate.getMonth() + 1) + "-" + sDate.getDate() + " " + sDate.getHours() + ":" + sDate.getMinutes() + ":" + sDate.getSeconds();
return sDate;
}
var list = new WinJS.Binding.List();
//var commentsList = new WinJS.Binding.List();
getStream();
getFriends();
//Fetch all the items, in "grouped" form. "Grouping" here is a special data structure that is hard to describe precisely
var groupedItems = list.createGrouped(groupKeySelector, groupDataSelector);
//Fetch all the groups. Derived from the "grouped items" above; the same kind of special data structure
//Seemingly a bottom-up sort of "reverse engineering"
//var groups = groupedItems.groups;
//Holds the comments for each item
//var comments = commentsList.createGrouped(groupKeySelector, groupDataSelector);
WinJS.Namespace.define("data", {
API_DOMAIN: __API_DOMAIN__,
DOMAIN:__DOMAIN__,
items: groupedItems,
groups: groupedItems.groups,
getItemsFromGroup: getItemsFromGroup,
//getCommentsFromItem: getCommentsFromItem,
transformDate: transformDate
});
})();
var myCellSpanningData = new WinJS.Binding.List([
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "smallItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "mediumItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "largeItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "mediumItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "smallItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "smallItem" },
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "mediumItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "mediumItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "smallItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "smallItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "smallItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "smallItem" },
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "smallItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "smallItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "mediumItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "smallItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "largeItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "mediumItem" }
]);
| ost/incoming/list',
// group: '/feed/post/circle/list'
//};
var postData = {
'cursor': 0,
'size': __LENGTH__,
'access_token': localStorage['access_token']
};
$.ajax({
global: false,
url: __API_DOMAIN__ + '/feed/post/main/list', //fetch the main laiwang wall
type: 'GET',
data: postData,
_success: function (data) {
data = data.values;
//If the returned values are empty
if (data.length === 0) {
return;
}
for (var index in data) {
data[index].content = data[index].content.replace(/\n/gi, '<br/>');
}
data.forEach(function (item) {//to do rebuild
// Each of these sample items should have a reference to a particular group.
item.group = Groups[0];//everything fetched by the ajax request above is main-wall data, so use item 0 of the Groups array: laiwang
//item.key = item.id;
item.itemPublisherAvatar = item.publisher.avatar;
item.title = item.publisher.name;
item.subtitle = transformDate(item.createdAt);
item.description = item.content.substr(0, 100);
item.content = item.content;
item.backgroundImage = (!!(item.attachments[0]) && item.attachments[0].picture) ? item.attachments[0].picture : lightGray;
//If the user didn't post a picture, use the content in place of the picture
item.imageReplacer = (!item.attachments[0] || !item.attachments[0].picture) ? item.description : "";
//Comment handling
//if (!!item.commentCount && item.commentCount !== 0) {
// //commentsList = [];
// item.comments.forEach(function (v) {
// v.item = item;
// //v.item.key = item.id;
// v.commentorLink = __API_DOMAIN__ + "/u/" + v.commentor.id;
// v.commentorAvatar = v.commentor.avatar;
// v.commentorName = v.commentor.name;
// v.commentCreatedAt = transformDate(v.createdAt);
// v.comment = v.content;
// commentsList.push(v);
// });
// //item.comments = commentsList;
//}
list.push(item);
});
}
});
}
//Fetch the friends list
function getFriends() {
var postData = {
'type': 'FOLLOWING',
'size': __LENGTH__,
'access_token': localStorage['access_token']
};
$.ajax({
global: false,
url: __AP | identifier_body |
data.js | (function () {
"use strict";
|
// These three strings encode placeholder images. You will want to set the backgroundImage property in your real data to be URLs to images.
var lightGray = "../images/item_bac01.jpg"; //"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY7h4+cp/AAhpA3h+ANDKAAAAAElFTkSuQmCC";
var mediumGray = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY5g8dcZ/AAY/AsAlWFQ+AAAAAElFTkSuQmCC";
var darkGray = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAANSURBVBhXY3B0cPoPAANMAcOba1BlAAAAAElFTkSuQmCC";
// Each of these sample groups must have a unique key to be displayed
//Defines the groups above concretely. laiwang: the main laiwang wall; event: "together" events; friend: friends
var Groups = [
{ key: "laiwang1", title: "来往", subtitle: "laiwang subtitle title", backgroundImage: darkGray, description: "this is the laiwang brief wall." },
{ key: "laiwang2", title: "在一起", subtitle: "event subtitle title", backgroundImage: darkGray, description: "this is the event lists." },
{ key: "laiwang3", title: "好友", subtitle: "friend subtitle title", backgroundImage: darkGray, description: "this is the all friend." }
];
function groupKeySelector(item) {
return item.group.key;
}
function groupDataSelector(item) {
return item.group;
}
//function groupSorter(item) {
// return [0,1];
//}
// This function returns a WinJS.Binding.List containing only the items that belong to the provided group.
// from the list, fetch the items contained in the given group
function getItemsFromGroup(group) {
return list.createFiltered(function (item) { return item.group.key === group.key; });
}
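// Usage sketch (illustrative only; assumes the `list` defined below has been populated):
// var laiwangItems = getItemsFromGroup(Groups[0]);
// laiwangItems.forEach(function (item) { console.log(item.title); });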
// extract the comments from an item
//function getCommentsFromItem(item) {
// //var items = getItemsFromGroup(item.group);
// return commentsList.createFiltered(function (c) { return c.item.id === item.id; });
//}
// TODO: Replace the data with your real data.
// You can add data from asynchronous sources whenever it becomes available.
//sampleItems.forEach(function (item) {
// list.push(item);
//});
// reconfigure jQuery's ajax defaults
function ajaxSet() {
$.ajaxSetup({
cache: false,
dataType: 'json',
data: {},
beforeSend: function (jqXHR, settings) {
if (typeof this.data === 'string') {
this.data = this.data.replace(/%[0-1][0-9a-f]/g, '%20');
this.data += '&access_token=' + localStorage['access_token'];
} else if (typeof this.data === 'object') {
this.data['access_token'] = localStorage['access_token'];
}
this._beforeSend && this._beforeSend(jqXHR, settings);
},
error: function (jqXHR, textStatus, errorThrown) {
this._error && this._error(jqXHR, textStatus, errorThrown);
this._failure && this._failure(jqXHR, textStatus, errorThrown);
var errorObject = $.parseJSON(jqXHR.responseText);
if (errorObject.error === "invalid_token" || errorObject.error === "expired_token" || errorObject.error === "invalid_grant") {
authentication.refreshAccessToken(function () {
//babylon.init();//?????????????????????????
//$('#index').trigger('click');
}, function () {
authentication.toAuthorizePage();
});
}
},
success: function (data, textStatus, jqXHR) {
if (!data) return;
this._success && this._success(data, textStatus, jqXHR);
}
});
}
ajaxSet();
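// With the defaults above, every request automatically carries the stored
// access_token, so callers only supply endpoint-specific fields. Illustrative
// call (the endpoint path here is hypothetical):
// $.ajax({ url: __API_DOMAIN__ + '/user/profile', type: 'GET', _success: function (data) { /* ... */ } });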
// fetch someone's main wall by id
// also works without an id
function getStream(id) {
id = id || '';
//var subUri = {
// stream: '/feed/post/main/list',
// incoming: '/feed/post/incoming/list',
// group: '/feed/post/circle/list'
//};
var postData = {
'cursor': 0,
'size': __LENGTH__,
'access_token': localStorage['access_token']
};
$.ajax({
global: false,
url: __API_DOMAIN__ + '/feed/post/main/list', // fetch the main Laiwang wall
type: 'GET',
data: postData,
_success: function (data) {
data = data.values;
// if the returned value is empty
if (data.length === 0) {
return;
}
for (var index in data) {
data[index].content = data[index].content.replace(/\n/gi, '<br/>');
}
data.forEach(function (item) { // TODO: rebuild
// Each of these sample items should have a reference to a particular group.
item.group = Groups[0]; // everything returned by the ajax request above belongs to the main Laiwang wall, so use Groups[0]: laiwang
//item.key = item.id;
item.itemPublisherAvatar = item.publisher.avatar;
item.title = item.publisher.name;
item.subtitle = transformDate(item.createdAt);
item.description = item.content.substr(0, 100);
item.content = item.content;
item.backgroundImage = (!!(item.attachments[0]) && item.attachments[0].picture) ? item.attachments[0].picture : lightGray;
// if the user posted no picture, use the content in place of the picture
item.imageReplacer = (!item.attachments[0] || !item.attachments[0].picture) ? item.description : "";
// about the comments
//if (!!item.commentCount && item.commentCount !== 0) {
// //commentsList = [];
// item.comments.forEach(function (v) {
// v.item = item;
// //v.item.key = item.id;
// v.commentorLink = __API_DOMAIN__ + "/u/" + v.commentor.id;
// v.commentorAvatar = v.commentor.avatar;
// v.commentorName = v.commentor.name;
// v.commentCreatedAt = transformDate(v.createdAt);
// v.comment = v.content;
// commentsList.push(v);
// });
// //item.comments = commentsList;
//}
list.push(item);
});
}
});
}
// fetch the friends list
function getFriends() {
var postData = {
'type': 'FOLLOWING',
'size': __LENGTH__,
'access_token': localStorage['access_token']
};
$.ajax({
global: false,
url: __API_DOMAIN__ + '/relationship/friend/list',
type: 'GET',
data: postData,
_success: function (data) {
data = data.values;
// if the returned value is empty
if (data.length === 0) {
return;
}
data.forEach(function (item) {
item.group = Groups[2];
//item.key = item.id;
item.itemPublisherAvatar = item.avatar;
item.title = item.name;
item.subtitle = item.connectionType;
item.description = "";
item.content = "";
item.backgroundImage = mediumGray;
// if the user posted no picture, use the content in place of the picture
item.imageReplacer = "";
list.push(item);
});
}
})
}
// convert time format: milliseconds --> "yyyy-MM-dd HH:mm:ss"
function transformDate(ms) {
var sDate = new Date(ms);
sDate = sDate.getFullYear() + "-" + (sDate.getMonth() + 1) + "-" + sDate.getDate() + " " + sDate.getHours() + ":" + sDate.getMinutes() + ":" + sDate.getSeconds();
return sDate;
}
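// Note: the Date getters are not zero-padded, so transformDate can return
// e.g. "2013-5-7 9:3:4". A minimal padding helper, if that matters (sketch):
// function pad2(n) { return n < 10 ? '0' + n : '' + n; }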
var list = new WinJS.Binding.List();
//var commentsList = new WinJS.Binding.List();
getStream();
getFriends();
// take all the items, as a "grouped" projection (WinJS's grouped-list data structure)
var groupedItems = list.createGrouped(groupKeySelector, groupDataSelector);
// take all the groups, derived from the grouped items above; the same kind of structure
// it amounts to a bottom-up "reverse engineering" from items to their groups
//var groups = groupedItems.groups;
// holds the comments for each item
//var comments = commentsList.createGrouped(groupKeySelector, groupDataSelector);
WinJS.Namespace.define("data", {
API_DOMAIN: __API_DOMAIN__,
DOMAIN:__DOMAIN__,
items: groupedItems,
groups: groupedItems.groups,
getItemsFromGroup: getItemsFromGroup,
//getCommentsFromItem: getCommentsFromItem,
transformDate: transformDate
});
})();
var myCellSpanningData = new WinJS.Binding.List([
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "smallItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "mediumItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "largeItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "mediumItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "smallItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "smallItem" },
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "mediumItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "mediumItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "smallItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "smallItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "smallItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "smallItem" },
{ title: "Banana Blast", text: "Low-fat frozen yogurt", picture: "images/60Banana.png", type: "smallItem" },
{ title: "Lavish Lemon Ice", text: "Sorbet", picture: "images/60Lemon.png", type: "smallItem" },
{ title: "Marvelous Mint", text: "Gelato", picture: "images/60Mint.png", type: "mediumItem" },
{ title: "Creamy Orange", text: "Sorbet", picture: "images/60Orange.png", type: "smallItem" },
{ title: "Succulent Strawberry", text: "Sorbet", picture: "images/60Strawberry.png", type: "largeItem" },
{ title: "Very Vanilla", text: "Ice Cream", picture: "images/60Vanilla.png", type: "mediumItem" }
]); | var __DOMAIN__ = 'http://laiwang.com';
var __API_DOMAIN__ = 'http://api.laiwang.com/v1';
var __LENGTH__ = 25;
| random_line_split |
integration.go | //
// Author:: Salim Afiune Maya (<[email protected]>)
// Copyright:: Copyright 2020, Lacework Inc.
// License:: Apache License, Version 2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cmd
import (
"strings"
"github.com/AlecAivazis/survey/v2"
"github.com/mitchellh/mapstructure"
"github.com/olekukonko/tablewriter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/lacework/go-sdk/api"
)
var (
// integrationCmd represents the integration command
integrationCmd = &cobra.Command{
Use: "integration",
Aliases: []string{"int"},
Short: "manage external integrations",
Long: `Manage external integrations with the Lacework platform`,
}
// integrationListCmd represents the list sub-command inside the integration command
integrationListCmd = &cobra.Command{
Use: "list",
Short: "list all available external integrations",
Args: cobra.NoArgs,
RunE: func(_ *cobra.Command, _ []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
integrations, err := lacework.Integrations.List()
if err != nil {
return errors.Wrap(err, "unable to get integrations")
}
if cli.JSONOutput() {
return cli.OutputJSON(integrations.Data)
}
cli.OutputHuman(buildIntegrationsTable(integrations.Data))
return nil
},
}
// integrationShowCmd represents the show sub-command inside the integration command
integrationShowCmd = &cobra.Command{
Use: "show <int_guid>",
Short: "Show details about a specific external integration",
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
integration, err := lacework.Integrations.Get(args[0])
if err != nil {
return errors.Wrap(err, "unable to get integration")
}
if cli.JSONOutput() {
return cli.OutputJSON(integration.Data)
}
cli.OutputHuman(buildIntegrationsTable(integration.Data))
cli.OutputHuman("\n")
cli.OutputHuman(buildIntDetailsTable(integration.Data))
return nil
},
}
// integrationCreateCmd represents the create sub-command inside the integration command
integrationCreateCmd = &cobra.Command{
Use: "create",
Short: "create an external integrations",
Args: cobra.NoArgs,
Long: `Creates an external integration in your account through an interactive session.`,
RunE: func(_ *cobra.Command, _ []string) error {
if !cli.InteractiveMode() {
return errors.New("interactive mode is disabled")
}
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
err = promptCreateIntegration(lacework)
if err != nil {
return errors.Wrap(err, "unable to create integration")
}
cli.OutputHuman("The integration was created.\n")
return nil
},
}
// integrationUpdateCmd represents the update sub-command inside the integration command
integrationUpdateCmd = &cobra.Command{
Use: "update",
Hidden: true,
Short: "update an external integrations",
Args: cobra.NoArgs,
RunE: func(_ *cobra.Command, _ []string) error {
return nil
},
}
// integrationDeleteCmd represents the delete sub-command inside the integration command
integrationDeleteCmd = &cobra.Command{
Use: "delete <int_guid>",
Short: "delete an external integrations",
Long: `Delete an external integration by providing its integration GUID. Integration
GUIDs can be found by using the 'lacework integration list' command.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
cli.Log.Info("deleting integration", "int_guid", args[0])
cli.StartProgress(" Deleting integration...")
response, err := lacework.Integrations.Delete(args[0])
cli.StopProgress()
if err != nil {
return errors.Wrap(err, "unable to delete integration")
}
if cli.JSONOutput() {
return cli.OutputJSON(response.Data)
}
cli.OutputHuman("The integration %s was deleted.\n", args[0])
return nil
},
}
)
func init() {
// add the integration command
rootCmd.AddCommand(integrationCmd)
// add sub-commands to the integration command
integrationCmd.AddCommand(integrationListCmd)
integrationCmd.AddCommand(integrationShowCmd)
integrationCmd.AddCommand(integrationCreateCmd)
integrationCmd.AddCommand(integrationUpdateCmd)
integrationCmd.AddCommand(integrationDeleteCmd)
}
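// Typical invocations once wired into the root command (the GUID below is
// hypothetical):
//
//	lacework integration list
//	lacework integration show EXAMPLE_GUID_123
//	lacework integration delete EXAMPLE_GUID_123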
func promptCreateIntegration(lacework *api.Client) error {
var (
integration = ""
prompt = &survey.Select{
Message: "Choose an integration type to create: ",
Options: []string{
"Docker Hub",
"AWS Config",
"AWS CloudTrail",
"GCP Config",
"GCP Audit Log",
"Azure Config",
"Azure Activity Log",
//"Docker V2 Registry",
//"Amazon Container Registry",
//"Google Container Registry",
//"Snowflake Data Share",
},
}
err = survey.AskOne(prompt, &integration)
)
if err != nil {
return err
}
switch integration {
case "Docker Hub":
return createDockerHubIntegration(lacework)
case "AWS Config":
return createAwsConfigIntegration(lacework)
case "AWS CloudTrail":
return createAwsCloudTrailIntegration(lacework)
case "GCP Config":
return createGcpConfigIntegration(lacework)
case "GCP Audit Log":
return createGcpAuditLogIntegration(lacework)
case "Azure Config":
return createAzureConfigIntegration(lacework)
case "Azure Activity Log":
return createAzureActivityLogIntegration(lacework)
//case "Docker V2 Registry":
//case "Amazon Container Registry":
//case "Google Container Registry":
//case "Snowflake Data Share":
default:
return errors.New("unknown integration type")
}
}
func integrationsTable(integrations []api.RawIntegration) [][]string {
out := [][]string{}
for _, idata := range integrations {
out = append(out, []string{
idata.IntgGuid,
idata.Name,
idata.Type,
idata.Status(),
idata.StateString(),
})
}
return out
}
func buildIntegrationsTable(integrations []api.RawIntegration) string {
var (
tableBuilder = &strings.Builder{}
t = tablewriter.NewWriter(tableBuilder)
)
t.SetHeader([]string{
"Integration GUID",
"Name",
"Type",
"Status",
"State",
})
t.SetBorder(false)
t.AppendBulk(integrationsTable(integrations))
t.Render()
return tableBuilder.String()
}
func buildIntDetailsTable(integrations []api.RawIntegration) string {
var (
main = &strings.Builder{}
details = &strings.Builder{}
t = tablewriter.NewWriter(details)
)
t.SetBorder(false)
t.SetAlignment(tablewriter.ALIGN_LEFT)
if len(integrations) != 0 {
integration := integrations[0]
t.AppendBulk(reflectIntegrationData(integration))
t.AppendBulk(buildIntegrationState(integration.State))
}
t.Render()
t = tablewriter.NewWriter(main)
t.SetBorder(false)
t.SetAutoWrapText(false)
t.SetHeader([]string{"INTEGRATION DETAILS"})
t.Append([]string{details.String()})
t.Render()
return main.String()
}
func buildIntegrationState(state *api.IntegrationState) [][]string {
if state != nil {
return [][]string{
[]string{"LAST UPDATED TIME", state.LastUpdatedTime},
[]string{"LAST SUCCESSFUL TIME", state.LastSuccessfulTime},
}
}
return [][]string{}
}
func reflectIntegrationData(raw api.RawIntegration) [][]string {
switch raw.Type {
case api.GcpCfgIntegration.String(),
api.GcpAuditLogIntegration.String():
var iData api.GcpIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"LEVEL", iData.IdType},
[]string{"ORG/PROJECT ID", iData.ID},
[]string{"CLIENT ID", iData.Credentials.ClientId},
[]string{"CLIENT EMAIL", iData.Credentials.ClientEmail},
[]string{"PRIVATE KEY ID", iData.Credentials.PrivateKeyId},
}
if iData.SubscriptionName != "" {
return append(out, []string{"SUBSCRIPTION NAME", iData.SubscriptionName})
}
return out
case api.AwsCfgIntegration.String(),
api.AwsCloudTrailIntegration.String():
var iData api.AwsIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"ROLE ARN", iData.Credentials.RoleArn},
[]string{"EXTERNAL ID", iData.Credentials.ExternalId},
}
if iData.QueueUrl != "" {
return append(out, []string{"QUEUE URL", iData.QueueUrl})
}
return out
case api.AzureCfgIntegration.String(),
api.AzureActivityLogIntegration.String():
var iData api.AzureIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"CLIENT ID", iData.Credentials.ClientID},
[]string{"CLIENT SECRET", iData.Credentials.ClientSecret},
[]string{"TENANT ID", iData.TenantID},
}
if iData.QueueUrl != "" {
return append(out, []string{"QUEUE URL", iData.QueueUrl})
}
return out
default:
out := [][]string{}
for key, value := range deepKeyValueExtract(raw.Data) {
out = append(out, []string{key, value})
}
return out
}
return [][]string{}
}
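// deepKeyValueExtract (below) flattens a nested map into a single level,
// keeping only string leaves. Parent keys are dropped, so nested keys can
// collide with and overwrite top-level ones. Illustrative example:
//
//	deepKeyValueExtract(map[string]interface{}{
//		"A": "1",
//		"B": map[string]interface{}{"C": "2"},
//	})
//	// => map[string]string{"A": "1", "C": "2"}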
func deepKeyValueExtract(v interface{}) map[string]string {
out := map[string]string{}
m, ok := v.(map[string]interface{})
if !ok {
return out
}
| out[key] = s
} else {
deepMap := deepKeyValueExtract(value)
for deepK, deepV := range deepMap {
out[deepK] = deepV
}
}
}
return out
} | for key, value := range m {
if s, ok := value.(string); ok { | random_line_split |
integration.go | //
// Author:: Salim Afiune Maya (<[email protected]>)
// Copyright:: Copyright 2020, Lacework Inc.
// License:: Apache License, Version 2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cmd
import (
"strings"
"github.com/AlecAivazis/survey/v2"
"github.com/mitchellh/mapstructure"
"github.com/olekukonko/tablewriter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/lacework/go-sdk/api"
)
var (
// integrationCmd represents the integration command
integrationCmd = &cobra.Command{
Use: "integration",
Aliases: []string{"int"},
Short: "manage external integrations",
Long: `Manage external integrations with the Lacework platform`,
}
// integrationListCmd represents the list sub-command inside the integration command
integrationListCmd = &cobra.Command{
Use: "list",
Short: "list all available external integrations",
Args: cobra.NoArgs,
RunE: func(_ *cobra.Command, _ []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
integrations, err := lacework.Integrations.List()
if err != nil {
return errors.Wrap(err, "unable to get integrations")
}
if cli.JSONOutput() {
return cli.OutputJSON(integrations.Data)
}
cli.OutputHuman(buildIntegrationsTable(integrations.Data))
return nil
},
}
// integrationShowCmd represents the show sub-command inside the integration command
integrationShowCmd = &cobra.Command{
Use: "show <int_guid>",
Short: "Show details about a specific external integration",
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
integration, err := lacework.Integrations.Get(args[0])
if err != nil {
return errors.Wrap(err, "unable to get integration")
}
if cli.JSONOutput() {
return cli.OutputJSON(integration.Data)
}
cli.OutputHuman(buildIntegrationsTable(integration.Data))
cli.OutputHuman("\n")
cli.OutputHuman(buildIntDetailsTable(integration.Data))
return nil
},
}
// integrationCreateCmd represents the create sub-command inside the integration command
integrationCreateCmd = &cobra.Command{
Use: "create",
Short: "create an external integrations",
Args: cobra.NoArgs,
Long: `Creates an external integration in your account through an interactive session.`,
RunE: func(_ *cobra.Command, _ []string) error {
if !cli.InteractiveMode() {
return errors.New("interactive mode is disabled")
}
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
err = promptCreateIntegration(lacework)
if err != nil {
return errors.Wrap(err, "unable to create integration")
}
cli.OutputHuman("The integration was created.\n")
return nil
},
}
// integrationUpdateCmd represents the update sub-command inside the integration command
integrationUpdateCmd = &cobra.Command{
Use: "update",
Hidden: true,
Short: "update an external integrations",
Args: cobra.NoArgs,
RunE: func(_ *cobra.Command, _ []string) error {
return nil
},
}
// integrationDeleteCmd represents the delete sub-command inside the integration command
integrationDeleteCmd = &cobra.Command{
Use: "delete <int_guid>",
Short: "delete an external integrations",
Long: `Delete an external integration by providing its integration GUID. Integration
GUIDs can be found by using the 'lacework integration list' command.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
cli.Log.Info("deleting integration", "int_guid", args[0])
cli.StartProgress(" Deleting integration...")
response, err := lacework.Integrations.Delete(args[0])
cli.StopProgress()
if err != nil {
return errors.Wrap(err, "unable to delete integration")
}
if cli.JSONOutput() {
return cli.OutputJSON(response.Data)
}
cli.OutputHuman("The integration %s was deleted.\n", args[0])
return nil
},
}
)
func init() {
// add the integration command
rootCmd.AddCommand(integrationCmd)
// add sub-commands to the integration command
integrationCmd.AddCommand(integrationListCmd)
integrationCmd.AddCommand(integrationShowCmd)
integrationCmd.AddCommand(integrationCreateCmd)
integrationCmd.AddCommand(integrationUpdateCmd)
integrationCmd.AddCommand(integrationDeleteCmd)
}
func promptCreateIntegration(lacework *api.Client) error {
var (
integration = ""
prompt = &survey.Select{
Message: "Choose an integration type to create: ",
Options: []string{
"Docker Hub",
"AWS Config",
"AWS CloudTrail",
"GCP Config",
"GCP Audit Log",
"Azure Config",
"Azure Activity Log",
//"Docker V2 Registry",
//"Amazon Container Registry",
//"Google Container Registry",
//"Snowflake Data Share",
},
}
err = survey.AskOne(prompt, &integration)
)
if err != nil {
return err
}
switch integration {
case "Docker Hub":
return createDockerHubIntegration(lacework)
case "AWS Config":
return createAwsConfigIntegration(lacework)
case "AWS CloudTrail":
return createAwsCloudTrailIntegration(lacework)
case "GCP Config":
return createGcpConfigIntegration(lacework)
case "GCP Audit Log":
return createGcpAuditLogIntegration(lacework)
case "Azure Config":
return createAzureConfigIntegration(lacework)
case "Azure Activity Log":
return createAzureActivityLogIntegration(lacework)
//case "Docker V2 Registry":
//case "Amazon Container Registry":
//case "Google Container Registry":
//case "Snowflake Data Share":
default:
return errors.New("unknown integration type")
}
}
func integrationsTable(integrations []api.RawIntegration) [][]string {
out := [][]string{}
for _, idata := range integrations {
out = append(out, []string{
idata.IntgGuid,
idata.Name,
idata.Type,
idata.Status(),
idata.StateString(),
})
}
return out
}
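// Each row lines up with the header set in buildIntegrationsTable below:
// integration GUID, name, type, status, and state. A sample row (values are
// illustrative only):
//
//	[]string{"EXAMPLE_GUID_123", "my-integration", "AWS_CFG", "Enabled", "Ok"}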
func buildIntegrationsTable(integrations []api.RawIntegration) string {
var (
tableBuilder = &strings.Builder{}
t = tablewriter.NewWriter(tableBuilder)
)
t.SetHeader([]string{
"Integration GUID",
"Name",
"Type",
"Status",
"State",
})
t.SetBorder(false)
t.AppendBulk(integrationsTable(integrations))
t.Render()
return tableBuilder.String()
}
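// buildIntDetailsTable renders the first integration's key/value pairs (via
// reflectIntegrationData) plus its state rows as an inner borderless table,
// nested under a single "INTEGRATION DETAILS" header.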
func buildIntDetailsTable(integrations []api.RawIntegration) string {
var (
main = &strings.Builder{}
details = &strings.Builder{}
t = tablewriter.NewWriter(details)
)
t.SetBorder(false)
t.SetAlignment(tablewriter.ALIGN_LEFT)
if len(integrations) != 0 {
integration := integrations[0]
t.AppendBulk(reflectIntegrationData(integration))
t.AppendBulk(buildIntegrationState(integration.State))
}
t.Render()
t = tablewriter.NewWriter(main)
t.SetBorder(false)
t.SetAutoWrapText(false)
t.SetHeader([]string{"INTEGRATION DETAILS"})
t.Append([]string{details.String()})
t.Render()
return main.String()
}
func buildIntegrationState(state *api.IntegrationState) [][]string {
if state != nil {
return [][]string{
[]string{"LAST UPDATED TIME", state.LastUpdatedTime},
[]string{"LAST SUCCESSFUL TIME", state.LastSuccessfulTime},
}
}
return [][]string{}
}
func reflectIntegrationData(raw api.RawIntegration) [][]string |
func deepKeyValueExtract(v interface{}) map[string]string {
out := map[string]string{}
m, ok := v.(map[string]interface{})
if !ok {
return out
}
for key, value := range m {
if s, ok := value.(string); ok {
out[key] = s
} else {
deepMap := deepKeyValueExtract(value)
for deepK, deepV := range deepMap {
out[deepK] = deepV
}
}
}
return out
}
| {
switch raw.Type {
case api.GcpCfgIntegration.String(),
api.GcpAuditLogIntegration.String():
var iData api.GcpIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"LEVEL", iData.IdType},
[]string{"ORG/PROJECT ID", iData.ID},
[]string{"CLIENT ID", iData.Credentials.ClientId},
[]string{"CLIENT EMAIL", iData.Credentials.ClientEmail},
[]string{"PRIVATE KEY ID", iData.Credentials.PrivateKeyId},
}
if iData.SubscriptionName != "" {
return append(out, []string{"SUBSCRIPTION NAME", iData.SubscriptionName})
}
return out
case api.AwsCfgIntegration.String(),
api.AwsCloudTrailIntegration.String():
var iData api.AwsIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"ROLE ARN", iData.Credentials.RoleArn},
[]string{"EXTERNAL ID", iData.Credentials.ExternalId},
}
if iData.QueueUrl != "" {
return append(out, []string{"QUEUE URL", iData.QueueUrl})
}
return out
case api.AzureCfgIntegration.String(),
api.AzureActivityLogIntegration.String():
var iData api.AzureIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"CLIENT ID", iData.Credentials.ClientID},
[]string{"CLIENT SECRET", iData.Credentials.ClientSecret},
[]string{"TENANT ID", iData.TenantID},
}
if iData.QueueUrl != "" {
return append(out, []string{"QUEUE URL", iData.QueueUrl})
}
return out
default:
out := [][]string{}
for key, value := range deepKeyValueExtract(raw.Data) {
out = append(out, []string{key, value})
}
return out
}
return [][]string{}
} | identifier_body |
integration.go | //
// Author:: Salim Afiune Maya (<[email protected]>)
// Copyright:: Copyright 2020, Lacework Inc.
// License:: Apache License, Version 2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cmd
import (
"strings"
"github.com/AlecAivazis/survey/v2"
"github.com/mitchellh/mapstructure"
"github.com/olekukonko/tablewriter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/lacework/go-sdk/api"
)
var (
// integrationCmd represents the integration command
integrationCmd = &cobra.Command{
Use: "integration",
Aliases: []string{"int"},
Short: "manage external integrations",
Long: `Manage external integrations with the Lacework platform`,
}
// integrationListCmd represents the list sub-command inside the integration command
integrationListCmd = &cobra.Command{
Use: "list",
Short: "list all available external integrations",
Args: cobra.NoArgs,
RunE: func(_ *cobra.Command, _ []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
integrations, err := lacework.Integrations.List()
if err != nil {
return errors.Wrap(err, "unable to get integrations")
}
if cli.JSONOutput() {
return cli.OutputJSON(integrations.Data)
}
cli.OutputHuman(buildIntegrationsTable(integrations.Data))
return nil
},
}
// integrationShowCmd represents the show sub-command inside the integration command
integrationShowCmd = &cobra.Command{
Use: "show <int_guid>",
Short: "Show details about a specific external integration",
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
integration, err := lacework.Integrations.Get(args[0])
if err != nil {
return errors.Wrap(err, "unable to get integration")
}
if cli.JSONOutput() {
return cli.OutputJSON(integration.Data)
}
cli.OutputHuman(buildIntegrationsTable(integration.Data))
cli.OutputHuman("\n")
cli.OutputHuman(buildIntDetailsTable(integration.Data))
return nil
},
}
// integrationCreateCmd represents the create sub-command inside the integration command
integrationCreateCmd = &cobra.Command{
Use: "create",
Short: "create an external integrations",
Args: cobra.NoArgs,
Long: `Creates an external integration in your account through an interactive session.`,
RunE: func(_ *cobra.Command, _ []string) error {
if !cli.InteractiveMode() {
return errors.New("interactive mode is disabled")
}
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
err = promptCreateIntegration(lacework)
if err != nil {
return errors.Wrap(err, "unable to create integration")
}
cli.OutputHuman("The integration was created.\n")
return nil
},
}
// integrationUpdateCmd represents the update sub-command inside the integration command
integrationUpdateCmd = &cobra.Command{
Use: "update",
Hidden: true,
Short: "update an external integrations",
Args: cobra.NoArgs,
RunE: func(_ *cobra.Command, _ []string) error {
return nil
},
}
// integrationDeleteCmd represents the delete sub-command inside the integration command
integrationDeleteCmd = &cobra.Command{
Use: "delete <int_guid>",
Short: "delete an external integrations",
Long: `Delete an external integration by providing its integration GUID. Integration
GUIDs can be found by using the 'lacework integration list' command.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
cli.Log.Info("deleting integration", "int_guid", args[0])
cli.StartProgress(" Deleting integration...")
response, err := lacework.Integrations.Delete(args[0])
cli.StopProgress()
if err != nil {
return errors.Wrap(err, "unable to delete integration")
}
if cli.JSONOutput() {
return cli.OutputJSON(response.Data)
}
cli.OutputHuman("The integration %s was deleted.\n", args[0])
return nil
},
}
)
func init() {
// add the integration command
rootCmd.AddCommand(integrationCmd)
// add sub-commands to the integration command
integrationCmd.AddCommand(integrationListCmd)
integrationCmd.AddCommand(integrationShowCmd)
integrationCmd.AddCommand(integrationCreateCmd)
integrationCmd.AddCommand(integrationUpdateCmd)
integrationCmd.AddCommand(integrationDeleteCmd)
}
func promptCreateIntegration(lacework *api.Client) error {
var (
integration = ""
prompt = &survey.Select{
Message: "Choose an integration type to create: ",
Options: []string{
"Docker Hub",
"AWS Config",
"AWS CloudTrail",
"GCP Config",
"GCP Audit Log",
"Azure Config",
"Azure Activity Log",
//"Docker V2 Registry",
//"Amazon Container Registry",
//"Google Container Registry",
//"Snowflake Data Share",
},
}
err = survey.AskOne(prompt, &integration)
)
if err != nil {
return err
}
switch integration {
case "Docker Hub":
return createDockerHubIntegration(lacework)
case "AWS Config":
return createAwsConfigIntegration(lacework)
case "AWS CloudTrail":
return createAwsCloudTrailIntegration(lacework)
case "GCP Config":
return createGcpConfigIntegration(lacework)
case "GCP Audit Log":
return createGcpAuditLogIntegration(lacework)
case "Azure Config":
return createAzureConfigIntegration(lacework)
case "Azure Activity Log":
return createAzureActivityLogIntegration(lacework)
//case "Docker V2 Registry":
//case "Amazon Container Registry":
//case "Google Container Registry":
//case "Snowflake Data Share":
default:
return errors.New("unknown integration type")
}
}
func integrationsTable(integrations []api.RawIntegration) [][]string {
out := [][]string{}
for _, idata := range integrations {
out = append(out, []string{
idata.IntgGuid,
idata.Name,
idata.Type,
idata.Status(),
idata.StateString(),
})
}
return out
}
func buildIntegrationsTable(integrations []api.RawIntegration) string {
var (
tableBuilder = &strings.Builder{}
t = tablewriter.NewWriter(tableBuilder)
)
t.SetHeader([]string{
"Integration GUID",
"Name",
"Type",
"Status",
"State",
})
t.SetBorder(false)
t.AppendBulk(integrationsTable(integrations))
t.Render()
return tableBuilder.String()
}
func buildIntDetailsTable(integrations []api.RawIntegration) string {
var (
main = &strings.Builder{}
details = &strings.Builder{}
t = tablewriter.NewWriter(details)
)
t.SetBorder(false)
t.SetAlignment(tablewriter.ALIGN_LEFT)
if len(integrations) != 0 {
integration := integrations[0]
t.AppendBulk(reflectIntegrationData(integration))
t.AppendBulk(buildIntegrationState(integration.State))
}
t.Render()
t = tablewriter.NewWriter(main)
t.SetBorder(false)
t.SetAutoWrapText(false)
t.SetHeader([]string{"INTEGRATION DETAILS"})
t.Append([]string{details.String()})
t.Render()
return main.String()
}
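// buildIntegrationState returns no rows when the integration has never
// reported state, so callers can append its result unconditionally.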
func buildIntegrationState(state *api.IntegrationState) [][]string {
if state != nil {
return [][]string{
[]string{"LAST UPDATED TIME", state.LastUpdatedTime},
[]string{"LAST SUCCESSFUL TIME", state.LastSuccessfulTime},
}
}
return [][]string{}
}
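// reflectIntegrationData turns the raw integration payload into display rows
// for the details table; types without a dedicated decoder fall back to a
// flattened key/value dump via deepKeyValueExtract.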
func reflectIntegrationData(raw api.RawIntegration) [][]string {
switch raw.Type {
case api.GcpCfgIntegration.String(),
api.GcpAuditLogIntegration.String():
var iData api.GcpIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"LEVEL", iData.IdType},
[]string{"ORG/PROJECT ID", iData.ID},
[]string{"CLIENT ID", iData.Credentials.ClientId},
[]string{"CLIENT EMAIL", iData.Credentials.ClientEmail},
[]string{"PRIVATE KEY ID", iData.Credentials.PrivateKeyId},
}
if iData.SubscriptionName != "" {
return append(out, []string{"SUBSCRIPTION NAME", iData.SubscriptionName})
}
return out
case api.AwsCfgIntegration.String(),
api.AwsCloudTrailIntegration.String():
var iData api.AwsIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"ROLE ARN", iData.Credentials.RoleArn},
[]string{"EXTERNAL ID", iData.Credentials.ExternalId},
}
if iData.QueueUrl != "" {
return append(out, []string{"QUEUE URL", iData.QueueUrl})
}
return out
case api.AzureCfgIntegration.String(),
api.AzureActivityLogIntegration.String():
var iData api.AzureIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"CLIENT ID", iData.Credentials.ClientID},
[]string{"CLIENT SECRET", iData.Credentials.ClientSecret},
[]string{"TENANT ID", iData.TenantID},
}
if iData.QueueUrl != "" {
return append(out, []string{"QUEUE URL", iData.QueueUrl})
}
return out
default:
out := [][]string{}
for key, value := range deepKeyValueExtract(raw.Data) {
out = append(out, []string{key, value})
}
return out
}
return [][]string{}
}
func deepKeyValueExtract(v interface{}) map[string]string {
out := map[string]string{}
m, ok := v.(map[string]interface{})
if !ok {
return out
}
for key, value := range m |
return out
}
| {
if s, ok := value.(string); ok {
out[key] = s
} else {
deepMap := deepKeyValueExtract(value)
for deepK, deepV := range deepMap {
out[deepK] = deepV
}
}
} | conditional_block |
integration.go | //
// Author:: Salim Afiune Maya (<[email protected]>)
// Copyright:: Copyright 2020, Lacework Inc.
// License:: Apache License, Version 2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cmd
import (
"strings"
"github.com/AlecAivazis/survey/v2"
"github.com/mitchellh/mapstructure"
"github.com/olekukonko/tablewriter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/lacework/go-sdk/api"
)
var (
// integrationCmd represents the integration command
integrationCmd = &cobra.Command{
Use: "integration",
Aliases: []string{"int"},
Short: "manage external integrations",
Long: `Manage external integrations with the Lacework platform`,
}
// integrationListCmd represents the list sub-command inside the integration command
integrationListCmd = &cobra.Command{
Use: "list",
Short: "list all available external integrations",
Args: cobra.NoArgs,
RunE: func(_ *cobra.Command, _ []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
integrations, err := lacework.Integrations.List()
if err != nil {
return errors.Wrap(err, "unable to get integrations")
}
if cli.JSONOutput() {
return cli.OutputJSON(integrations.Data)
}
cli.OutputHuman(buildIntegrationsTable(integrations.Data))
return nil
},
}
// integrationShowCmd represents the show sub-command inside the integration command
integrationShowCmd = &cobra.Command{
Use: "show <int_guid>",
Short: "Show details about a specific external integration",
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
integration, err := lacework.Integrations.Get(args[0])
if err != nil {
return errors.Wrap(err, "unable to get integration")
}
if cli.JSONOutput() {
return cli.OutputJSON(integration.Data)
}
cli.OutputHuman(buildIntegrationsTable(integration.Data))
cli.OutputHuman("\n")
cli.OutputHuman(buildIntDetailsTable(integration.Data))
return nil
},
}
// integrationCreateCmd represents the create sub-command inside the integration command
integrationCreateCmd = &cobra.Command{
Use: "create",
Short: "create an external integrations",
Args: cobra.NoArgs,
Long: `Creates an external integration in your account through an interactive session.`,
RunE: func(_ *cobra.Command, _ []string) error {
if !cli.InteractiveMode() {
return errors.New("interactive mode is disabled")
}
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
err = promptCreateIntegration(lacework)
if err != nil {
return errors.Wrap(err, "unable to create integration")
}
cli.OutputHuman("The integration was created.\n")
return nil
},
}
// integrationUpdateCmd represents the update sub-command inside the integration command
integrationUpdateCmd = &cobra.Command{
Use: "update",
Hidden: true,
Short: "update an external integrations",
Args: cobra.NoArgs,
RunE: func(_ *cobra.Command, _ []string) error {
return nil
},
}
// integrationDeleteCmd represents the delete sub-command inside the integration command
integrationDeleteCmd = &cobra.Command{
Use: "delete <int_guid>",
Short: "delete an external integrations",
Long: `Delete an external integration by providing its integration GUID. Integration
GUIDs can be found by using the 'lacework integration list' command.`,
Args: cobra.ExactArgs(1),
RunE: func(_ *cobra.Command, args []string) error {
lacework, err := api.NewClient(cli.Account,
api.WithLogLevel(cli.LogLevel),
api.WithApiKeys(cli.KeyID, cli.Secret),
)
if err != nil {
return errors.Wrap(err, "unable to generate api client")
}
cli.Log.Info("deleting integration", "int_guid", args[0])
cli.StartProgress(" Deleting integration...")
response, err := lacework.Integrations.Delete(args[0])
cli.StopProgress()
if err != nil {
return errors.Wrap(err, "unable to delete integration")
}
if cli.JSONOutput() {
return cli.OutputJSON(response.Data)
}
cli.OutputHuman("The integration %s was deleted.\n", args[0])
return nil
},
}
)
func init() {
// add the integration command
rootCmd.AddCommand(integrationCmd)
// add sub-commands to the integration command
integrationCmd.AddCommand(integrationListCmd)
integrationCmd.AddCommand(integrationShowCmd)
integrationCmd.AddCommand(integrationCreateCmd)
integrationCmd.AddCommand(integrationUpdateCmd)
integrationCmd.AddCommand(integrationDeleteCmd)
}
func | (lacework *api.Client) error {
var (
integration = ""
prompt = &survey.Select{
Message: "Choose an integration type to create: ",
Options: []string{
"Docker Hub",
"AWS Config",
"AWS CloudTrail",
"GCP Config",
"GCP Audit Log",
"Azure Config",
"Azure Activity Log",
//"Docker V2 Registry",
//"Amazon Container Registry",
//"Google Container Registry",
//"Snowflake Data Share",
},
}
err = survey.AskOne(prompt, &integration)
)
if err != nil {
return err
}
switch integration {
case "Docker Hub":
return createDockerHubIntegration(lacework)
case "AWS Config":
return createAwsConfigIntegration(lacework)
case "AWS CloudTrail":
return createAwsCloudTrailIntegration(lacework)
case "GCP Config":
return createGcpConfigIntegration(lacework)
case "GCP Audit Log":
return createGcpAuditLogIntegration(lacework)
case "Azure Config":
return createAzureConfigIntegration(lacework)
case "Azure Activity Log":
return createAzureActivityLogIntegration(lacework)
//case "Docker V2 Registry":
//case "Amazon Container Registry":
//case "Google Container Registry":
//case "Snowflake Data Share":
default:
return errors.New("unknown integration type")
}
}
func integrationsTable(integrations []api.RawIntegration) [][]string {
out := [][]string{}
for _, idata := range integrations {
out = append(out, []string{
idata.IntgGuid,
idata.Name,
idata.Type,
idata.Status(),
idata.StateString(),
})
}
return out
}
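// buildIntegrationsTable renders the summary rows into a borderless ASCII
// table via tablewriter and returns it as a string for human-readable output.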
func buildIntegrationsTable(integrations []api.RawIntegration) string {
var (
tableBuilder = &strings.Builder{}
t = tablewriter.NewWriter(tableBuilder)
)
t.SetHeader([]string{
"Integration GUID",
"Name",
"Type",
"Status",
"State",
})
t.SetBorder(false)
t.AppendBulk(integrationsTable(integrations))
t.Render()
return tableBuilder.String()
}
func buildIntDetailsTable(integrations []api.RawIntegration) string {
var (
main = &strings.Builder{}
details = &strings.Builder{}
t = tablewriter.NewWriter(details)
)
t.SetBorder(false)
t.SetAlignment(tablewriter.ALIGN_LEFT)
if len(integrations) != 0 {
integration := integrations[0]
t.AppendBulk(reflectIntegrationData(integration))
t.AppendBulk(buildIntegrationState(integration.State))
}
t.Render()
t = tablewriter.NewWriter(main)
t.SetBorder(false)
t.SetAutoWrapText(false)
t.SetHeader([]string{"INTEGRATION DETAILS"})
t.Append([]string{details.String()})
t.Render()
return main.String()
}
func buildIntegrationState(state *api.IntegrationState) [][]string {
if state != nil {
return [][]string{
[]string{"LAST UPDATED TIME", state.LastUpdatedTime},
[]string{"LAST SUCCESSFUL TIME", state.LastSuccessfulTime},
}
}
return [][]string{}
}
func reflectIntegrationData(raw api.RawIntegration) [][]string {
switch raw.Type {
case api.GcpCfgIntegration.String(),
api.GcpAuditLogIntegration.String():
var iData api.GcpIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"LEVEL", iData.IdType},
[]string{"ORG/PROJECT ID", iData.ID},
[]string{"CLIENT ID", iData.Credentials.ClientId},
[]string{"CLIENT EMAIL", iData.Credentials.ClientEmail},
[]string{"PRIVATE KEY ID", iData.Credentials.PrivateKeyId},
}
if iData.SubscriptionName != "" {
return append(out, []string{"SUBSCRIPTION NAME", iData.SubscriptionName})
}
return out
case api.AwsCfgIntegration.String(),
api.AwsCloudTrailIntegration.String():
var iData api.AwsIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"ROLE ARN", iData.Credentials.RoleArn},
[]string{"EXTERNAL ID", iData.Credentials.ExternalId},
}
if iData.QueueUrl != "" {
return append(out, []string{"QUEUE URL", iData.QueueUrl})
}
return out
case api.AzureCfgIntegration.String(),
api.AzureActivityLogIntegration.String():
var iData api.AzureIntegrationData
err := mapstructure.Decode(raw.Data, &iData)
if err != nil {
cli.Log.Debugw("unable to decode integration data",
"integration_type", raw.Type,
"raw_data", raw.Data,
"error", err,
)
break
}
out := [][]string{
[]string{"CLIENT ID", iData.Credentials.ClientID},
[]string{"CLIENT SECRET", iData.Credentials.ClientSecret},
[]string{"TENANT ID", iData.TenantID},
}
if iData.QueueUrl != "" {
return append(out, []string{"QUEUE URL", iData.QueueUrl})
}
return out
default:
out := [][]string{}
for key, value := range deepKeyValueExtract(raw.Data) {
out = append(out, []string{key, value})
}
return out
}
return [][]string{}
}
func deepKeyValueExtract(v interface{}) map[string]string {
out := map[string]string{}
m, ok := v.(map[string]interface{})
if !ok {
return out
}
for key, value := range m {
if s, ok := value.(string); ok {
out[key] = s
} else {
deepMap := deepKeyValueExtract(value)
for deepK, deepV := range deepMap {
out[deepK] = deepV
}
}
}
return out
}
| promptCreateIntegration | identifier_name |
validation_host.rs | // Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
#![cfg(not(any(target_os = "android", target_os = "unknown")))]
use std::{process, env, sync::Arc, sync::atomic, path::PathBuf};
use codec::{Decode, Encode};
use crate::primitives::{ValidationParams, ValidationResult};
use super::{
validate_candidate_internal, ValidationError, InvalidCandidate, InternalError,
MAX_CODE_MEM, MAX_RUNTIME_MEM, MAX_VALIDATION_RESULT_HEADER_MEM,
};
use shared_memory::{SharedMem, SharedMemConf, EventState, WriteLockable, EventWait, EventSet};
use parking_lot::Mutex;
use log::{debug, trace};
use futures::executor::ThreadPool;
use sp_core::traits::SpawnNamed;
const WORKER_ARG: &'static str = "validation-worker";
/// CLI Argument to start in validation worker mode.
pub const WORKER_ARGS: &[&'static str] = &[WORKER_ARG];
/// Execution timeout in seconds.
#[cfg(debug_assertions)]
pub const EXECUTION_TIMEOUT_SEC: u64 = 30;
#[cfg(not(debug_assertions))]
pub const EXECUTION_TIMEOUT_SEC: u64 = 5;
enum Event {
CandidateReady = 0,
ResultReady = 1,
WorkerReady = 2,
}
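// The discriminants double as indices into the shared memory's auto-reset
// events, in the order they are registered in `ValidationHost::create_memory`.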
#[derive(Clone)]
struct TaskExecutor(ThreadPool);
impl TaskExecutor {
fn new() -> Result<Self, String> {
ThreadPool::new().map_err(|e| e.to_string()).map(Self)
}
}
impl SpawnNamed for TaskExecutor {
fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
self.0.spawn_ok(future);
}
fn spawn(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
self.0.spawn_ok(future);
}
}
/// A pool of hosts.
#[derive(Clone, Debug)]
pub struct ValidationPool {
hosts: Arc<Vec<Mutex<ValidationHost>>>,
}
const DEFAULT_NUM_HOSTS: usize = 8;
impl ValidationPool {
/// Creates a validation pool with the default configuration.
pub fn new() -> ValidationPool {
ValidationPool {
hosts: Arc::new((0..DEFAULT_NUM_HOSTS).map(|_| Default::default()).collect()),
}
}
/// Validate a candidate under the given validation code using the next free validation host.
///
/// This will fail if the validation code is not a proper parachain validation module.
///
/// This function will use `std::env::current_exe()` with the default arguments [`WORKER_ARGS`] to run the worker.
pub fn validate_candidate(
&self,
validation_code: &[u8],
params: ValidationParams,
) -> Result<ValidationResult, ValidationError> {
self.validate_candidate_custom(
validation_code,
params,
&env::current_exe().map_err(|err| ValidationError::Internal(err.into()))?,
WORKER_ARGS,
)
}
/// Validate a candidate under the given validation code using the next free validation host.
///
/// This will fail if the validation code is not a proper parachain validation module.
///
/// This function will use the command and the arguments provided in the function's arguments to run the worker.
pub fn validate_candidate_custom(
&self,
validation_code: &[u8],
params: ValidationParams,
command: &PathBuf,
args: &[&str],
) -> Result<ValidationResult, ValidationError> {
for host in self.hosts.iter() {
if let Some(mut host) = host.try_lock() {
return host.validate_candidate(validation_code, params, command, args)
}
}
// all workers are busy, just wait for the first one
self.hosts[0].lock().validate_candidate(validation_code, params, command, args)
}
}
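// Illustrative usage (the validation code and params come from the caller):
//
// let pool = ValidationPool::new();
// let result = pool.validate_candidate(&validation_code, params)?;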
/// Validation worker process entry point. Runs a loop waiting for candidates to validate
/// and sends back results via shared memory.
pub fn run_worker(mem_id: &str) -> Result<(), String> {
let mut memory = match SharedMem::open(mem_id) {
Ok(memory) => memory,
Err(e) => {
debug!("{} Error opening shared memory: {:?}", process::id(), e);
return Err(format!("Error opening shared memory: {:?}", e));
}
};
let exit = Arc::new(atomic::AtomicBool::new(false));
let task_executor = TaskExecutor::new()?;
// spawn parent monitor thread
let watch_exit = exit.clone();
std::thread::spawn(move || {
use std::io::Read;
let mut in_data = Vec::new();
// pipe terminates when parent process exits
std::io::stdin().read_to_end(&mut in_data).ok();
debug!("{} Parent process is dead. Exiting", process::id());
exit.store(true, atomic::Ordering::Relaxed);
});
memory.set(Event::WorkerReady as usize, EventState::Signaled)
.map_err(|e| format!("{} Error setting shared event: {:?}", process::id(), e))?;
loop {
if watch_exit.load(atomic::Ordering::Relaxed) {
break;
}
debug!("{} Waiting for candidate", process::id());
match memory.wait(Event::CandidateReady as usize, shared_memory::Timeout::Sec(3)) {
Err(e) => {
// Timeout
trace!("{} Timeout waiting for candidate: {:?}", process::id(), e);
continue;
}
Ok(()) => {}
}
{
debug!("{} Processing candidate", process::id());
// we have candidate data
let mut slice = memory.wlock_as_slice(0)
.map_err(|e| format!("Error locking shared memory: {:?}", e))?;
let result = {
let data: &mut[u8] = &mut **slice;
let (header_buf, rest) = data.split_at_mut(1024);
let mut header_buf: &[u8] = header_buf;
let header = ValidationHeader::decode(&mut header_buf)
.map_err(|_| format!("Error decoding validation request."))?;
debug!("{} Candidate header: {:?}", process::id(), header);
let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
let (code, _) = code.split_at_mut(header.code_size as usize);
let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
let (call_data, _) = call_data.split_at_mut(header.params_size as usize);
let result = validate_candidate_internal(code, call_data, task_executor.clone());
debug!("{} Candidate validated: {:?}", process::id(), result);
match result {
Ok(r) => ValidationResultHeader::Ok(r),
Err(ValidationError::Internal(e)) =>
ValidationResultHeader::Error(WorkerValidationError::InternalError(e.to_string())),
Err(ValidationError::InvalidCandidate(e)) =>
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e.to_string())),
}
};
let mut data: &mut[u8] = &mut **slice;
result.encode_to(&mut data);
}
debug!("{} Signaling result", process::id());
memory.set(Event::ResultReady as usize, EventState::Signaled)
.map_err(|e| format!("Error setting shared event: {:?}", e))?;
}
Ok(())
}
/// Params header in shared memory. All offsets should be aligned to WASM page size.
#[derive(Encode, Decode, Debug)]
struct ValidationHeader {
code_size: u64,
params_size: u64,
}
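// Layout of the shared memory slab, as carved up by `run_worker` and
// `ValidationHost::validate_candidate` (offsets in bytes):
//
//   [0, 1024)                      SCALE-encoded ValidationHeader
//   [1024, 1024 + MAX_CODE_MEM)    validation code
//   [1024 + MAX_CODE_MEM, ...)     SCALE-encoded ValidationParams
//
// The ValidationResultHeader is written back starting at offset 0.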
#[derive(Encode, Decode, Debug)]
enum WorkerValidationError {
InternalError(String),
ValidationError(String),
}
#[derive(Encode, Decode, Debug)]
enum ValidationResultHeader {
Ok(ValidationResult),
Error(WorkerValidationError),
}
unsafe impl Send for ValidationHost {}
struct ValidationHostMemory(SharedMem);
impl std::fmt::Debug for ValidationHostMemory {
fn | (&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "ValidationHostMemory")
}
}
impl std::ops::Deref for ValidationHostMemory {
type Target = SharedMem;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for ValidationHostMemory {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Default, Debug)]
struct ValidationHost {
worker: Option<process::Child>,
memory: Option<ValidationHostMemory>,
id: u32,
}
impl Drop for ValidationHost {
fn drop(&mut self) {
if let Some(ref mut worker) = &mut self.worker {
worker.kill().ok();
}
}
}
impl ValidationHost {
fn create_memory() -> Result<SharedMem, InternalError> {
let mem_size = MAX_RUNTIME_MEM + MAX_CODE_MEM + MAX_VALIDATION_RESULT_HEADER_MEM;
let mem_config = SharedMemConf::default()
.set_size(mem_size)
.add_lock(shared_memory::LockType::Mutex, 0, mem_size)?
.add_event(shared_memory::EventType::Auto)? // Event::CandidateReady
.add_event(shared_memory::EventType::Auto)? // Event::ResultReady
.add_event(shared_memory::EventType::Auto)?; // Event::WorkerReady
Ok(mem_config.create()?)
}
fn start_worker(&mut self, cmd: &PathBuf, args: &[&str]) -> Result<(), InternalError> {
if let Some(ref mut worker) = self.worker {
// Check if still alive
if let Ok(None) = worker.try_wait() {
// Still running
return Ok(());
}
}
let memory = Self::create_memory()?;
debug!("Starting worker at {:?} with arguments: {:?} and {:?}", cmd, args, memory.get_os_path());
let worker = process::Command::new(cmd)
.args(args)
.arg(memory.get_os_path())
.stdin(process::Stdio::piped())
.spawn()?;
self.id = worker.id();
self.worker = Some(worker);
memory.wait(
Event::WorkerReady as usize,
shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize),
)?;
self.memory = Some(ValidationHostMemory(memory));
Ok(())
}
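// Note: stdin is piped above purely as a liveness channel: the worker's
// monitor thread in `run_worker` blocks reading its stdin and treats EOF
// (this host process exiting) as the signal to shut down.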
/// Validate a candidate under the given validation code.
///
/// This will fail if the validation code is not a proper parachain validation module.
pub fn validate_candidate(
&mut self,
validation_code: &[u8],
params: ValidationParams,
binary: &PathBuf,
args: &[&str],
) -> Result<ValidationResult, ValidationError> {
if validation_code.len() > MAX_CODE_MEM {
return Err(ValidationError::InvalidCandidate(InvalidCandidate::CodeTooLarge(validation_code.len())));
}
// First, check if need to spawn the child process
self.start_worker(binary, args)?;
let memory = self.memory.as_mut()
.expect("memory is always `Some` after `start_worker` completes successfully");
{
// Put data in shared mem
let data: &mut[u8] = &mut **memory.wlock_as_slice(0)
.map_err(|e|ValidationError::Internal(e.into()))?;
let (mut header_buf, rest) = data.split_at_mut(1024);
let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
let (code, _) = code.split_at_mut(validation_code.len());
let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
code[..validation_code.len()].copy_from_slice(validation_code);
let encoded_params = params.encode();
if encoded_params.len() >= MAX_RUNTIME_MEM {
return Err(ValidationError::InvalidCandidate(InvalidCandidate::ParamsTooLarge(MAX_RUNTIME_MEM)));
}
call_data[..encoded_params.len()].copy_from_slice(&encoded_params);
let header = ValidationHeader {
code_size: validation_code.len() as u64,
params_size: encoded_params.len() as u64,
};
header.encode_to(&mut header_buf);
}
debug!("{} Signaling candidate", self.id);
memory.set(Event::CandidateReady as usize, EventState::Signaled)
.map_err(|e| ValidationError::Internal(e.into()))?;
debug!("{} Waiting for results", self.id);
match memory.wait(Event::ResultReady as usize, shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize)) {
Err(e) => {
debug!("Worker timeout: {:?}", e);
if let Some(mut worker) = self.worker.take() {
worker.kill().ok();
}
return Err(ValidationError::InvalidCandidate(InvalidCandidate::Timeout));
}
Ok(()) => {}
}
{
debug!("{} Reading results", self.id);
let data: &[u8] = &**memory.wlock_as_slice(0)
.map_err(|e| ValidationError::Internal(e.into()))?;
let (header_buf, _) = data.split_at(MAX_VALIDATION_RESULT_HEADER_MEM);
let mut header_buf: &[u8] = header_buf;
let header = ValidationResultHeader::decode(&mut header_buf)
.map_err(|e|
InternalError::System(
Box::<dyn std::error::Error + Send + Sync>::from(
format!("Failed to decode `ValidationResultHeader`: {:?}", e)
) as Box<_>
)
)?;
match header {
ValidationResultHeader::Ok(result) => Ok(result),
ValidationResultHeader::Error(WorkerValidationError::InternalError(e)) => {
debug!("{} Internal validation error: {}", self.id, e);
Err(ValidationError::Internal(InternalError::WasmWorker(e)))
},
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e)) => {
debug!("{} External validation error: {}", self.id, e);
Err(ValidationError::InvalidCandidate(InvalidCandidate::ExternalWasmExecutor(e)))
}
}
}
}
}
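// Usage sketch with hypothetical inputs; `ValidationPool`, defined earlier in
// this file, is the public entry point and dispatches to these hosts:
//
//   let pool = ValidationPool::new();
//   let result = pool.validate_candidate(&validation_code, params)?;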
auth.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/auth.proto
package serviceconfig // import "google.golang.org/genproto/googleapis/api/serviceconfig"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Authentication` defines the authentication configuration for an API.
//
// Example for an API targeted for external use:
//
// name: calendar.googleapis.com
// authentication:
// providers:
// - id: google_calendar_auth
// jwks_uri: https://www.googleapis.com/oauth2/v1/certs
// issuer: https://securetoken.google.com
// rules:
// - selector: "*"
// requirements:
// provider_id: google_calendar_auth
type Authentication struct {
// A list of authentication rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*AuthenticationRule `protobuf:"bytes,3,rep,name=rules,proto3" json:"rules,omitempty"`
// Defines a set of authentication providers that a service supports.
Providers []*AuthProvider `protobuf:"bytes,4,rep,name=providers,proto3" json:"providers,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Authentication) Reset() { *m = Authentication{} }
func (m *Authentication) String() string { return proto.CompactTextString(m) }
func (*Authentication) ProtoMessage() {}
func (*Authentication) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_20a218c05ef5a30e, []int{0}
}
func (m *Authentication) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Authentication.Unmarshal(m, b)
}
func (m *Authentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Authentication.Marshal(b, m, deterministic)
}
func (dst *Authentication) XXX_Merge(src proto.Message) {
xxx_messageInfo_Authentication.Merge(dst, src)
}
func (m *Authentication) XXX_Size() int {
return xxx_messageInfo_Authentication.Size(m)
}
func (m *Authentication) XXX_DiscardUnknown() {
xxx_messageInfo_Authentication.DiscardUnknown(m)
}
var xxx_messageInfo_Authentication proto.InternalMessageInfo
func (m *Authentication) GetRules() []*AuthenticationRule {
if m != nil {
return m.Rules
}
return nil
}
func (m *Authentication) GetProviders() []*AuthProvider {
if m != nil {
return m.Providers
}
return nil
}
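// Construction sketch mirroring the YAML example in the comment on
// Authentication above (values are illustrative; field names follow the
// generated structs in this file, assuming the package is imported as
// serviceconfig):
//
//	auth := &serviceconfig.Authentication{
//		Providers: []*serviceconfig.AuthProvider{{
//			Id:      "google_calendar_auth",
//			JwksUri: "https://www.googleapis.com/oauth2/v1/certs",
//			Issuer:  "https://securetoken.google.com",
//		}},
//		Rules: []*serviceconfig.AuthenticationRule{{
//			Selector: "*",
//			Requirements: []*serviceconfig.AuthRequirement{{
//				ProviderId: "google_calendar_auth",
//			}},
//		}},
//	}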
// Authentication rules for the service.
//
// By default, if a method has any authentication requirements, every request
// must include a valid credential matching one of the requirements.
// It's an error to include more than one kind of credential in a single
// request.
//
// If a method doesn't have any auth requirements, request credentials will be
// ignored.
type AuthenticationRule struct {
// Selects the methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// The requirements for OAuth credentials.
Oauth *OAuthRequirements `protobuf:"bytes,2,opt,name=oauth,proto3" json:"oauth,omitempty"`
// If true, the service accepts API keys without any other credential.
AllowWithoutCredential bool `protobuf:"varint,5,opt,name=allow_without_credential,json=allowWithoutCredential,proto3" json:"allow_without_credential,omitempty"`
// Requirements for additional authentication providers.
Requirements []*AuthRequirement `protobuf:"bytes,7,rep,name=requirements,proto3" json:"requirements,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuthenticationRule) Reset() { *m = AuthenticationRule{} }
func (m *AuthenticationRule) String() string { return proto.CompactTextString(m) }
func (*AuthenticationRule) ProtoMessage() {}
func (*AuthenticationRule) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_20a218c05ef5a30e, []int{1}
}
func (m *AuthenticationRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuthenticationRule.Unmarshal(m, b)
}
func (m *AuthenticationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuthenticationRule.Marshal(b, m, deterministic)
}
func (dst *AuthenticationRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_AuthenticationRule.Merge(dst, src)
}
func (m *AuthenticationRule) XXX_Size() int {
return xxx_messageInfo_AuthenticationRule.Size(m)
}
func (m *AuthenticationRule) XXX_DiscardUnknown() {
xxx_messageInfo_AuthenticationRule.DiscardUnknown(m)
}
var xxx_messageInfo_AuthenticationRule proto.InternalMessageInfo
func (m *AuthenticationRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *AuthenticationRule) GetOauth() *OAuthRequirements {
if m != nil {
return m.Oauth
}
return nil
}
func (m *AuthenticationRule) GetAllowWithoutCredential() bool {
if m != nil {
return m.AllowWithoutCredential
}
return false
}
func (m *AuthenticationRule) GetRequirements() []*AuthRequirement {
if m != nil {
return m.Requirements
}
return nil
}
// Configuration for an authentication provider, including support for
// [JSON Web Token
// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
type AuthProvider struct {
// The unique identifier of the auth provider. It will be referred to by
// `AuthRequirement.provider_id`.
//
// Example: "bookstore_auth".
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Identifies the principal that issued the JWT. See
// https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1
// Usually a URL or an email address.
//
// Example: https://securetoken.google.com
// Example: [email protected]
Issuer string `protobuf:"bytes,2,opt,name=issuer,proto3" json:"issuer,omitempty"`
// URL of the provider's public key set to validate signature of the JWT. See
// [OpenID
// Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
// Optional if the key set document:
// - can be retrieved from
// [OpenID
// Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html) of
// the issuer.
// - can be inferred from the email domain of the issuer (e.g. a Google
// service account).
//
// Example: https://www.googleapis.com/oauth2/v1/certs
JwksUri string `protobuf:"bytes,3,opt,name=jwks_uri,json=jwksUri,proto3" json:"jwks_uri,omitempty"`
// The list of JWT
// [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).
// that are allowed to access. A JWT containing any of these audiences will
// be accepted. When this setting is absent, only JWTs with audience
// "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
// will be accepted. For example, if no audiences are in the setting,
// LibraryService API will only accept JWTs with the following audience
// "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
//
// Example:
//
// audiences: bookstore_android.apps.googleusercontent.com,
// bookstore_web.apps.googleusercontent.com
Audiences string `protobuf:"bytes,4,opt,name=audiences,proto3" json:"audiences,omitempty"`
// Redirect URL if JWT token is required but not present or is expired.
// Implements the authorizationUrl field of securityDefinitions in the OpenAPI spec.
AuthorizationUrl string `protobuf:"bytes,5,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuthProvider) Reset() { *m = AuthProvider{} }
func (m *AuthProvider) String() string { return proto.CompactTextString(m) }
func (*AuthProvider) ProtoMessage() {}
func (*AuthProvider) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_20a218c05ef5a30e, []int{2}
}
func (m *AuthProvider) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuthProvider.Unmarshal(m, b)
}
func (m *AuthProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuthProvider.Marshal(b, m, deterministic)
}
// auth.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/auth.proto
package serviceconfig // import "google.golang.org/genproto/googleapis/api/serviceconfig"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Authentication` defines the authentication configuration for an API.
//
// Example for an API targeted for external use:
//
// name: calendar.googleapis.com
// authentication:
// providers:
// - id: google_calendar_auth
// jwks_uri: https://www.googleapis.com/oauth2/v1/certs
// issuer: https://securetoken.google.com
// rules:
// - selector: "*"
// requirements:
// provider_id: google_calendar_auth
type Authentication struct {
// A list of authentication rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*AuthenticationRule `protobuf:"bytes,3,rep,name=rules,proto3" json:"rules,omitempty"`
// Defines a set of authentication providers that a service supports.
Providers []*AuthProvider `protobuf:"bytes,4,rep,name=providers,proto3" json:"providers,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Authentication) Reset() { *m = Authentication{} }
func (m *Authentication) String() string { return proto.CompactTextString(m) }
func (*Authentication) ProtoMessage() {}
func (*Authentication) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_20a218c05ef5a30e, []int{0}
}
func (m *Authentication) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Authentication.Unmarshal(m, b)
}
func (m *Authentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Authentication.Marshal(b, m, deterministic)
}
func (dst *Authentication) XXX_Merge(src proto.Message) {
xxx_messageInfo_Authentication.Merge(dst, src)
}
func (m *Authentication) XXX_Size() int {
return xxx_messageInfo_Authentication.Size(m)
}
func (m *Authentication) XXX_DiscardUnknown() {
xxx_messageInfo_Authentication.DiscardUnknown(m)
}
var xxx_messageInfo_Authentication proto.InternalMessageInfo
func (m *Authentication) GetRules() []*AuthenticationRule {
if m != nil {
return m.Rules
}
return nil
}
func (m *Authentication) GetProviders() []*AuthProvider {
if m != nil {
return m.Providers
}
return nil
}
// Authentication rules for the service.
//
// By default, if a method has any authentication requirements, every request
// must include a valid credential matching one of the requirements.
// It's an error to include more than one kind of credential in a single
// request.
//
// If a method doesn't have any auth requirements, request credentials will be
// ignored.
type AuthenticationRule struct {
// Selects the methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// The requirements for OAuth credentials.
Oauth *OAuthRequirements `protobuf:"bytes,2,opt,name=oauth,proto3" json:"oauth,omitempty"`
// If true, the service accepts API keys without any other credential.
AllowWithoutCredential bool `protobuf:"varint,5,opt,name=allow_without_credential,json=allowWithoutCredential,proto3" json:"allow_without_credential,omitempty"`
// Requirements for additional authentication providers.
Requirements []*AuthRequirement `protobuf:"bytes,7,rep,name=requirements,proto3" json:"requirements,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuthenticationRule) Reset() { *m = AuthenticationRule{} }
func (m *AuthenticationRule) String() string { return proto.CompactTextString(m) }
func (*AuthenticationRule) ProtoMessage() {}
func (*AuthenticationRule) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_20a218c05ef5a30e, []int{1}
}
func (m *AuthenticationRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuthenticationRule.Unmarshal(m, b)
}
func (m *AuthenticationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuthenticationRule.Marshal(b, m, deterministic)
}
func (dst *AuthenticationRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_AuthenticationRule.Merge(dst, src)
}
func (m *AuthenticationRule) XXX_Size() int {
return xxx_messageInfo_AuthenticationRule.Size(m)
}
func (m *AuthenticationRule) XXX_DiscardUnknown() {
xxx_messageInfo_AuthenticationRule.DiscardUnknown(m)
}
var xxx_messageInfo_AuthenticationRule proto.InternalMessageInfo
func (m *AuthenticationRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *AuthenticationRule) GetOauth() *OAuthRequirements {
if m != nil {
return m.Oauth
}
return nil
}
func (m *AuthenticationRule) GetAllowWithoutCredential() bool {
if m != nil {
return m.AllowWithoutCredential
}
return false
}
func (m *AuthenticationRule) GetRequirements() []*AuthRequirement {
if m != nil {
return m.Requirements
}
return nil
}
// Configuration for an authentication provider, including support for
// [JSON Web Token
// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
type AuthProvider struct {
// The unique identifier of the auth provider. It will be referred to by
// `AuthRequirement.provider_id`.
//
// Example: "bookstore_auth".
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Identifies the principal that issued the JWT. See
// https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1
// Usually a URL or an email address.
//
// Example: https://securetoken.google.com
// Example: [email protected]
Issuer string `protobuf:"bytes,2,opt,name=issuer,proto3" json:"issuer,omitempty"`
// URL of the provider's public key set to validate signature of the JWT. See
// [OpenID
// Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
// Optional if the key set document:
// - can be retrieved from
// [OpenID
// Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html) of
// the issuer.
// - can be inferred from the email domain of the issuer (e.g. a Google
// service account).
//
// Example: https://www.googleapis.com/oauth2/v1/certs
JwksUri string `protobuf:"bytes,3,opt,name=jwks_uri,json=jwksUri,proto3" json:"jwks_uri,omitempty"`
// The list of JWT
// [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).
// that are allowed to access. A JWT containing any of these audiences will
// be accepted. When this setting is absent, only JWTs with audience
// "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
// will be accepted. For example, if no audiences are in the setting,
// LibraryService API will only accept JWTs with the following audience
// "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
//
// Example:
//
// audiences: bookstore_android.apps.googleusercontent.com,
// bookstore_web.apps.googleusercontent.com
Audiences string `protobuf:"bytes,4,opt,name=audiences,proto3" json:"audiences,omitempty"`
// Redirect URL if JWT token is required but not present or is expired.
// Implement authorizationUrl of securityDefinitions in OpenAPI spec.
AuthorizationUrl string `protobuf:"bytes,5,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuthProvider) Reset() { *m = AuthProvider{} }
func (m *AuthProvider) String() string { return proto.CompactTextString(m) }
func (*AuthProvider) ProtoMessage() {}
func (*AuthProvider) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_20a218c05ef5a30e, []int{2}
}
func (m *AuthProvider) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuthProvider.Unmarshal(m, b)
}
func (m *AuthProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuthProvider.Marshal(b, m, deterministic)
}
func (dst *AuthProvider) XXX_Merge(src proto.Message) {
xxx_messageInfo_AuthProvider.Merge(dst, src)
}
func (m *AuthProvider) XXX_Size() int {
return xxx_messageInfo_AuthProvider.Size(m)
}
func (m *AuthProvider) XXX_DiscardUnknown() {
xxx_messageInfo_AuthProvider.DiscardUnknown(m)
}
var xxx_messageInfo_AuthProvider proto.InternalMessageInfo
func (m *AuthProvider) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *AuthProvider) GetIssuer() string {
if m != nil {
return m.Issuer
}
return ""
}
func (m *AuthProvider) GetJwksUri() string {
if m != nil {
return m.JwksUri
}
return ""
}
func (m *AuthProvider) GetAudiences() string {
if m != nil {
return m.Audiences
}
return ""
}
func (m *AuthProvider) GetAuthorizationUrl() string {
if m != nil {
return m.AuthorizationUrl
}
return ""
}
// OAuth scopes are a way to define data and permissions on data. For example,
// there are scopes defined for "Read-only access to Google Calendar" and
// "Access to Cloud Platform". Users can consent to a scope for an application,
// giving it permission to access that data on their behalf.
//
// OAuth scope specifications should be fairly coarse grained; a user will need
// to see and understand the text description of what your scope means.
//
// In most cases: use one or at most two OAuth scopes for an entire family of
// products. If your product has multiple APIs, you should probably be sharing
// the OAuth scope across all of those APIs.
//
// When you need finer grained OAuth consent screens: talk with your product
// management about how developers will use them in practice.
//
// Please note that even though each of the canonical scopes is enough for a
// request to be accepted and passed to the backend, a request can still fail
// due to the backend requiring additional scopes or permissions.
type OAuthRequirements struct {
// The list of publicly documented OAuth scopes that are allowed access. An
// OAuth token containing any of these scopes will be accepted.
//
// Example:
//
// canonical_scopes: https://www.googleapis.com/auth/calendar,
// https://www.googleapis.com/auth/calendar.read
CanonicalScopes string `protobuf:"bytes,1,opt,name=canonical_scopes,json=canonicalScopes,proto3" json:"canonical_scopes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *OAuthRequirements) Reset() { *m = OAuthRequirements{} }
func (m *OAuthRequirements) String() string { return proto.CompactTextString(m) }
func (*OAuthRequirements) ProtoMessage() {}
func (*OAuthRequirements) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_20a218c05ef5a30e, []int{3}
}
func (m *OAuthRequirements) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OAuthRequirements.Unmarshal(m, b)
}
func (m *OAuthRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OAuthRequirements.Marshal(b, m, deterministic)
}
func (dst *OAuthRequirements) XXX_Merge(src proto.Message) {
xxx_messageInfo_OAuthRequirements.Merge(dst, src)
}
func (m *OAuthRequirements) XXX_Size() int {
return xxx_messageInfo_OAuthRequirements.Size(m)
}
func (m *OAuthRequirements) XXX_DiscardUnknown() {
xxx_messageInfo_OAuthRequirements.DiscardUnknown(m)
}
var xxx_messageInfo_OAuthRequirements proto.InternalMessageInfo
func (m *OAuthRequirements) GetCanonicalScopes() string {
if m != nil {
return m.CanonicalScopes
}
return ""
}
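// Illustrative sketch (not part of the generated code): per the guidance
// above, scopes stay coarse -- one or two for a whole product family. A rule
// built this way accepts any OAuth token carrying one of the listed scopes.
// The selector value is hypothetical.
//
//	rule := &AuthenticationRule{
//	    Selector: "google.calendar.v3.Calendar.*",
//	    Oauth: &OAuthRequirements{
//	        CanonicalScopes: "https://www.googleapis.com/auth/calendar," +
//	            " https://www.googleapis.com/auth/calendar.read",
//	    },
//	}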
// User-defined authentication requirements, including support for
// [JSON Web Token
// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
type AuthRequirement struct {
// [id][google.api.AuthProvider.id] from authentication provider.
//
// Example:
//
// provider_id: bookstore_auth
ProviderId string `protobuf:"bytes,1,opt,name=provider_id,json=providerId,proto3" json:"provider_id,omitempty"`
// NOTE: This will be deprecated soon, once AuthProvider.audiences is
// implemented and accepted in all the runtime components.
//
// The list of JWT
// [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3).
// that are allowed to access. A JWT containing any of these audiences will
// be accepted. When this setting is absent, only JWTs with audience
// "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
// will be accepted. For example, if no audiences are in the setting,
// LibraryService API will only accept JWTs with the following audience
// "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
//
// Example:
//
// audiences: bookstore_android.apps.googleusercontent.com,
// bookstore_web.apps.googleusercontent.com
Audiences string `protobuf:"bytes,2,opt,name=audiences,proto3" json:"audiences,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuthRequirement) Reset() { *m = AuthRequirement{} }
func (m *AuthRequirement) String() string { return proto.CompactTextString(m) }
func (*AuthRequirement) ProtoMessage() {}
func (*AuthRequirement) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_20a218c05ef5a30e, []int{4}
}
func (m *AuthRequirement) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuthRequirement.Unmarshal(m, b)
}
func (m *AuthRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuthRequirement.Marshal(b, m, deterministic)
}
func (dst *AuthRequirement) XXX_Merge(src proto.Message) {
xxx_messageInfo_AuthRequirement.Merge(dst, src)
}
func (m *AuthRequirement) XXX_Size() int {
return xxx_messageInfo_AuthRequirement.Size(m)
}
func (m *AuthRequirement) XXX_DiscardUnknown() {
xxx_messageInfo_AuthRequirement.DiscardUnknown(m)
}
var xxx_messageInfo_AuthRequirement proto.InternalMessageInfo
func (m *AuthRequirement) GetProviderId() string {
if m != nil {
return m.ProviderId
}
return ""
}
func (m *AuthRequirement) GetAudiences() string {
if m != nil {
return m.Audiences
}
return ""
}
func init() {
proto.RegisterType((*Authentication)(nil), "google.api.Authentication")
proto.RegisterType((*AuthenticationRule)(nil), "google.api.AuthenticationRule")
proto.RegisterType((*AuthProvider)(nil), "google.api.AuthProvider")
proto.RegisterType((*OAuthRequirements)(nil), "google.api.OAuthRequirements")
proto.RegisterType((*AuthRequirement)(nil), "google.api.AuthRequirement")
}
func init() { proto.RegisterFile("google/api/auth.proto", fileDescriptor_auth_20a218c05ef5a30e) }
var fileDescriptor_auth_20a218c05ef5a30e = []byte{
// 452 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0x4d, 0x6b, 0xdb, 0x40,
0x10, 0x45, 0x72, 0x9c, 0x58, 0xe3, 0xe0, 0x24, 0x0b, 0x0d, 0xea, 0xb7, 0xd1, 0xc9, 0xa5, 0x20,
0x43, 0x52, 0x4a, 0xa1, 0xd0, 0x92, 0x94, 0x52, 0x72, 0xaa, 0xd9, 0x12, 0x0a, 0xbd, 0x88, 0xed,
0x6a, 0x2b, 0x6f, 0xb3, 0xd1, 0xa8, 0xfb, 0x11, 0x43, 0x0f, 0xfd, 0x25, 0x3d, 0xf5, 0x97, 0xf5,
0xa7, 0x04, 0xad, 0x14, 0x5b, 0x72, 0x8e, 0x33, 0xef, 0xbd, 0x99, 0x79, 0x6f, 0x17, 0x1e, 0x14,
0x88, 0x85, 0x12, 0x73, 0x56, 0xc9, 0x39, 0x73, 0x76, 0x99, 0x56, 0x1a, 0x2d, 0x12, 0x68, 0xda,
0x29, 0xab, 0x64, 0xf2, 0x07, 0x26, 0x67, 0xce, 0x2e, 0x45, 0x69, 0x25, 0x67, 0x56, 0x62, 0x49,
0x5e, 0xc1, 0x50, 0x3b, 0x25, 0x4c, 0x3c, 0x98, 0x0e, 0x66, 0xe3, 0x93, 0x67, 0xe9, 0x86, 0x9d,
0xf6, 0xa9, 0xd4, 0x29, 0x41, 0x1b, 0x32, 0x79, 0x0d, 0x51, 0xa5, 0xf1, 0x46, 0xe6, 0x42, 0x9b,
0x78, 0xc7, 0x2b, 0xe3, 0x6d, 0xe5, 0xa2, 0x25, 0xd0, 0x0d, 0x35, 0xf9, 0x1f, 0x00, 0xb9, 0x3f,
0x95, 0x3c, 0x82, 0x91, 0x11, 0x4a, 0x70, 0x8b, 0x3a, 0x0e, 0xa6, 0xc1, 0x2c, 0xa2, 0xeb, 0x9a,
0x9c, 0xc2, 0x10, 0x6b, 0x37, 0x71, 0x38, 0x0d, 0x66, 0xe3, 0x93, 0xa7, 0xdd, 0x35, 0x9f, 0xeb,
0x59, 0x54, 0xfc, 0x72, 0x52, 0x8b, 0x6b, 0x51, 0x5a, 0x43, 0x1b, 0x2e, 0x79, 0x03, 0x31, 0x53,
0x0a, 0x57, 0xd9, 0x4a, 0xda, 0x25, 0x3a, 0x9b, 0x71, 0x2d, 0xf2, 0x7a, 0x29, 0x53, 0xf1, 0x70,
0x1a, 0xcc, 0x46, 0xf4, 0xd8, 0xe3, 0x5f, 0x1b, 0xf8, 0xc3, 0x1a, 0x25, 0xef, 0x61, 0x5f, 0x77,
0x06, 0xc6, 0x7b, 0xde, 0xdc, 0xe3, 0x6d, 0x73, 0x9d, 0xa5, 0xb4, 0x27, 0x48, 0xfe, 0x06, 0xb0,
0xdf, 0xb5, 0x4f, 0x26, 0x10, 0xca, 0xbc, 0xb5, 0x15, 0xca, 0x9c, 0x1c, 0xc3, 0xae, 0x34, 0xc6,
0x09, 0xed, 0x1d, 0x45, 0xb4, 0xad, 0xc8, 0x43, 0x18, 0xfd, 0x5c, 0x5d, 0x99, 0xcc, 0x69, 0x19,
0x0f, 0x3c, 0xb2, 0x57, 0xd7, 0x97, 0x5a, 0x92, 0x27, 0x10, 0x31, 0x97, 0x4b, 0x51, 0x72, 0x51,
0xc7, 0x5d, 0x63, 0x9b, 0x06, 0x79, 0x09, 0x47, 0xb5, 0x69, 0xd4, 0xf2, 0xb7, 0x8f, 0x34, 0x73,
0xba, 0x71, 0x19, 0xd1, 0xc3, 0x1e, 0x70, 0xa9, 0x55, 0xf2, 0x0e, 0x8e, 0xee, 0xa5, 0x46, 0x5e,
0xc0, 0x21, 0x67, 0x25, 0x96, 0x92, 0x33, 0x95, 0x19, 0x8e, 0x95, 0x30, 0xed, 0xc1, 0x07, 0xeb,
0xfe, 0x17, 0xdf, 0x4e, 0x16, 0x70, 0xb0, 0x25, 0x27, 0xcf, 0x61, 0x7c, 0xf7, 0xc2, 0xd9, 0xda,
0x29, 0xdc, 0xb5, 0x2e, 0xf2, 0xfe, 0xf9, 0xe1, 0xd6, 0xf9, 0xe7, 0x57, 0x30, 0xe1, 0x78, 0xdd,
0x09, 0xf8, 0x3c, 0x6a, 0xf3, 0xb3, 0xb8, 0x08, 0xbe, 0x7d, 0x6c, 0x81, 0x02, 0x15, 0x2b, 0x8b,
0x14, 0x75, 0x31, 0x2f, 0x44, 0xe9, 0xbf, 0xf6, 0xbc, 0x81, 0x58, 0x25, 0x8d, 0xff, 0xf4, 0x46,
0xe8, 0x1b, 0xc9, 0x05, 0xc7, 0xf2, 0x87, 0x2c, 0xde, 0xf6, 0xaa, 0x7f, 0xe1, 0xce, 0xa7, 0xb3,
0xc5, 0xc5, 0xf7, 0x5d, 0x2f, 0x3c, 0xbd, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x54, 0x91, 0x94, 0x96,
0x2c, 0x03, 0x00, 0x00,
}
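// exampleAuthentication is an illustrative sketch, not part of the generated
// output above: it assembles the Authentication config from the doc-comment
// example (a single JWT provider, "bookstore_auth", required on every method)
// and serializes it with proto.Marshal. The function name and literal values
// are hypothetical.
func exampleAuthentication() ([]byte, error) {
	auth := &Authentication{
		Providers: []*AuthProvider{{
			Id:      "bookstore_auth",
			Issuer:  "https://securetoken.google.com",
			JwksUri: "https://www.googleapis.com/oauth2/v1/certs",
		}},
		Rules: []*AuthenticationRule{{
			// "*" selects all methods; service config rules follow
			// "last one wins" order.
			Selector:     "*",
			Requirements: []*AuthRequirement{{ProviderId: "bookstore_auth"}},
		}},
	}
	// Wire-format bytes suitable for embedding in a service configuration.
	return proto.Marshal(auth)
}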
# info.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import inspect
import textwrap
from six.moves import zip_longest
import llnl.util.tty as tty
import llnl.util.tty.color as color
from llnl.util.tty.colify import colify
import spack.cmd.common.arguments as arguments
import spack.fetch_strategy as fs
import spack.repo
import spack.spec
from spack.package_base import has_test_method, preferred_version
description = "get detailed information on a particular package"
section = "basic"
level = "short"
header_color = "@*b"
plain_format = "@."
def padder(str_list, extra=0):
"""Return a function to pad elements of a list."""
length = max(len(str(s)) for s in str_list) + extra
def pad(string):
string = str(string)
padding = max(0, length - len(string))
return string + (padding * " ")
return pad
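# Illustrative example (hypothetical values): with extra=4, every element is
# padded out to the longest width plus four trailing spaces.
#
#   pad = padder(["1.2.0", "1.10.1"], 4)
#   pad("1.2.0")   # -> "1.2.0     " (10 characters wide)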
def setup_parser(subparser):
subparser.add_argument(
"-a", "--all", action="store_true", default=False, help="output all package information"
)
options = [
("--detectable", print_detectable.__doc__),
("--maintainers", print_maintainers.__doc__),
("--no-dependencies", "do not " + print_dependencies.__doc__),
("--no-variants", "do not " + print_variants.__doc__),
("--no-versions", "do not " + print_versions.__doc__),
("--phases", print_phases.__doc__),
("--tags", print_tags.__doc__),
("--tests", print_tests.__doc__),
("--virtuals", print_virtuals.__doc__),
]
for opt, help_comment in options:
subparser.add_argument(opt, action="store_true", help=help_comment)
arguments.add_common_arguments(subparser, ["package"])
def section_title(s):
return header_color + s + plain_format
def version(s):
return spack.spec.version_color + s + plain_format
def variant(s):
return spack.spec.enabled_variant_color + s + plain_format
class VariantFormatter(object):
def __init__(self, variants):
        self.variants = variants
        self.headers = ("Name [Default]", "When", "Allowed values", "Description")
        # Formats
        fmt_name = "{0} [{1}]"
        # Initialize column widths with the length of the
        # corresponding headers, as they cannot be shorter
        # than that
        self.column_widths = [len(x) for x in self.headers]
        # Expand columns based on max line lengths
        for k, e in variants.items():
            v, w = e
            candidate_max_widths = (
                len(fmt_name.format(k, self.default(v))),  # Name [Default]
                len(str(w)),
                len(v.allowed_values),  # Allowed values
                len(v.description),  # Description
            )
            self.column_widths = (
                max(self.column_widths[0], candidate_max_widths[0]),
                max(self.column_widths[1], candidate_max_widths[1]),
                max(self.column_widths[2], candidate_max_widths[2]),
                max(self.column_widths[3], candidate_max_widths[3]),
            )
        # Don't let name or possible values be less than max widths
        _, cols = tty.terminal_size()
        max_name = min(self.column_widths[0], 30)
        max_when = min(self.column_widths[1], 30)
        max_vals = min(self.column_widths[2], 20)
        # allow the description column to extend as wide as the terminal.
        max_description = min(
            self.column_widths[3],
            # min width 70 cols, 14 cols of margins and column spacing
            max(cols, 70) - max_name - max_vals - 14,
        )
        self.column_widths = (max_name, max_when, max_vals, max_description)
        # Compute the format
        self.fmt = "%%-%ss%%-%ss%%-%ss%%s" % (
            self.column_widths[0] + 4,
            self.column_widths[1] + 4,
            self.column_widths[2] + 4,
        )
def default(self, v):
s = "on" if v.default is True else "off"
if not isinstance(v.default, bool):
s = v.default
return s
@property
def lines(self):
if not self.variants:
yield " None"
else:
yield " " + self.fmt % self.headers
underline = tuple([w * "=" for w in self.column_widths])
yield " " + self.fmt % underline
yield ""
for k, e in sorted(self.variants.items()):
v, w = e
name = textwrap.wrap(
"{0} [{1}]".format(k, self.default(v)), width=self.column_widths[0]
)
if all(spec == spack.spec.Spec() for spec in w):
w = "--"
when = textwrap.wrap(str(w), width=self.column_widths[1])
allowed = v.allowed_values.replace("True, False", "on, off")
allowed = textwrap.wrap(allowed, width=self.column_widths[2])
description = []
for d_line in v.description.split("\n"):
description += textwrap.wrap(d_line, width=self.column_widths[3])
for t in zip_longest(name, when, allowed, description, fillvalue=""):
yield " " + self.fmt % t
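# Worked example (illustrative): with no variants, column_widths stays at the
# header lengths (14, 4, 14, 11); the 30/30/20-column caps and the terminal
# clamp on the description only bite when variant text is wider than that.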
def print_dependencies(pkg):
"""output build, link, and run package dependencies"""
for deptype in ("build", "link", "run"):
color.cprint("")
color.cprint(section_title("%s Dependencies:" % deptype.capitalize()))
deps = sorted(pkg.dependencies_of_type(deptype))
if deps:
colify(deps, indent=4)
else:
color.cprint(" None")
def print_detectable(pkg):
"""output information on external detection"""
color.cprint("")
color.cprint(section_title("Externally Detectable: "))
    # If the package has an 'executables' or 'libraries' field, it
# can detect an installation
if hasattr(pkg, "executables") or hasattr(pkg, "libraries"):
find_attributes = []
if hasattr(pkg, "determine_version"):
find_attributes.append("version")
if hasattr(pkg, "determine_variants"):
find_attributes.append("variants")
# If the package does not define 'determine_version' nor
# 'determine_variants', then it must use some custom detection
# mechanism. In this case, just inform the user it's detectable somehow.
color.cprint(
" True{0}".format(
" (" + ", ".join(find_attributes) + ")" if find_attributes else ""
)
)
else:
color.cprint(" False")
def print_maintainers(pkg):
"""output package maintainers"""
if len(pkg.maintainers) > 0:
mnt = " ".join(["@@" + m for m in pkg.maintainers])
color.cprint("")
color.cprint(section_title("Maintainers: ") + mnt)
def print_phases(pkg):
"""output installation phases"""
if hasattr(pkg, "phases") and pkg.phases:
color.cprint("")
color.cprint(section_title("Installation Phases:"))
phase_str = ""
for phase in pkg.phases:
phase_str += " {0}".format(phase)
color.cprint(phase_str)
def print_tags(pkg):
"""output package tags"""
color.cprint("")
color.cprint(section_title("Tags: "))
if hasattr(pkg, "tags"):
tags = sorted(pkg.tags)
colify(tags, indent=4)
else:
color.cprint(" None")
def print_tests(pkg):
"""output relevant build-time and stand-alone tests"""
    # Some built-in base packages (e.g., Autotools) define callbacks (e.g.,
    # check) inherited by descendant packages. These checks may not result
    # in build-time testing if the package's build does not implement the
    # expected functionality (e.g., 'check' or 'test' targets).
    #
    # So the presence of a callback in Spack does not necessarily correspond
    # to the actual presence of build-time tests for a package.
for callbacks, phase in [
(pkg.build_time_test_callbacks, "Build"),
(pkg.install_time_test_callbacks, "Install"),
]:
color.cprint("")
color.cprint(section_title("Available {0} Phase Test Methods:".format(phase)))
names = []
if callbacks:
for name in callbacks:
if getattr(pkg, name, False):
names.append(name)
if names:
colify(sorted(names), indent=4)
else:
color.cprint(" None")
# PackageBase defines an empty install/smoke test but we want to know
# if it has been overridden and, therefore, assumed to be implemented.
color.cprint("")
color.cprint(section_title("Stand-Alone/Smoke Test Methods:"))
names = []
pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
if has_test_method(pkg_cls):
pkg_base = spack.package_base.PackageBase
test_pkgs = [
str(cls.test)
for cls in inspect.getmro(pkg_cls)
if issubclass(cls, pkg_base) and cls.test != pkg_base.test
]
test_pkgs = list(set(test_pkgs))
names.extend([(test.split()[1]).lower() for test in test_pkgs])
# TODO Refactor START
# Use code from package_base.py's test_process IF this functionality is
# accepted.
v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))
# hack for compilers that are not dependencies (yet)
# TODO: this all eventually goes away
c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
if pkg.name in c_names:
v_names.extend(["c", "cxx", "fortran"])
if pkg.spec.satisfies("llvm+clang"):
v_names.extend(["c", "cxx"])
# TODO Refactor END
v_specs = [spack.spec.Spec(v_name) for v_name in v_names]
for v_spec in v_specs:
try:
pkg_cls = spack.repo.path.get_pkg_class(v_spec.name)
if has_test_method(pkg_cls):
names.append("{0}.test".format(pkg_cls.name.lower()))
except spack.repo.UnknownPackageError:
pass
if names:
colify(sorted(names), indent=4)
else:
color.cprint(" None")
def print_variants(pkg):
"""output variants"""
color.cprint("")
color.cprint(section_title("Variants:"))
formatter = VariantFormatter(pkg.variants)
for line in formatter.lines:
color.cprint(color.cescape(line))
def print_versions(pkg):
"""output versions"""
color.cprint("")
color.cprint(section_title("Preferred version: "))
if not pkg.versions:
color.cprint(version(" None"))
color.cprint("")
color.cprint(section_title("Safe versions: "))
color.cprint(version(" None"))
color.cprint("")
color.cprint(section_title("Deprecated versions: "))
color.cprint(version(" None"))
else:
pad = padder(pkg.versions, 4)
preferred = preferred_version(pkg)
url = ""
if pkg.has_code:
url = fs.for_package_version(pkg, preferred)
line = version(" {0}".format(pad(preferred))) + color.cescape(url)
color.cprint(line)
safe = []
deprecated = []
for v in reversed(sorted(pkg.versions)):
if pkg.has_code:
url = fs.for_package_version(pkg, v)
if pkg.versions[v].get("deprecated", False):
deprecated.append((v, url))
else:
safe.append((v, url))
for title, vers in [("Safe", safe), ("Deprecated", deprecated)]:
color.cprint("")
color.cprint(section_title("{0} versions: ".format(title)))
if not vers:
color.cprint(version(" None"))
continue
for v, url in vers:
line = version(" {0}".format(pad(v))) + color.cescape(url)
color.cprint(line)
def print_virtuals(pkg):
"""output virtual packages"""
color.cprint("")
color.cprint(section_title("Virtual Packages: "))
if pkg.provided:
inverse_map = {}
for spec, whens in pkg.provided.items():
for when in whens:
if when not in inverse_map:
inverse_map[when] = set()
inverse_map[when].add(spec)
for when, specs in reversed(sorted(inverse_map.items())):
line = " %s provides %s" % (
when.colorized(),
", ".join(s.colorized() for s in specs),
)
print(line)
else:
color.cprint(" None")
def info(parser, args):
spec = spack.spec.Spec(args.package)
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
pkg = pkg_cls(spec)
# Output core package information
header = section_title("{0}: ").format(pkg.build_system_class) + pkg.name
color.cprint(header)
color.cprint("")
color.cprint(section_title("Description:"))
if pkg.__doc__:
color.cprint(color.cescape(pkg.format_doc(indent=4)))
else:
color.cprint(" None")
color.cprint(section_title("Homepage: ") + pkg.homepage)
# Now output optional information in expected order
sections = [
(args.all or args.maintainers, print_maintainers),
(args.all or args.detectable, print_detectable),
(args.all or args.tags, print_tags),
(args.all or not args.no_versions, print_versions),
(args.all or not args.no_variants, print_variants),
(args.all or args.phases, print_phases),
(args.all or not args.no_dependencies, print_dependencies),
(args.all or args.virtuals, print_virtuals),
(args.all or args.tests, print_tests),
]
for print_it, func in sections:
if print_it:
func(pkg)
color.cprint("")
info.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import inspect
import textwrap
from six.moves import zip_longest
import llnl.util.tty as tty
import llnl.util.tty.color as color
from llnl.util.tty.colify import colify
import spack.cmd.common.arguments as arguments
import spack.fetch_strategy as fs
import spack.repo
import spack.spec
from spack.package_base import has_test_method, preferred_version
description = "get detailed information on a particular package"
section = "basic"
level = "short"
header_color = "@*b"
plain_format = "@."
def padder(str_list, extra=0):
"""Return a function to pad elements of a list."""
length = max(len(str(s)) for s in str_list) + extra
def pad(string):
string = str(string)
padding = max(0, length - len(string))
return string + (padding * " ")
return pad
def setup_parser(subparser):
subparser.add_argument(
"-a", "--all", action="store_true", default=False, help="output all package information"
)
options = [
("--detectable", print_detectable.__doc__),
("--maintainers", print_maintainers.__doc__),
("--no-dependencies", "do not " + print_dependencies.__doc__),
("--no-variants", "do not " + print_variants.__doc__),
("--no-versions", "do not " + print_versions.__doc__),
("--phases", print_phases.__doc__),
("--tags", print_tags.__doc__),
("--tests", print_tests.__doc__),
("--virtuals", print_virtuals.__doc__),
]
for opt, help_comment in options:
subparser.add_argument(opt, action="store_true", help=help_comment)
arguments.add_common_arguments(subparser, ["package"])
def section_title(s):
return header_color + s + plain_format
def version(s):
return spack.spec.version_color + s + plain_format
def variant(s):
return spack.spec.enabled_variant_color + s + plain_format
class VariantFormatter(object):
def __init__(self, variants):
self.variants = variants
self.headers = ("Name [Default]", "When", "Allowed values", "Description")
# Formats
fmt_name = "{0} [{1}]"
# Initialize column widths with the length of the
# corresponding headers, as they cannot be shorter
# than that
self.column_widths = [len(x) for x in self.headers]
# Expand columns based on max line lengths
for k, e in variants.items():
v, w = e
candidate_max_widths = (
len(fmt_name.format(k, self.default(v))), # Name [Default]
len(str(w)),
len(v.allowed_values), # Allowed values
len(v.description), # Description
)
self.column_widths = (
max(self.column_widths[0], candidate_max_widths[0]),
max(self.column_widths[1], candidate_max_widths[1]),
max(self.column_widths[2], candidate_max_widths[2]),
max(self.column_widths[3], candidate_max_widths[3]),
)
# Don't let name or possible values be less than max widths
_, cols = tty.terminal_size()
max_name = min(self.column_widths[0], 30)
max_when = min(self.column_widths[1], 30)
max_vals = min(self.column_widths[2], 20)
# allow the description column to extend as wide as the terminal.
max_description = min(
self.column_widths[3],
# min width 70 cols, 14 cols of margins and column spacing
max(cols, 70) - max_name - max_vals - 14,
)
self.column_widths = (max_name, max_when, max_vals, max_description)
# Compute the format
self.fmt = "%%-%ss%%-%ss%%-%ss%%s" % (
self.column_widths[0] + 4,
self.column_widths[1] + 4,
self.column_widths[2] + 4,
)
def | (self, v):
s = "on" if v.default is True else "off"
if not isinstance(v.default, bool):
s = v.default
return s
@property
def lines(self):
if not self.variants:
yield " None"
else:
yield " " + self.fmt % self.headers
underline = tuple([w * "=" for w in self.column_widths])
yield " " + self.fmt % underline
yield ""
for k, e in sorted(self.variants.items()):
v, w = e
name = textwrap.wrap(
"{0} [{1}]".format(k, self.default(v)), width=self.column_widths[0]
)
if all(spec == spack.spec.Spec() for spec in w):
w = "--"
when = textwrap.wrap(str(w), width=self.column_widths[1])
allowed = v.allowed_values.replace("True, False", "on, off")
allowed = textwrap.wrap(allowed, width=self.column_widths[2])
description = []
for d_line in v.description.split("\n"):
description += textwrap.wrap(d_line, width=self.column_widths[3])
for t in zip_longest(name, when, allowed, description, fillvalue=""):
yield " " + self.fmt % t
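# Sketch of the table VariantFormatter yields for a single boolean variant
# (column widths and values are illustrative, not exact):
#
#     Name [Default]    When    Allowed values    Description
#     ==============    ====    ==============    ===========
#
#     shared [on]       --      on, off           Build shared libraries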
def print_dependencies(pkg):
"""output build, link, and run package dependencies"""
for deptype in ("build", "link", "run"):
color.cprint("")
color.cprint(section_title("%s Dependencies:" % deptype.capitalize()))
deps = sorted(pkg.dependencies_of_type(deptype))
if deps:
colify(deps, indent=4)
else:
color.cprint(" None")
def print_detectable(pkg):
"""output information on external detection"""
color.cprint("")
color.cprint(section_title("Externally Detectable: "))
    # If the package has an 'executables' or 'libraries' field, it
    # can detect an installation
if hasattr(pkg, "executables") or hasattr(pkg, "libraries"):
find_attributes = []
if hasattr(pkg, "determine_version"):
find_attributes.append("version")
if hasattr(pkg, "determine_variants"):
find_attributes.append("variants")
        # If the package defines neither 'determine_version' nor
        # 'determine_variants', it must use some custom detection
        # mechanism. In that case, just inform the user it's detectable somehow.
color.cprint(
" True{0}".format(
" (" + ", ".join(find_attributes) + ")" if find_attributes else ""
)
)
else:
color.cprint(" False")
def print_maintainers(pkg):
"""output package maintainers"""
if len(pkg.maintainers) > 0:
mnt = " ".join(["@@" + m for m in pkg.maintainers])
color.cprint("")
color.cprint(section_title("Maintainers: ") + mnt)
def print_phases(pkg):
"""output installation phases"""
if hasattr(pkg, "phases") and pkg.phases:
color.cprint("")
color.cprint(section_title("Installation Phases:"))
phase_str = ""
for phase in pkg.phases:
phase_str += " {0}".format(phase)
color.cprint(phase_str)
def print_tags(pkg):
"""output package tags"""
color.cprint("")
color.cprint(section_title("Tags: "))
if hasattr(pkg, "tags"):
tags = sorted(pkg.tags)
colify(tags, indent=4)
else:
color.cprint(" None")
def print_tests(pkg):
"""output relevant build-time and stand-alone tests"""
    # Some built-in base packages (e.g., Autotools) define callbacks (e.g.,
    # 'check') that are inherited by descendant packages. These callbacks may
    # not result in build-time testing if the package's build does not
    # implement the expected functionality (e.g., a 'check' or 'test' target).
    #
    # So the presence of a callback in Spack does not necessarily correspond
    # to the actual presence of build-time tests for a package.
for callbacks, phase in [
(pkg.build_time_test_callbacks, "Build"),
(pkg.install_time_test_callbacks, "Install"),
]:
color.cprint("")
color.cprint(section_title("Available {0} Phase Test Methods:".format(phase)))
names = []
if callbacks:
for name in callbacks:
if getattr(pkg, name, False):
names.append(name)
if names:
colify(sorted(names), indent=4)
else:
color.cprint(" None")
# PackageBase defines an empty install/smoke test but we want to know
# if it has been overridden and, therefore, assumed to be implemented.
color.cprint("")
color.cprint(section_title("Stand-Alone/Smoke Test Methods:"))
names = []
pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
if has_test_method(pkg_cls):
pkg_base = spack.package_base.PackageBase
test_pkgs = [
str(cls.test)
for cls in inspect.getmro(pkg_cls)
if issubclass(cls, pkg_base) and cls.test != pkg_base.test
]
test_pkgs = list(set(test_pkgs))
names.extend([(test.split()[1]).lower() for test in test_pkgs])
# TODO Refactor START
# Use code from package_base.py's test_process IF this functionality is
# accepted.
v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))
# hack for compilers that are not dependencies (yet)
# TODO: this all eventually goes away
c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
if pkg.name in c_names:
v_names.extend(["c", "cxx", "fortran"])
if pkg.spec.satisfies("llvm+clang"):
v_names.extend(["c", "cxx"])
# TODO Refactor END
v_specs = [spack.spec.Spec(v_name) for v_name in v_names]
for v_spec in v_specs:
try:
pkg_cls = spack.repo.path.get_pkg_class(v_spec.name)
if has_test_method(pkg_cls):
names.append("{0}.test".format(pkg_cls.name.lower()))
except spack.repo.UnknownPackageError:
pass
if names:
colify(sorted(names), indent=4)
else:
color.cprint(" None")
def print_variants(pkg):
"""output variants"""
color.cprint("")
color.cprint(section_title("Variants:"))
formatter = VariantFormatter(pkg.variants)
for line in formatter.lines:
color.cprint(color.cescape(line))
def print_versions(pkg):
"""output versions"""
color.cprint("")
color.cprint(section_title("Preferred version: "))
if not pkg.versions:
color.cprint(version(" None"))
color.cprint("")
color.cprint(section_title("Safe versions: "))
color.cprint(version(" None"))
color.cprint("")
color.cprint(section_title("Deprecated versions: "))
color.cprint(version(" None"))
else:
pad = padder(pkg.versions, 4)
preferred = preferred_version(pkg)
url = ""
if pkg.has_code:
url = fs.for_package_version(pkg, preferred)
line = version(" {0}".format(pad(preferred))) + color.cescape(url)
color.cprint(line)
safe = []
deprecated = []
for v in reversed(sorted(pkg.versions)):
if pkg.has_code:
url = fs.for_package_version(pkg, v)
if pkg.versions[v].get("deprecated", False):
deprecated.append((v, url))
else:
safe.append((v, url))
for title, vers in [("Safe", safe), ("Deprecated", deprecated)]:
color.cprint("")
color.cprint(section_title("{0} versions: ".format(title)))
if not vers:
color.cprint(version(" None"))
continue
for v, url in vers:
line = version(" {0}".format(pad(v))) + color.cescape(url)
color.cprint(line)
def print_virtuals(pkg):
"""output virtual packages"""
color.cprint("")
color.cprint(section_title("Virtual Packages: "))
if pkg.provided:
inverse_map = {}
for spec, whens in pkg.provided.items():
for when in whens:
if when not in inverse_map:
inverse_map[when] = set()
inverse_map[when].add(spec)
for when, specs in reversed(sorted(inverse_map.items())):
line = " %s provides %s" % (
when.colorized(),
", ".join(s.colorized() for s in specs),
)
print(line)
else:
color.cprint(" None")
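# Illustrative output for a hypothetical MPI provider:
#     mpich@3.0: provides mpi@3.0
# (the exact strings depend on the package's 'provides' declarations).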
def info(parser, args):
spec = spack.spec.Spec(args.package)
pkg_cls = spack.repo.path.get_pkg_class(spec.name)
pkg = pkg_cls(spec)
# Output core package information
header = section_title("{0}: ").format(pkg.build_system_class) + pkg.name
color.cprint(header)
color.cprint("")
color.cprint(section_title("Description:"))
if pkg.__doc__:
color.cprint(color.cescape(pkg.format_doc(indent=4)))
else:
color.cprint(" None")
color.cprint(section_title("Homepage: ") + pkg.homepage)
# Now output optional information in expected order
sections = [
(args.all or args.maintainers, print_maintainers),
(args.all or args.detectable, print_detectable),
(args.all or args.tags, print_tags),
(args.all or not args.no_versions, print_versions),
(args.all or not args.no_variants, print_variants),
(args.all or args.phases, print_phases),
(args.all or not args.no_dependencies, print_dependencies),
(args.all or args.virtuals, print_virtuals),
(args.all or args.tests, print_tests),
]
for print_it, func in sections:
if print_it:
func(pkg)
color.cprint("")
No community queries yet
The top public SQL queries from the community will appear here once available.